Dataset columns: source (file name; string lengths 3 to 92) and c (C source text; string lengths 26 to 2.25M).
GB_unop__identity_fp32_bool.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fp32_bool // op(A') function: GB_unop_tran__identity_fp32_bool // C type: float // A type: bool // cast: float cij = (float) aij // unaryop: cij = aij #define GB_ATYPE \ bool #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ float z = (float) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ bool aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = (float) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP32 || GxB_NO_BOOL) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fp32_bool ( float *Cx, // Cx and Ax may be aliased const bool *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { bool aij = Ax [p] ; float z = (float) aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp32_bool ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
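The kernel above reduces to a single cast-and-assign loop: GB_unop_apply__identity_fp32_bool casts each bool entry of Ax to float and stores it in Cx under a static OpenMP schedule, while the transpose variant defers to GB_unop_transpose.c. A minimal standalone sketch of that same loop, written outside the GB_* macro framework (the function and variable names here are illustrative, not part of GraphBLAS):

/* Standalone sketch of the cast-and-apply loop that
 * GB_unop_apply__identity_fp32_bool expands to; illustrative only. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static void apply_identity_fp32_bool (float *Cx, const bool *Ax,
                                      int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        bool  aij = Ax [p] ;        /* aij = Ax [pA]               */
        float z   = (float) aij ;   /* cast: float z = (float) aij */
        Cx [p] = z ;                /* identity op: cij = aij      */
    }
}

int main (void)
{
    bool  A [4] = { true, false, true, true } ;
    float C [4] ;
    apply_identity_fp32_bool (C, A, 4, 2) ;
    for (int p = 0 ; p < 4 ; p++) printf ("%g ", C [p]) ;  /* prints: 1 0 1 1 */
    printf ("\n") ;
    return 0 ;
}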
GB_unop__identity_fc64_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fc64_uint8) // op(A') function: GB (_unop_tran__identity_fc64_uint8) // C type: GxB_FC64_t // A type: uint8_t // cast: GxB_FC64_t cij = GxB_CMPLX ((double) (aij), 0) // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FC64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fc64_uint8) ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const uint8_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (uint8_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint8_t aij = Ax [p] ; GxB_FC64_t z = GxB_CMPLX ((double) (aij), 0) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fc64_uint8) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
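Compared with the fp32/bool kernel, this generated variant adds two things: a complex typecast (GxB_CMPLX ((double) (aij), 0)) and a bitmap case that skips entries whose Ab[p] flag is zero. A standalone sketch of the bitmap-aware apply loop, assuming GxB_FC64_t behaves like C99 double complex and that GxB_CMPLX with a zero imaginary argument amounts to an ordinary real-to-complex conversion (names are illustrative, not the GraphBLAS API):

/* Standalone sketch of the bitmap-aware apply loop; illustrative only. */
#include <complex.h>
#include <stdint.h>
#include <stdio.h>

static void apply_identity_fc64_uint8 (double complex *Cx, const uint8_t *Ax,
                                       const int8_t *Ab,   /* bitmap, may be NULL */
                                       int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* entry not present in bitmap */
        uint8_t aij = Ax [p] ;
        Cx [p] = (double) aij + 0.0 * I ;       /* cast: GxB_CMPLX ((double) aij, 0) */
    }
}

int main (void)
{
    uint8_t A [3] = { 7, 0, 255 } ;
    int8_t  Ab [3] = { 1, 0, 1 } ;              /* middle entry absent */
    double complex C [3] = { 0 } ;
    apply_identity_fc64_uint8 (C, A, Ab, 3, 2) ;
    for (int p = 0 ; p < 3 ; p++) printf ("%g+%gi ", creal (C [p]), cimag (C [p])) ;
    printf ("\n") ;
    return 0 ;
}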
mandelbrot.c
/*======================= M A N D E L B R O T =======================*/ // Implementation Based on Rosetta Code Example // 1) Draws Mandelbrot set for Fc(z)=z*z +c using // Mandelbrot algorithm (boolean escape time). // 2) Technique of creating ppm file is based on // the code of Claudio Rocchini. http://en. // wikipedia.org/wiki/Image:Color_complex_plot // .jpg. Create 24 bit color graphic file, // portable pixmap file = PPM, see http://en. // wikipedia.org/wiki/Portable_pixmap to see // the file use external application (graphic // viewer). // Inclusions #include <math.h> #include <stdio.h> #include <stdlib.h> #include <time.h> #include <omp.h> // Definitions #define MAX_COLOR_COMPONENT_VALUE 255 #define I_MAX 200 #define CXMIN -1.5 #define CXMAX 0.5 #define CYMIN -1.0 #define CYMAX 1.0 #define ESCAPE_RADIUS_2 4.0 // Image Structure Definition typedef struct { unsigned char color[3]; } image; // Function Prototypes void instructions( int argc, char** argv ); // Main int main ( int argc, char **argv ) { instructions( argc, argv ); // Display Usage Instructions If Wrong Arguments double time_start, time_end; time_start = omp_get_wtime(); char *filename = argv[1]; // Parse Input Arguments unsigned int iXMax = atoi( argv[2] ); // Generated Image Width unsigned int iYMax = iXMax; // Generated Image Height unsigned int display = atoi( argv[3] ); // Argument to Display Debug Text unsigned int i = 0; // Iteration Number unsigned int iX = 0; // Screen (Integer) X Coordinate unsigned int iY = 0; // Screen (Integer) Y Coordinate unsigned int thisPixelNum = 0; // Iterator for Tracking Pixel Number double cX = 0.0; // World (Double) X Coordinate double cY = 0.0; // World (Double) Y Coordinate double zX = 0.0; // Z = Zx + Zy * i; Z0 = 0 double zY = 0.0; // (see just above) double zX2 = 0.0; // Square of Zx double zY2 = 0.0; // Square of Zy char *comment = "# "; // Dynamic File Header Comment // Intro Text and Setup if( display ) { printf( "\n = = = Mandelbrot Set Generator = = = \n\n" ); } unsigned int size = iXMax * iYMax; // Determination of Size double pixelWidth = ( CXMAX - CXMIN ) / iXMax; // Determination of Pixel Width/ double pixelHeight = ( CYMAX - CYMIN ) / iYMax; // Height from Window/Size image *fractal = malloc( size * sizeof( *fractal ) ); // Allocate Storage for Image // Compute Fractal Image if( display ) { printf( "Generating Mandelbrot Set...\n" ); } #pragma omp parallel for private(iY, iX, cY, cX, zY, zX, zY2, zX2, i, thisPixelNum) shared (iYMax, iXMax, pixelHeight, pixelWidth, fractal) default (none) schedule(guided) for( iY = 0; iY < iYMax; iY++ ) { // Iterate Through Image Rows cY = CYMIN + iY * pixelHeight; if( fabs( cY ) < ( pixelHeight / 2 ) ) { cY = 0.0; // Main Antenna } for( iX = 0; iX < iXMax; iX++ ) { // Iterate Through Image Columns cX = CXMIN + iX * pixelWidth; zX = 0.0; // Initial Value of Orbit - Critical Point Z = 0 zY = 0.0; zX2 = zX * zX; zY2 = zY * zY; for( i = 0; ( i < I_MAX ) && ( ( zX2 + zY2 ) < ESCAPE_RADIUS_2 ); i++ ) { zY = 2 * zX * zY + cY; zX = zX2 - zY2 + cX; zX2 = zX * zX; zY2 = zY * zY; }; // Save Pixel Color thisPixelNum = iY * iYMax + iX; // Where is this pixel in the image? 
if( i == I_MAX ) { // Color for Interior of Mandelbrot Set fractal[thisPixelNum].color[0] = 37; // Red fractal[thisPixelNum].color[1] = 37; // Green fractal[thisPixelNum].color[2] = 37; // Blue } else { // Color for Exterior of Mandelbrot Set fractal[thisPixelNum].color[0] = 0; // Red fractal[thisPixelNum].color[1] = 0; // Green fractal[thisPixelNum].color[2] = 255; // Blue } // End If } // End iX For } // End iY For // Image File Write Phase if( display ) { printf( "Writing File Out...\n" ); } // Create New File - give it a name and open it in binary mode. FILE *filePtr = fopen( filename, "wb" ); // b - Binary Mode // Write ASCII Header to the File fprintf( filePtr, "P6\n %s\n %d\n %d\n %d\n", comment, iXMax, iYMax, MAX_COLOR_COMPONENT_VALUE ); // Image File Write Out - must be done serially. for( iY = 0; iY < iYMax; iY++ ) { for( iX = 0; iX < iXMax; iX++ ) { thisPixelNum = iY * iYMax + iX; // Set Dereference Pixel Location fwrite( fractal[thisPixelNum].color, 1, 3, filePtr ); // Write Pixel Color to File } } // Final Tasks fclose( filePtr ); free( fractal ); if( display ) { printf( "Operation Complete!\n\n" ); } time_end = omp_get_wtime(); printf("Execution Time (s): %f\n", time_end-time_start ); return EXIT_SUCCESS; } // Function Implementations // Instructions - display usage instructions if argument count incorrect. void instructions( int argc, char** argv ) { if( argc != 4 ) { printf( "\nUsage: %s <output> <x/y> <display>\n", argv[0] ); printf( " Output - a .ppm image to output with the fractal.\n" ); printf( " X/Y - width and height of image in pixels.\n" ); printf( " Display - 1 displays debug text, 0 just displays time values for raw data tables.\n\n" ); exit( EXIT_FAILURE ); } } // End mandelbrot.c - EWG SDG
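For reference, instructions() documents the expected invocation: an output .ppm path, a single pixel dimension used for both width and height, and a 0/1 debug-text flag. Because timing uses omp_get_wtime() and the pixel loop is an OpenMP parallel for, the program must be built with OpenMP enabled; assuming GCC, a build and run would look something like gcc -O2 -fopenmp -o mandelbrot mandelbrot.c -lm followed by ./mandelbrot out.ppm 1024 1. Only the escape-time loop is parallel (guided schedule); the PPM write-out is deliberately left serial so pixels land in the file in order.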
blake2sp.c
/* * Copyright (c) 2015-2018 Nexenta Systems, inc. * * This file is part of EdgeFS Project * (see https://github.com/Nexenta/edgefs). * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* BLAKE2 reference source code package - optimized C implementations Copyright 2012, Samuel Neves <[email protected]>. You may use this under the terms of the CC0, the OpenSSL Licence, or the Apache Public License 2.0, at your option. The terms of these licenses can be found at: - CC0 1.0 Universal : http://creativecommons.org/publicdomain/zero/1.0 - OpenSSL license : https://www.openssl.org/source/license.html - Apache 2.0 : http://www.apache.org/licenses/LICENSE-2.0 More information about the BLAKE2 hash function can be found at https://blake2.net. */ #include <stdlib.h> #include <string.h> #include <stdio.h> #if defined(_OPENMP) #include <omp.h> #endif #include "blake2.h" #include "blake2-impl.h" #define PARALLELISM_DEGREE 8 /* blake2sp_init_param defaults to setting the expecting output length from the digest_length parameter block field. In some cases, however, we do not want this, as the output length of these instances is given by inner_length instead. 
*/ static int blake2sp_init_leaf_param( blake2s_state *S, const blake2s_param *P ) { int err = blake2s_init_param(S, P); S->outlen = P->inner_length; return err; } static int blake2sp_init_leaf( blake2s_state *S, size_t outlen, size_t keylen, uint64_t offset ) { blake2s_param P[1]; P->digest_length = (uint8_t)outlen; P->key_length = (uint8_t)keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; P->leaf_length = 0; P->node_offset = offset; P->xof_length = 0; P->node_depth = 0; P->inner_length = BLAKE2S_OUTBYTES; memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2sp_init_leaf_param( S, P ); } static int blake2sp_init_root( blake2s_state *S, size_t outlen, size_t keylen ) { blake2s_param P[1]; P->digest_length = (uint8_t)outlen; P->key_length = (uint8_t)keylen; P->fanout = PARALLELISM_DEGREE; P->depth = 2; P->leaf_length = 0; P->node_offset = 0; P->xof_length = 0; P->node_depth = 1; P->inner_length = BLAKE2S_OUTBYTES; memset( P->salt, 0, sizeof( P->salt ) ); memset( P->personal, 0, sizeof( P->personal ) ); return blake2s_init_param( S, P ); } int blake2sp_init( blake2sp_state *S, size_t outlen ) { size_t i; if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; S->outlen = outlen; if( blake2sp_init_root( S->R, outlen, 0 ) < 0 ) return -1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S->S[i], outlen, 0, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; return 0; } int blake2sp_init_key( blake2sp_state *S, size_t outlen, const void *key, size_t keylen ) { size_t i; if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; if( !key || !keylen || keylen > BLAKE2S_KEYBYTES ) return -1; memset( S->buf, 0, sizeof( S->buf ) ); S->buflen = 0; S->outlen = outlen; if( blake2sp_init_root( S->R, outlen, keylen ) < 0 ) return -1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S->S[i], outlen, keylen, i ) < 0 ) return -1; S->R->last_node = 1; S->S[PARALLELISM_DEGREE - 1]->last_node = 1; { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key, keylen ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } return 0; } int blake2sp_update( blake2sp_state *S, const void *pin, size_t inlen ) { const unsigned char * in = (const unsigned char *)pin; size_t left = S->buflen; size_t fill = sizeof( S->buf ) - left; size_t i; if( left && inlen >= fill ) { memcpy( S->buf + left, in, fill ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, BLAKE2S_BLOCKBYTES ); in += fill; inlen -= fill; left = 0; } #if defined(_OPENMP) #pragma omp parallel shared(S), num_threads(PARALLELISM_DEGREE) #else for( i = 0; i < PARALLELISM_DEGREE; ++i ) #endif { #if defined(_OPENMP) size_t i = omp_get_thread_num(); #endif size_t inlen__ = inlen; const unsigned char *in__ = ( const unsigned char * )in; in__ += i * BLAKE2S_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S->S[i], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } } in += inlen - inlen % ( PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ); inlen %= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; if( inlen > 0 ) memcpy( S->buf + left, in, inlen ); S->buflen = left + inlen; return 0; } int 
blake2sp_final( blake2sp_state *S, void *out, size_t outlen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; size_t i; if(out == NULL || outlen < S->outlen) { return -1; } for( i = 0; i < PARALLELISM_DEGREE; ++i ) { if( S->buflen > i * BLAKE2S_BLOCKBYTES ) { size_t left = S->buflen - i * BLAKE2S_BLOCKBYTES; if( left > BLAKE2S_BLOCKBYTES ) left = BLAKE2S_BLOCKBYTES; blake2s_update( S->S[i], S->buf + i * BLAKE2S_BLOCKBYTES, left ); } blake2s_final( S->S[i], hash[i], BLAKE2S_OUTBYTES ); } for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S->R, hash[i], BLAKE2S_OUTBYTES ); return blake2s_final( S->R, out, S->outlen ); } int blake2sp( void *out, size_t outlen, const void *in, size_t inlen, const void *key, size_t keylen ) { uint8_t hash[PARALLELISM_DEGREE][BLAKE2S_OUTBYTES]; blake2s_state S[PARALLELISM_DEGREE][1]; blake2s_state FS[1]; size_t i; /* Verify parameters */ if ( NULL == in && inlen > 0 ) return -1; if ( NULL == out ) return -1; if ( NULL == key && keylen > 0) return -1; if( !outlen || outlen > BLAKE2S_OUTBYTES ) return -1; if( keylen > BLAKE2S_KEYBYTES ) return -1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) if( blake2sp_init_leaf( S[i], outlen, keylen, i ) < 0 ) return -1; S[PARALLELISM_DEGREE - 1]->last_node = 1; /* mark last node */ if( keylen > 0 ) { uint8_t block[BLAKE2S_BLOCKBYTES]; memset( block, 0, BLAKE2S_BLOCKBYTES ); memcpy( block, key, keylen ); for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( S[i], block, BLAKE2S_BLOCKBYTES ); secure_zero_memory( block, BLAKE2S_BLOCKBYTES ); /* Burn the key from stack */ } #if defined(_OPENMP) #pragma omp parallel shared(S,hash), num_threads(PARALLELISM_DEGREE) #else for( i = 0; i < PARALLELISM_DEGREE; ++i ) #endif { #if defined(_OPENMP) size_t i = omp_get_thread_num(); #endif size_t inlen__ = inlen; const unsigned char *in__ = ( const unsigned char * )in; in__ += i * BLAKE2S_BLOCKBYTES; while( inlen__ >= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES ) { blake2s_update( S[i], in__, BLAKE2S_BLOCKBYTES ); in__ += PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; inlen__ -= PARALLELISM_DEGREE * BLAKE2S_BLOCKBYTES; } if( inlen__ > i * BLAKE2S_BLOCKBYTES ) { const size_t left = inlen__ - i * BLAKE2S_BLOCKBYTES; const size_t len = left <= BLAKE2S_BLOCKBYTES ? 
left : BLAKE2S_BLOCKBYTES; blake2s_update( S[i], in__, len ); } blake2s_final( S[i], hash[i], BLAKE2S_OUTBYTES ); } if( blake2sp_init_root( FS, outlen, keylen ) < 0 ) return -1; FS->last_node = 1; for( i = 0; i < PARALLELISM_DEGREE; ++i ) blake2s_update( FS, hash[i], BLAKE2S_OUTBYTES ); return blake2s_final( FS, out, outlen ); } #if defined(BLAKE2SP_SELFTEST) #include <string.h> #include "blake2-kat.h" int main( void ) { uint8_t key[BLAKE2S_KEYBYTES]; uint8_t buf[BLAKE2_KAT_LENGTH]; size_t i, step; for( i = 0; i < BLAKE2S_KEYBYTES; ++i ) key[i] = ( uint8_t )i; for( i = 0; i < BLAKE2_KAT_LENGTH; ++i ) buf[i] = ( uint8_t )i; /* Test simple API */ for( i = 0; i < BLAKE2_KAT_LENGTH; ++i ) { uint8_t hash[BLAKE2S_OUTBYTES]; blake2sp( hash, BLAKE2S_OUTBYTES, buf, i, key, BLAKE2S_KEYBYTES ); if( 0 != memcmp( hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES ) ) { goto fail; } } /* Test streaming API */ for(step = 1; step < BLAKE2S_BLOCKBYTES; ++step) { for (i = 0; i < BLAKE2_KAT_LENGTH; ++i) { uint8_t hash[BLAKE2S_OUTBYTES]; blake2sp_state S; uint8_t * p = buf; size_t mlen = i; int err = 0; if( (err = blake2sp_init_key(&S, BLAKE2S_OUTBYTES, key, BLAKE2S_KEYBYTES)) < 0 ) { goto fail; } while (mlen >= step) { if ( (err = blake2sp_update(&S, p, step)) < 0 ) { goto fail; } mlen -= step; p += step; } if ( (err = blake2sp_update(&S, p, mlen)) < 0) { goto fail; } if ( (err = blake2sp_final(&S, hash, BLAKE2S_OUTBYTES)) < 0) { goto fail; } if (0 != memcmp(hash, blake2sp_keyed_kat[i], BLAKE2S_OUTBYTES)) { goto fail; } } } puts( "ok" ); return 0; fail: puts("error"); return -1; } #endif
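blake2sp.c exposes both a one-shot entry point (blake2sp) and a streaming API (blake2sp_init / blake2sp_init_key, blake2sp_update, blake2sp_final), with the 8-way leaf hashing optionally parallelized via OpenMP. A minimal caller sketch, assuming blake2.h and the blake2s/blake2sp objects from this source tree are available to compile and link against:

/* Minimal caller sketch for the one-shot and streaming APIs defined above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include "blake2.h"

int main (void)
{
    const char *msg = "hello, blake2sp";
    uint8_t h1[BLAKE2S_OUTBYTES], h2[BLAKE2S_OUTBYTES];

    /* One-shot, unkeyed */
    if (blake2sp(h1, BLAKE2S_OUTBYTES, msg, strlen(msg), NULL, 0) < 0) return 1;

    /* Streaming: init / update / final */
    blake2sp_state S;
    if (blake2sp_init(&S, BLAKE2S_OUTBYTES) < 0) return 1;
    if (blake2sp_update(&S, msg, strlen(msg)) < 0) return 1;
    if (blake2sp_final(&S, h2, BLAKE2S_OUTBYTES) < 0) return 1;

    /* Both paths should produce the same digest */
    printf("%s\n", memcmp(h1, h2, BLAKE2S_OUTBYTES) == 0 ? "match" : "mismatch");
    return 0;
}

When a key is used, the streaming path must start with blake2sp_init_key rather than blake2sp_init, mirroring the keyed branch exercised by the self-test.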
hmm.c
/* * Copyright (C) 2017 by Benedict Paten ([email protected]) * * Released under the MIT license, see LICENSE.txt */ #include "stRPHmm.h" // OpenMP #if defined(_OPENMP) #include <omp.h> #define CELL_BUFFER_SIZE 1000 #endif inline double logAddP(double a, double b, bool maxNotSum) { /* * Local function for doing addition of logs or (if doing Viterbi style calculation), to take the max. */ return maxNotSum ? (a > b ? a : b) : stMath_logAddExact(a, b); } /* * Functions for the read partitioning hmm object stRPHmm. */ void stRPHmmParameters_destruct(stRPHmmParameters *params) { free(params->hetSubModel); free(params->hetSubModelSlow); free(params->readErrorSubModel); free(params->readErrorSubModelSlow); free(params); } static void printMatrix(FILE *fH, double *matrixSlow, uint16_t *matrixFast) { for(int64_t i=0; i<ALPHABET_SIZE; i++) { fprintf(fH, "\t\t\t"); for(int64_t j=0; j<ALPHABET_SIZE; j++) { fprintf(fH, " %f, ", exp(matrixSlow[i*ALPHABET_SIZE + j])); } fprintf(fH, "\n"); } } double *getColumnBaseComposition(stRPColumn *column, int64_t pos) { /* * Get the observed counts for each base seen at a particular position in a column */ double *baseCounts = st_calloc(ALPHABET_SIZE, sizeof(double)); for (int64_t i=0; i<column->depth; i++) { stProfileSeq *seq = column->seqHeaders[i]; if (pos >= seq->refStart && pos < seq->length+seq->refStart) { for(int64_t j=0; j<ALPHABET_SIZE; j++) { baseCounts[j] += getProb(&(seq->profileProbs[(pos - seq->refStart) * ALPHABET_SIZE]), j); } } } return baseCounts; } double *getProfileSequenceBaseCompositionAtPosition(stSet *profileSeqs, int64_t pos) { /* * Get the expected count of each alphabet character in the profile sequences, returned * as an array. */ double *baseCounts = st_calloc(ALPHABET_SIZE, sizeof(double)); stSetIterator *it = stSet_getIterator(profileSeqs); stProfileSeq *pSeq; while((pSeq = stSet_getNext(it)) != NULL) { if (pos >= pSeq->refStart && pos < pSeq->refStart+pSeq->length) { for(int64_t j=0; j<ALPHABET_SIZE; j++) { baseCounts[j] += getProb(&(pSeq->profileProbs[(pos - pSeq->refStart)*ALPHABET_SIZE]), j); } } } return baseCounts; } void stRPHmmParameters_printParameters(stRPHmmParameters *params, FILE *fH) { /* * Print the parameters in the parameters object in a human readable form. */ fprintf(fH, "\tRead Partitioning HMM Parameters\n"); fprintf(fH, "\t\tAlphabet_size: %i\n" "\t\tMax_read coverage_depth: %" PRIi64 "\n" "\t\tMax_not sum transitions?: %i\n" "\t\tMax_partitions in a column of an HMM: %" PRIi64 "\n" "\t\tMin read coverage to support phasing between heterozygous sites: %" PRIi64 "\n", ALPHABET_SIZE, params->maxCoverageDepth, (int)params->maxNotSumTransitions, params->maxPartitionsInAColumn, params->minReadCoverageToSupportPhasingBetweenHeterozygousSites); fprintf(fH, "\t\tHeterozygous substitution rates:\n"); printMatrix(fH, params->hetSubModelSlow, params->hetSubModel); fprintf(fH, "\t\tRead error substitution rates:\n"); printMatrix(fH, params->readErrorSubModelSlow, params->readErrorSubModel); fprintf(fH, "\t\tIterations of parameter learning: %" PRIi64 "\n", params->trainingIterations); fprintf(fH, "\t\tInclude deletions as gap character? 
: %i\n", (int) params->gapCharactersForDeletions); fprintf(fH, "\t\tUse reference prior?: %i\n", (int) params->useReferencePrior); fprintf(fH, "\t\tFilter bad reads?: %i\n", (int)params->filterBadReads); fprintf(fH, "\t\tFilter match threshold: %f\n", params->filterMatchThreshold); fprintf(fH, "\t\tFilter reads with any of these sam flags set: %d\n", params->filterAReadWithAnyOneOfTheseSamFlagsSet); fprintf(fH, "\t\tInclude inverted partitions?: %i\n", (int) params->includeInvertedPartitions); fprintf(fH, "\t\tEstimate read error probs empirically?: %i\n", (int) params->estimateReadErrorProbsEmpirically); fprintf(fH, "\t\tFiltering likely homoygous sites? : %i\n", (int)params->filterLikelyHomozygousSites); fprintf(fH, "\t\tminSecondMostFrequentBaseFilter: %f\n", params->minSecondMostFrequentBaseFilter); fprintf(fH, "\t\tminSecondMostFrequentBaseLogProbFilter: %f\n", params->minSecondMostFrequentBaseLogProbFilter); fprintf(fH, "\t\tRounds of iterative refinement: %" PRIi64 "\n", params->roundsOfIterativeRefinement); fprintf(fH, "\t\tWriting gvcf? : %i\n", (int)params->writeGVCF); fprintf(fH, "\t\tVerbose Attributes:\n"); if (params->verboseTruePositives) fprintf(fH, "\t\t\tTRUE_POSITIVES\n"); if (params->verboseFalsePositives) fprintf(fH, "\t\t\tFALSE_POSITIVES\n"); if (params->verboseFalseNegatives) fprintf(fH, "\t\t\tFALSE_NEGATIVES\n"); } static void calculateReadErrorSubModel(double *readErrorSubModel, int64_t refStart, int64_t length, uint64_t *haplotypeSeq, stSet *reads) { /* * Returns a normalized substitution matrix estimating the probability of read error substitutions by ML. */ stSetIterator *readIt = stSet_getIterator(reads); stProfileSeq *pSeq; int64_t end = refStart + length; while((pSeq = stSet_getNext(readIt)) != NULL) { // Get the overlapping interval int64_t i = refStart > pSeq->refStart ? refStart : pSeq->refStart; int64_t j = end < pSeq->refStart + pSeq->length ? end : pSeq->refStart + pSeq->length; // For each pair of read and haplotype characters for(;i<j;i++) { // Check coordinates in bounds assert(i - refStart >= 0 && i-refStart < length); assert(i - pSeq->refStart >= 0 && i - pSeq->refStart < pSeq->length); int64_t hapChar = haplotypeSeq[i - refStart]; for(int64_t readChar=0; readChar<ALPHABET_SIZE; readChar++) { double probOfReadChar = getProb(&(pSeq->profileProbs[(i-pSeq->refStart) * ALPHABET_SIZE]), readChar); *getSubstitutionProbSlow(readErrorSubModel, hapChar, readChar) += probOfReadChar; } } } stSet_destructIterator(readIt); } void normaliseSubstitutionMatrix(double *subMatrix) { /* * Normalise matrix so that counts are converted to conditional probabilities of observing * derived character given source character. */ for(int64_t fromChar=0; fromChar<ALPHABET_SIZE; fromChar++) { double totalSubCount = 0.0; for(int64_t toChar=0; toChar<ALPHABET_SIZE; toChar++) { totalSubCount += *getSubstitutionProbSlow(subMatrix, fromChar, toChar); } for(int64_t toChar=0; toChar<ALPHABET_SIZE; toChar++) { double p = *getSubstitutionProbSlow(subMatrix, fromChar, toChar) / totalSubCount; *getSubstitutionProbSlow(subMatrix, fromChar, toChar) = p <= 0.0001 ? 0.0001 : p; } } } void stRPHmmParameters_setReadErrorSubstitutionParameters(stRPHmmParameters *params, double *readErrorSubModel) { /* * Set the substitution parameters of the read error substitution model from the given matrix. 
*/ for(int64_t j=0; j<ALPHABET_SIZE; j++) { for(int64_t k=0; k<ALPHABET_SIZE; k++) { setSubstitutionProb(params->readErrorSubModel, params->readErrorSubModelSlow, j, k, *getSubstitutionProbSlow(readErrorSubModel, j, k)); } } } double *getEmptyReadErrorSubstitutionMatrix(stRPHmmParameters *params) { /* * Get an empty substitution matrix initializaed with the pseudo counts specified by params. */ double *readErrorSubModel = st_calloc(ALPHABET_SIZE * ALPHABET_SIZE, sizeof(double)); for(int64_t j=0; j<ALPHABET_SIZE*ALPHABET_SIZE; j++) { readErrorSubModel[j] = params->offDiagonalReadErrorPseudoCount; } for(int64_t j=0; j<ALPHABET_SIZE; j++) { readErrorSubModel[j*ALPHABET_SIZE + j] = params->onDiagonalReadErrorPseudoCount; } return readErrorSubModel; } void stRPHmmParameters_learnParameters(stRPHmmParameters *params, stList *profileSequences, stHash *referenceNamesToReferencePriors) { /* * Learn the substitution matrices iteratively, updating the params object in place. * Iterations is the number of cycles of stochastic parameter search to do. */ // For each iteration construct a set of HMMs and estimate the parameters from it. for(int64_t i=0; i<params->trainingIterations; i++) { st_logDebug("\tStarting training iteration %" PRIi64 "\n", i); // Substitution model for haplotypes to reads double *readErrorSubModel = getEmptyReadErrorSubstitutionMatrix(params); stList *hmms = getRPHmms(profileSequences, referenceNamesToReferencePriors, params); for(int64_t j=0; j<stList_length(hmms); j++) { stRPHmm *hmm = stList_get(hmms, j); // Run the forward-backward algorithm stRPHmm_forwardBackward(hmm); // Now compute a high probability path through the hmm stList *path = stRPHmm_forwardTraceBack(hmm); // Compute the genome fragment stGenomeFragment *gF = stGenomeFragment_construct(hmm, path); // Get partitioned sequences stSet *reads1 = stRPHmm_partitionSequencesByStatePath(hmm, path, 1); stSet *reads2 = stRPHmm_partitionSequencesByStatePath(hmm, path, 0); // Estimate the read error substitution parameters calculateReadErrorSubModel(readErrorSubModel, gF->refStart, gF->length, gF->haplotypeString1, reads1); calculateReadErrorSubModel(readErrorSubModel, gF->refStart, gF->length, gF->haplotypeString2, reads2); // Cleanup stSet_destruct(reads1); stSet_destruct(reads2); stGenomeFragment_destruct(gF); stList_destruct(path); } // Cleanup stList_destruct(hmms); // Normalise the probabilities normaliseSubstitutionMatrix(readErrorSubModel); // Update the read error substitution parameters of the parameters object stRPHmmParameters_setReadErrorSubstitutionParameters(params, readErrorSubModel); // Cleanup free(readErrorSubModel); //Log the parameters info if(st_getLogLevel() == debug) { st_logDebug("\tParameters learned after iteration %" PRIi64 " of training:\n", i); stRPHmmParameters_printParameters(params, stderr); } } } static int cmpint64(int64_t i, int64_t j) { return i > j ? 1 : i < j ? -1 : 0; } inline int stRPHmm_cmpFn(const void *a, const void *b) { /* * Compares two read partitioning HMMs by coordinate on the reference. * Will return equal only if they are the same HMM, with the same memory * address, otherwise compares pointers for equal HMMs. * */ stRPHmm *hmm1 = (stRPHmm *)a, *hmm2 = (stRPHmm *)b; int i = strcmp(hmm1->referenceName, hmm2->referenceName); if(i == 0) { i = cmpint64(hmm1->refStart, hmm2->refStart); if(i == 0) { // Sort by descending order of length i = cmpint64(hmm2->refLength, hmm1->refLength); if(i == 0) { i = hmm1 > hmm2 ? 1 : (hmm1 < hmm2 ? 
-1 : 0); } } } return i; } stRPHmm *stRPHmm_construct(stProfileSeq *profileSeq, stReferencePriorProbs *referencePriorProbs, stRPHmmParameters *params) { /* * Create a read partitioning HMM representing the single sequence profile. */ stRPHmm *hmm = st_calloc(1, sizeof(stRPHmm)); // Set reference coordinates hmm->referenceName = stString_copy(profileSeq->referenceName); hmm->refStart = profileSeq->refStart; hmm->refLength = profileSeq->length; // Add the single profile sequence to the list of the hmm's sequences hmm->profileSeqs = stList_construct(); stList_append(hmm->profileSeqs, profileSeq); hmm->parameters = params; // Parameters for the model for computation, this is shared by different HMMs hmm->referencePriorProbs = referencePriorProbs; assert(stString_eq(hmm->referenceName, referencePriorProbs->referenceName)); assert(hmm->refStart >= referencePriorProbs->refStart); assert(hmm->refStart + hmm->refLength <= referencePriorProbs->refStart + referencePriorProbs->length); hmm->columnNumber = 1; // The number of columns in the model, initially just 1 hmm->maxDepth = 1; // The maximum number of states in a column, initially just 1 // Create the first column of the model stProfileSeq **seqHeaders = st_malloc(sizeof(stProfileSeq *)); seqHeaders[0] = profileSeq; uint8_t **seqs = st_malloc(sizeof(uint8_t *)); seqs[0] = profileSeq->profileProbs; stRPColumn *column = stRPColumn_construct(hmm->refStart, hmm->refLength, 1, seqHeaders, seqs, referencePriorProbs); hmm->firstColumn = column; hmm->lastColumn = column; // Add two cells to the column to represent the two possible partitions of the single profile sequence stRPCell *cell = stRPCell_construct(1); column->head = cell; cell->nCell = stRPCell_construct(0); return hmm; } void stRPHmm_destruct(stRPHmm *hmm, bool destructColumns) { /* * Free memory owned by the hmm, including columns. */ free(hmm->referenceName); stList_destruct(hmm->profileSeqs); if(destructColumns) { // Cleanup the columns of the hmm stRPColumn *column = hmm->firstColumn; while(1) { stRPMergeColumn *mColumn = column->nColumn; stRPColumn_destruct(column); if(mColumn == NULL) { break; } column = mColumn->nColumn; stRPMergeColumn_destruct(mColumn); } } free(hmm); } void stRPHmm_destruct2(stRPHmm *hmm) { /* * Cleans up hmm and columns */ stRPHmm_destruct(hmm, 1); } stList *stRPHmm_forwardTraceBack(stRPHmm *hmm) { /* * Traces back through the forward matrix picking the most probable path. * (yes, this is non-symmetric) * Returns the result as a list of cells, one from each column. 
*/ stList *path = stList_construct(); stRPColumn *column = hmm->lastColumn; // Pick cell in the last column with highest probability stRPCell *cell = column->head; double maxProb = cell->forwardLogProb; stRPCell *maxCell = cell; while((cell = cell->nCell) != NULL) { if(cell->forwardLogProb > maxProb) { maxProb = cell->forwardLogProb; maxCell = cell; } } stList_append(path, maxCell); // Add chosen cell to output // Walk back through previous columns while(column->pColumn != NULL) { // Get previous merge cell stRPMergeCell *mCell = stRPMergeColumn_getPreviousMergeCell(maxCell, column->pColumn); assert(mCell != NULL); // Switch to previous column column = column->pColumn->pColumn; // Walk through cells in the previous column to find the one with the // highest forward probability that transitions to maxCell cell = column->head; maxCell = NULL; maxProb = ST_MATH_LOG_ZERO; do { // If compatible and has greater probability if(stRPMergeColumn_getNextMergeCell(cell, column->nColumn) == mCell && cell->forwardLogProb > maxProb) { maxProb = cell->forwardLogProb; maxCell = cell; } } while((cell = cell->nCell) != NULL); assert(maxCell != NULL); stList_append(path, maxCell); } stList_reverse(path); // So cells go in order return path; } stSet *stRPHmm_partitionSequencesByStatePath(stRPHmm *hmm, stList *path, bool partition1) { /* * For an hmm and path through the hmm (e.g. computed with stRPHmm_forwardTraceBack) returns the * set of sequences in the hmm that are predicted to come from one given haplotype. */ stSet *seqsInHap1 = stSet_construct(); // For each cell/column pair stRPColumn *column = hmm->firstColumn; for(int64_t i=0; i<stList_length(path); i++) { stRPCell *cell = stList_get(path, i); // Get sequences in first or second partition for(int64_t j=0; j<column->depth; j++) { if((seqInHap1(cell->partition, j) && partition1) || (!seqInHap1(cell->partition, j) && !partition1)) { stSet_insert(seqsInHap1, column->seqHeaders[j]); // todo add to readHaplotypes } } if(column->nColumn != NULL) { column = column->nColumn->nColumn; } } return seqsInHap1; } void stRPHmm_print(stRPHmm *hmm, FILE *fileHandle, bool includeColumns, bool includeCells) { /* * Prints a debug friendly representation of the state of an hmm. */ //Header line fprintf(fileHandle, "HMM REF_NAME: %s REF_START: %" PRIi64 " REF_LENGTH %" PRIi64 " COLUMN_NUMBER %" PRIi64 " MAX_DEPTH: %" PRIi64 " FORWARD_PROB: %f BACKWARD_PROB: %f\n", hmm->referenceName, hmm->refStart, hmm->refLength, hmm->columnNumber, hmm->maxDepth, (float)hmm->forwardLogProb, (float)hmm->backwardLogProb); if(includeColumns) { stRPColumn *column = hmm->firstColumn; int64_t i=0; while(1) { fprintf(fileHandle, "Column %" PRIi64 "\n", i++); // Print the column stRPColumn_print(column, fileHandle, includeCells); if(column->nColumn == NULL) { break; } // Print the merge column stRPMergeColumn_print(column->nColumn, fileHandle, includeCells); column = column->nColumn->nColumn; } } } stRPHmm *stRPHmm_fuse(stRPHmm *leftHmm, stRPHmm *rightHmm) { /* * Fuses together two hmms, such that leftHmm and rightHMM * are on the same reference sequence and non-overlapping and * left hmm precedes right hmm on the reference sequence. * Returns fused hmm, destroys input hmms in the process. 
*/ // Checks if(!stString_eq(leftHmm->referenceName, rightHmm->referenceName)) { st_errAbort("Attempting to fuse two hmms not on the same reference sequence"); } if(stRPHmm_overlapOnReference(leftHmm, rightHmm)) { st_errAbort("Attemping to fuse two hmms that overlap in reference coordinates"); } if(leftHmm->refStart >= rightHmm->refStart) { st_errAbort("Left hmm does not precede right hmm in reference coordinates for merge"); } // Create a new empty hmm stRPHmm *hmm = st_malloc(sizeof(stRPHmm)); // Set the reference interval hmm->referenceName = stString_copy(leftHmm->referenceName); hmm->refStart = leftHmm->refStart; hmm->refLength = rightHmm->refStart + rightHmm->refLength - leftHmm->refStart; // Create the combined list of profile seqs hmm->profileSeqs = stList_copy(leftHmm->profileSeqs, NULL); stList_appendAll(hmm->profileSeqs, rightHmm->profileSeqs); // Set column number hmm->columnNumber = leftHmm->columnNumber + rightHmm->columnNumber; // Max depth hmm->maxDepth = leftHmm->maxDepth > rightHmm->maxDepth ? leftHmm->maxDepth : rightHmm->maxDepth; // Parameters if(leftHmm->parameters != rightHmm->parameters) { st_errAbort("HMM parameters differ in fuse function, panic."); } hmm->parameters = leftHmm->parameters; // Set reference position prior probabilities if(leftHmm->referencePriorProbs != rightHmm->referencePriorProbs) { st_errAbort("Hmm reference prior probs differ in fuse function, panic."); } hmm->referencePriorProbs = leftHmm->referencePriorProbs; // Make columns to fuse left hmm and right hmm's columns stRPMergeColumn *mColumn = stRPMergeColumn_construct(0, 0); // Links leftHmm->lastColumn->nColumn = mColumn; mColumn->pColumn = leftHmm->lastColumn; // Add merge cell to connect the cells in the two columns stRPMergeCell_construct(0, 0, mColumn); int64_t gapLength = rightHmm->refStart - (leftHmm->refStart + leftHmm->refLength); assert(gapLength >= 0); if(gapLength > 0) { // Make column in the gap stRPColumn *column = stRPColumn_construct(leftHmm->refStart + leftHmm->refLength, gapLength, 0, NULL, NULL, hmm->referencePriorProbs); // Links mColumn->nColumn = column; column->pColumn = mColumn; // Make cell for empty column column->head = stRPCell_construct(0); // Add right merge column mColumn = stRPMergeColumn_construct(0, 0); // Add merge cell to connect the cells in the two columns stRPMergeCell_construct(0, 0, mColumn); // Links column->nColumn = mColumn; mColumn->pColumn = column; // Increase the column number to account for the introduced gap column hmm->columnNumber += 1; } mColumn->nColumn = rightHmm->firstColumn; rightHmm->firstColumn->pColumn = mColumn; // Initialise first/last columns of fused hmm hmm->firstColumn = leftHmm->firstColumn; hmm->lastColumn = rightHmm->lastColumn; // Cleanup stRPHmm_destruct(leftHmm, 0); stRPHmm_destruct(rightHmm, 0); return hmm; } void stRPHmm_alignColumns(stRPHmm *hmm1, stRPHmm *hmm2) { /* * Align the input hmms, modifying them in place, so that they each * (1) span the same reference interval, * (2) have the same number of columns, and * (3) so that for all i, column i in each model span the same interval. 
*/ assert(hmm1 != hmm2); // If the two hmms don't overlap in reference space then complain if(!stRPHmm_overlapOnReference(hmm1, hmm2)) { st_errAbort("Attempting to align two HMMs that do not overlap in reference coordinate space"); } // If hmm1 starts after hmm2 then call the other way around if(hmm1->refStart > hmm2->refStart) { stRPHmm_alignColumns(hmm2, hmm1); return; } // If hmm1 starts before hmm2 add an empty prefix interval to hmm2 // so they have the same start coordinate if(hmm1->refStart < hmm2->refStart) { // Create column stRPColumn *column = stRPColumn_construct(hmm1->refStart, hmm2->refStart - hmm1->refStart, 0, NULL, NULL, hmm1->referencePriorProbs); // Add cell column->head = stRPCell_construct(0); // Create merge column stRPMergeColumn *mColumn = stRPMergeColumn_construct(0,0); // Add merge cell stRPMergeCell_construct(0, 0, mColumn); // Create links hmm2->firstColumn->pColumn = mColumn; mColumn->nColumn = hmm2->firstColumn; mColumn->pColumn = column; column->nColumn = mColumn; assert(column->pColumn == NULL); hmm2->firstColumn = column; //Adjust start and length of hmm2 interval hmm2->refLength += hmm2->refStart - hmm1->refStart; hmm2->refStart = hmm1->refStart; // Increase column number hmm2->columnNumber++; } // If hmm1 has a shorter reference interval length than hmm2 then call the function // with the hmms reversed. if(hmm1->refLength < hmm2->refLength) { stRPHmm_alignColumns(hmm2, hmm1); return; } // If hmm1 has a longer reference interval than hmm2 append an empty suffix // interval to hmm2 to make them the same length. if(hmm1->refLength > hmm2->refLength) { // Create column stRPColumn *column = stRPColumn_construct(hmm2->lastColumn->refStart + hmm2->lastColumn->length, hmm1->refLength - hmm2->refLength, 0, NULL, NULL, hmm1->referencePriorProbs); // Add cell column->head = stRPCell_construct(0); // Create merge column stRPMergeColumn *mColumn = stRPMergeColumn_construct(0, 0); // Add merge cell stRPMergeCell_construct(0, 0, mColumn); // Create links hmm2->lastColumn->nColumn = mColumn; mColumn->pColumn = hmm2->lastColumn; mColumn->nColumn = column; column->pColumn = mColumn; assert(column->nColumn == NULL); hmm2->lastColumn = column; //Adjust start and length of hmm2 interval hmm2->refLength = hmm1->refLength; // Increase column number hmm2->columnNumber++; } // Quick coordinate checks assert(hmm1->refStart == hmm2->refStart); assert(hmm1->refLength == hmm2->refLength); assert(hmm1->firstColumn->refStart == hmm1->refStart); assert(hmm2->firstColumn->refStart == hmm2->refStart); assert(hmm1->lastColumn->refStart + hmm1->lastColumn->length == hmm1->refStart + hmm1->refLength); assert(hmm2->lastColumn->refStart + hmm2->lastColumn->length == hmm2->refStart + hmm2->refLength); // At this point both hmms have the same reference interval // While one hmm has a shorter reference interval than the other split the other interval // otherwise move on to the next stRPColumn *column1 = hmm1->firstColumn; stRPColumn *column2 = hmm2->firstColumn; while(1) { assert(column1->refStart == column2->refStart); if(column1->length > column2->length) { stRPColumn_split(column1, column2->length, hmm1); assert(column1->nColumn->nColumn->refStart == column1->refStart + column2->length); } else if(column1->length < column2->length) { stRPColumn_split(column2, column1->length, hmm2); } assert(column1->refStart == column2->refStart); assert(column1->length == column2->length); // Now have equal length/start // There are no more columns, so break if(column1->nColumn == NULL) { 
assert(hmm1->lastColumn == column1); assert(column2->nColumn == NULL); assert(hmm2->lastColumn == column2); break; } column1 = column1->nColumn->nColumn; assert(column2->nColumn != NULL); column2 = column2->nColumn->nColumn; assert(column1 != NULL); assert(column2 != NULL); } assert(hmm1->columnNumber == hmm2->columnNumber); } static uint64_t intHashFn(const void *a) { return *(uint64_t *)a; } static int intEqualsFn(const void *key1, const void *key2) { return *(uint64_t *)key1 == *(uint64_t *)key2; } stRPCell **makeCell(uint64_t partition, stRPCell **pCell, stHash *seen) { /* * Make a cell for a column. */ // Make the cell stRPCell *cell = stRPCell_construct(partition); // Add the partition to those already seen assert(stHash_search(seen, &cell->partition) == NULL); stHash_insert(seen, &cell->partition, cell); // Link cells *pCell = cell; return &cell->nCell; } stRPHmm *stRPHmm_createCrossProductOfTwoAlignedHmm(stRPHmm *hmm1, stRPHmm *hmm2) { /* * For two aligned hmms (see stRPHmm_alignColumns) returns a new hmm that represents the * cross product of all the states of the two input hmms. */ // Do sanity checks that the two hmms have been aligned if(!stString_eq(hmm1->referenceName, hmm2->referenceName)) { st_errAbort("Trying to create cross product of two HMMs " "on different reference sequences"); } if(hmm1->refStart != hmm2->refStart) { st_errAbort("Trying to create cross product of two HMMs " "with different reference interval starts"); } if(hmm1->refLength != hmm2->refLength) { st_errAbort("Trying to create cross product of two HMMs " "with different reference interval length"); } if(hmm1->columnNumber != hmm2->columnNumber) { st_errAbort("Trying to create cross product of two HMMs " "with different column numbers"); } // Create a new empty hmm stRPHmm *hmm = st_calloc(1, sizeof(stRPHmm)); // Set the reference interval hmm->referenceName = stString_copy(hmm1->referenceName); hmm->refStart = hmm1->refStart; hmm->refLength = hmm1->refLength; // Create the combined list of profile seqs hmm->profileSeqs = stList_copy(hmm1->profileSeqs, NULL); stList_appendAll(hmm->profileSeqs, hmm2->profileSeqs); // Set column number hmm->columnNumber = hmm1->columnNumber; // Set substitution matrices if(hmm1->parameters != hmm2->parameters) { st_errAbort("Hmm parameters differ in fuse function, panic."); } hmm->parameters = hmm1->parameters; // Set reference position prior probabilities if(hmm1->referencePriorProbs != hmm2->referencePriorProbs) { st_errAbort("Hmm reference prior probs differ in hmm cross product function, panic."); } hmm->referencePriorProbs = hmm1->referencePriorProbs; // For each pair of corresponding columns stRPColumn *column1 = hmm1->firstColumn; stRPColumn *column2 = hmm2->firstColumn; assert(column1 != NULL); assert(column2 != NULL); stRPMergeColumn *mColumn = NULL; while(1) { // Check columns aligned assert(column1->refStart == column2->refStart); assert(column1->length == column2->length); // Create the new column // Depth int64_t newColumnDepth = column1->depth+column2->depth; if(newColumnDepth > hmm->maxDepth) { hmm->maxDepth = newColumnDepth; } // Seq headers stProfileSeq **seqHeaders = st_malloc(sizeof(stProfileSeq *) * newColumnDepth); memcpy(seqHeaders, column1->seqHeaders, sizeof(stProfileSeq *) * column1->depth); memcpy(&seqHeaders[column1->depth], column2->seqHeaders, sizeof(stProfileSeq *) * column2->depth); // Profiles uint8_t **seqs = st_malloc(sizeof(uint8_t *) * newColumnDepth); memcpy(seqs, column1->seqs, sizeof(uint8_t *) * column1->depth); 
memcpy(&seqs[column1->depth], column2->seqs, sizeof(uint8_t *) * column2->depth); stRPColumn *column = stRPColumn_construct(column1->refStart, column1->length, newColumnDepth, seqHeaders, seqs, hmm->referencePriorProbs); // If the there is a previous column if(mColumn != NULL) { mColumn->nColumn = column; column->pColumn = mColumn; } else { hmm->firstColumn = column; assert(column->pColumn == NULL); } // Create cross product of columns stRPCell **pCell = &column->head; stRPCell *cell1 = column1->head; // includeInvertedPartitions forces that the partition and its inverse are included // in the resulting combine hmm. if(hmm->parameters->includeInvertedPartitions) { stHash *seen = stHash_construct3(intHashFn, intEqualsFn, NULL, NULL); do { stRPCell *cell2 = column2->head; do { uint64_t partition = mergePartitionsOrMasks(cell1->partition, cell2->partition, column1->depth, column2->depth); // We have not seen the combined partition before if(stHash_search(seen, &partition) == NULL) { // Add the partition to the column pCell = makeCell(partition, pCell, seen); // Check if the column has non-zero depth and only add the inverse partition if it does // because if zero length the inverse partition is the same as for the forward, and therefore // a duplicate if(newColumnDepth > 0) { uint64_t invertedPartition = invertPartition(partition, newColumnDepth); assert(stHash_search(seen, &invertedPartition) == NULL); pCell = makeCell(invertedPartition, pCell, seen); } } } while((cell2 = cell2->nCell) != NULL); } while((cell1 = cell1->nCell) != NULL); // Cleanup stHash_destruct(seen); } // If not forcing symmetry else { do { stRPCell *cell2 = column2->head; do { stRPCell *cell = stRPCell_construct(mergePartitionsOrMasks(cell1->partition, cell2->partition, column1->depth, column2->depth)); // Link cells *pCell = cell; pCell = &cell->nCell; } while((cell2 = cell2->nCell) != NULL); } while((cell1 = cell1->nCell) != NULL); } // Get the next merged column stRPMergeColumn *mColumn1 = column1->nColumn; stRPMergeColumn *mColumn2 = column2->nColumn; // If column is NULL, we have reached the last column // and we can exit if(mColumn1 == NULL) { assert(mColumn2 == NULL); assert(hmm1->lastColumn == column1); assert(hmm2->lastColumn == column2); // Set the last column pointer hmm->lastColumn = column; break; } // Create new merged column uint64_t fromMask = mergePartitionsOrMasks(mColumn1->maskFrom, mColumn2->maskFrom, mColumn1->pColumn->depth, mColumn2->pColumn->depth); uint64_t toMask = mergePartitionsOrMasks(mColumn1->maskTo, mColumn2->maskTo, mColumn1->nColumn->depth, mColumn2->nColumn->depth); assert(popcount64(fromMask) == popcount64(toMask)); mColumn = stRPMergeColumn_construct(fromMask, toMask); // Connect links mColumn->pColumn = column; column->nColumn = mColumn; // Create cross product of merged columns stHashIterator *cellIt1 = stHash_getIterator(mColumn1->mergeCellsFrom); stRPMergeCell *mCell1; while((mCell1 = stHash_getNext(cellIt1)) != NULL) { stHashIterator *cellIt2 = stHash_getIterator(mColumn2->mergeCellsFrom); stRPMergeCell *mCell2; while((mCell2 = stHash_getNext(cellIt2)) != NULL) { uint64_t fromPartition = mergePartitionsOrMasks(mCell1->fromPartition, mCell2->fromPartition, mColumn1->pColumn->depth, mColumn2->pColumn->depth); uint64_t toPartition = mergePartitionsOrMasks(mCell1->toPartition, mCell2->toPartition, mColumn1->nColumn->depth, mColumn2->nColumn->depth); assert(popcount64(fromPartition) == popcount64(toPartition)); // includeInvertedPartitions forces that the partition and its inverse are 
included // in the resulting combined hmm. if(hmm->parameters->includeInvertedPartitions) { if(stHash_search(mColumn->mergeCellsFrom, &fromPartition) == NULL) { stRPMergeCell_construct(fromPartition, toPartition, mColumn); // If the mask includes no sequences then the the inverted will be identical, so we check // to avoid adding the same partition twice if(popcount64(fromMask) > 0) { uint64_t invertedFromPartition = mColumn->maskFrom & invertPartition(fromPartition, mColumn1->pColumn->depth + mColumn2->pColumn->depth); uint64_t invertedToPartition = mColumn->maskTo & invertPartition(toPartition, mColumn1->nColumn->depth + mColumn2->nColumn->depth); stRPMergeCell_construct(invertedFromPartition, invertedToPartition, mColumn); } } } else { stRPMergeCell_construct(fromPartition, toPartition, mColumn); } } stHash_destructIterator(cellIt2); } stHash_destructIterator(cellIt1); // Get next column column1 = mColumn1->nColumn; column2 = mColumn2->nColumn; assert(column1 != NULL); assert(column2 != NULL); } return hmm; } static void stRPHmm_initialiseProbs(stRPHmm *hmm) { /* * Initialize the forward and backward matrices. */ // Initialize total forward and backward probabilities hmm->forwardLogProb = ST_MATH_LOG_ZERO; hmm->backwardLogProb = ST_MATH_LOG_ZERO; // Iterate through columns from first to last stRPColumn *column = hmm->firstColumn; while(1) { // Set total log prob column->totalLogProb = ST_MATH_LOG_ZERO; // Initialise cells in the column stRPCell *cell = column->head; do { cell->forwardLogProb = ST_MATH_LOG_ZERO; cell->backwardLogProb = ST_MATH_LOG_ZERO; } while((cell = cell->nCell) != NULL); if(column->nColumn == NULL) { break; } // Initialise cells in the next merge column stList *mergeCells = stHash_getValues(column->nColumn->mergeCellsFrom); for(int64_t i=0; i<stList_length(mergeCells); i++) { stRPMergeCell *mergeCell = stList_get(mergeCells, i); mergeCell->forwardLogProb = ST_MATH_LOG_ZERO; mergeCell->backwardLogProb = ST_MATH_LOG_ZERO; } stList_destruct(mergeCells); column = column->nColumn->nColumn; } } static inline void forwardCellCalc1(stRPHmm *hmm, stRPColumn *column, stRPCell *cell, uint64_t *bitCountVectors) { // If the previous merge column exists then propagate forward probability from merge state if(column->pColumn != NULL) { stRPMergeCell *mCell = stRPMergeColumn_getPreviousMergeCell(cell, column->pColumn); cell->forwardLogProb = mCell->forwardLogProb; } // Otherwise initialize probability with log(1.0) else { cell->forwardLogProb = ST_MATH_LOG_ONE; } // Calculate the emission prob double emissionProb = emissionLogProbability(column, cell, bitCountVectors, hmm->referencePriorProbs, (stRPHmmParameters *)hmm->parameters); // Add emission prob to forward log prob cell->forwardLogProb += emissionProb; // Store the emission probability for the cell in the backwardLogProb field temporarily // (is corrected during the backward pass) cell->backwardLogProb = emissionProb; } static inline void forwardCellCalc2(stRPHmm *hmm, stRPColumn *column, stRPCell *cell) { // If the next merge column exists then propagate forward probability to the merge state if (column->nColumn != NULL) { // Add to the next merge cell stRPMergeCell *mCell = stRPMergeColumn_getNextMergeCell(cell, column->nColumn); mCell->forwardLogProb = logAddP(mCell->forwardLogProb, cell->forwardLogProb, hmm->parameters->maxNotSumTransitions); } else { // Else propagate probability to total forward probability of model hmm->forwardLogProb = logAddP(hmm->forwardLogProb, cell->forwardLogProb, 
hmm->parameters->maxNotSumTransitions); } } static void stRPHmm_forward(stRPHmm *hmm) { /* * Forward algorithm for hmm. */ stRPColumn *column = hmm->firstColumn; // Iterate through columns from first to last while(1) { // Get the bit count vectors for the column uint64_t *bitCountVectors = calculateCountBitVectors(column->seqs, column->depth, column->activePositions, column->totalActivePositions); // Iterate through states in column stRPCell *cell = column->head; // If OpenMP is available then parallelize the calculation of the emission calcs #if defined(_OPENMP) stRPCell *cells[CELL_BUFFER_SIZE]; do { // Get as many cells as the buffer will fit / there are cells int64_t cellsInBuffer=0; do { cells[cellsInBuffer++] = cell; } while((cell = cell->nCell) != NULL && cellsInBuffer < CELL_BUFFER_SIZE); #pragma omp parallel { #pragma omp for for(int64_t i=0; i<cellsInBuffer; i++) { forwardCellCalc1(hmm, column, cells[i], bitCountVectors); } } for(int64_t i=0; i<cellsInBuffer; i++) { forwardCellCalc2(hmm, column, cells[i]); } } while(cell != NULL); #else // Otherwise do it without the need for the cell buffer do { forwardCellCalc1(hmm, column, cell, bitCountVectors); forwardCellCalc2(hmm, column, cell); } while((cell = cell->nCell) != NULL); #endif // Cleanup the bit count vectors free(bitCountVectors); if(column->nColumn == NULL) { break; } column = column->nColumn->nColumn; } } static inline void backwardCellCalc(stRPHmm *hmm, stRPColumn *column, stRPCell *cell) { // Retrieve the emission probability that was stored by the forward pass double probabilityToPropagateLogProb = cell->backwardLogProb; // If the next merge column exists then propagate backward probability from merge state if(column->nColumn != NULL) { stRPMergeCell *mCell = stRPMergeColumn_getNextMergeCell(cell, column->nColumn); cell->backwardLogProb = mCell->backwardLogProb; probabilityToPropagateLogProb += mCell->backwardLogProb; } else { // Else set the backward prob to log(1) cell->backwardLogProb = ST_MATH_LOG_ONE; } // If the previous merge column exists then propagate backward probability to the merge state if(column->pColumn != NULL) { // Add to the previous merge cell stRPMergeCell *mCell = stRPMergeColumn_getPreviousMergeCell(cell, column->pColumn); mCell->backwardLogProb = logAddP(mCell->backwardLogProb, probabilityToPropagateLogProb, hmm->parameters->maxNotSumTransitions); } else { hmm->backwardLogProb = logAddP(hmm->backwardLogProb, probabilityToPropagateLogProb, hmm->parameters->maxNotSumTransitions); } // Add to column total probability column->totalLogProb = logAddP(column->totalLogProb, cell->forwardLogProb + cell->backwardLogProb, hmm->parameters->maxNotSumTransitions); } static void stRPHmm_backward(stRPHmm *hmm) { /* * Backward algorithm for hmm. */ stRPColumn *column = hmm->lastColumn; // Iterate through columns from last to first while(1) { // Iterate through states in column stRPCell *cell = column->head; do { backwardCellCalc(hmm, column, cell); } while((cell = cell->nCell) != NULL); if(column->pColumn == NULL) { break; } column = column->pColumn->pColumn; } } void stRPHmm_forwardBackward(stRPHmm *hmm) { /* * Runs the forward and backward algorithms and sets the total column probabilities. * * This function must be run upon an HMM to calculate cell posterior probabilities. 
*/ // Initialise state values stRPHmm_initialiseProbs(hmm); // Run the forward and backward passes stRPHmm_forward(hmm); stRPHmm_backward(hmm); } static int cellCmpFn(const void *a, const void *b, const void *extraArg) { /* * Sort cells by posterior probability in descending order. */ stRPCell *cell1 = (stRPCell *)a, *cell2 = (stRPCell *)b; stRPColumn *column = (stRPColumn *)extraArg; double p1 = stRPCell_posteriorProb(cell1, column), p2 = stRPCell_posteriorProb(cell2, column); return p1 > p2 ? -1 : p1 < p2 ? 1 : 0; } static int mergeCellCmpFn(const void *a, const void *b, const void *extraArg) { /* * Sort merge cells by posterior probability in descending order. */ stRPMergeCell *cell1 = (stRPMergeCell *)a, *cell2 = (stRPMergeCell *)b; stRPMergeColumn *column = (stRPMergeColumn *)extraArg; double p1 = stRPMergeCell_posteriorProb(cell1, column), p2 = stRPMergeCell_posteriorProb(cell2, column); return p1 > p2 ? -1 : p1 < p2 ? 1 : 0; } void filterMergeCells(stRPMergeColumn *mColumn, stSet *chosenMergeCellsSet) { /* * Removes merge cells from the column that are not in chosenMergeCellsSet */ assert(stSet_size(chosenMergeCellsSet) > 0); stList *mergeCells = stHash_getValues(mColumn->mergeCellsFrom); for(int64_t i=0; i<stList_length(mergeCells); i++) { stRPMergeCell *mCell = stList_get(mergeCells, i); assert(mCell != NULL); if(stSet_search(chosenMergeCellsSet, mCell) == NULL) { // Remove the state from the merge column assert(stHash_search(mColumn->mergeCellsFrom, &(mCell->fromPartition)) == mCell); assert(stHash_search(mColumn->mergeCellsTo, &(mCell->toPartition)) == mCell); stHash_remove(mColumn->mergeCellsFrom, &(mCell->fromPartition)); stHash_remove(mColumn->mergeCellsTo, &(mCell->toPartition)); // Cleanup stRPMergeCell_destruct(mCell); } } stList_destruct(mergeCells); assert(stSet_size(chosenMergeCellsSet) == stHash_size(mColumn->mergeCellsFrom)); assert(stSet_size(chosenMergeCellsSet) == stHash_size(mColumn->mergeCellsTo)); } stSet *getLinkedMergeCells(stRPMergeColumn *mColumn, stRPMergeCell *(*getNCell)(stRPCell *, stRPMergeColumn *), stList *cells) { /* * Returns the set of merge cells in the column that are linked to a cell * in cells. */ stSet *chosenMergeCellsSet = stSet_construct(); for(int64_t i=0; i<stList_length(cells); i++) { stRPMergeCell *mCell = getNCell(stList_get(cells, i), mColumn); assert(mCell != NULL); stSet_insert(chosenMergeCellsSet, mCell); } assert(stSet_size(chosenMergeCellsSet) > 0); return chosenMergeCellsSet; } void relinkCells(stRPColumn *column, stList *cells) { /* * Re-links the cells in the list 'cells' to make up the list of cells in the column. */ stRPCell **pCell = &column->head; // Pointer to previous cell, used to // remove cells from the linked list for(int64_t i=0; i<stList_length(cells); i++) { stRPCell *cell = stList_get(cells, i); *pCell = cell; pCell = &cell->nCell; } *pCell = NULL; assert(column->head != NULL); } stList *getLinkedCells(stRPColumn *column, stRPMergeCell *(*getPCell)(stRPCell *, stRPMergeColumn *), stRPMergeColumn *mColumn) { /* * Returns the set of cells in column that are linked to a cell in mColumn. 
*/ // Put cells into an array and sort by descending posterior prob // only keeping cells that still have a preceding merge cell stList *cells = stList_construct(); stRPCell *cell = column->head; do { if(mColumn == NULL || getPCell(cell, mColumn) != NULL) { stList_append(cells, cell); cell = cell->nCell; } else { stRPCell *nCell = cell->nCell; stRPCell_destruct(cell); cell = nCell; } } while(cell != NULL); stList_sort2(cells, cellCmpFn, column); assert(stList_length(cells) > 0); return cells; } void stRPHmm_pruneForwards(stRPHmm *hmm) { /* * Remove cells from hmm whos posterior probability is below the given threshold */ // For each column stRPColumn *column = hmm->firstColumn; stRPMergeColumn *mColumn = NULL; while(1) { assert(column->head != NULL); // Get cells that have a valid previous cell stList *cells = getLinkedCells(column, stRPMergeColumn_getPreviousMergeCell, mColumn); // Get rid of the excess cells while(stList_length(cells) > hmm->parameters->minPartitionsInAColumn && (stList_length(cells) > hmm->parameters->maxPartitionsInAColumn || stRPCell_posteriorProb(stList_peek(cells), column) < hmm->parameters->minPosteriorProbabilityForPartition)) { stRPCell_destruct(stList_pop(cells)); } // Relink the cells (from most probable to least probable) relinkCells(column, cells); // Move on to the next merge column mColumn = column->nColumn; if(mColumn == NULL) { assert(column == hmm->lastColumn); stList_destruct(cells); break; } // Get merge cells that are connected to a cell in the previous column stSet *chosenMergeCellsSet = getLinkedMergeCells(mColumn, stRPMergeColumn_getNextMergeCell, cells); // Shrink the the number of chosen cells to less than equal to the desired number stList *chosenMergeCellsList = stSet_getList(chosenMergeCellsSet); stList_sort2(chosenMergeCellsList, mergeCellCmpFn, mColumn); while(stList_length(chosenMergeCellsList) > hmm->parameters->minPartitionsInAColumn && (stList_length(chosenMergeCellsList) > hmm->parameters->maxPartitionsInAColumn || stRPMergeCell_posteriorProb(stList_peek(chosenMergeCellsList), mColumn) < hmm->parameters->minPosteriorProbabilityForPartition)) { stSet_remove(chosenMergeCellsSet, stList_pop(chosenMergeCellsList)); } assert(stList_length(chosenMergeCellsList) == stSet_size(chosenMergeCellsSet)); stList_destruct(chosenMergeCellsList); // Get rid of merge cells we don't need filterMergeCells(mColumn, chosenMergeCellsSet); // Cleanup stList_destruct(cells); stSet_destruct(chosenMergeCellsSet); column = mColumn->nColumn; } } void stRPHmm_pruneBackwards(stRPHmm *hmm) { /* * Remove cells from hmm whos posterior probability is below the given threshold */ // For each column stRPColumn *column = hmm->lastColumn; stRPMergeColumn *mColumn = NULL; while(1) { assert(column->head != NULL); // Get cells that have a valid previous cell stList *cells = getLinkedCells(column, stRPMergeColumn_getNextMergeCell, mColumn); // This must be true because the forward pass has already winnowed the number below the // threshold assert(stList_length(cells) <= hmm->parameters->maxPartitionsInAColumn); // Relink the cells (from most probable to least probable) relinkCells(column, cells); // Move on to the next merge column mColumn = column->pColumn; if(mColumn == NULL) { assert(column == hmm->firstColumn); stList_destruct(cells); break; } // Get merge cells that are connected to a cell in the previous column stSet *chosenMergeCellsSet = getLinkedMergeCells(mColumn, stRPMergeColumn_getPreviousMergeCell, cells); // By the same logic, this number if pruned on the forwards 
pass assert(stSet_size(chosenMergeCellsSet) <= hmm->parameters->maxPartitionsInAColumn); // Get rid of merge cells we don't need filterMergeCells(mColumn, chosenMergeCellsSet); // Cleanup stList_destruct(cells); stSet_destruct(chosenMergeCellsSet); column = mColumn->pColumn; } } void stRPHmm_prune(stRPHmm *hmm) { stRPHmm_pruneForwards(hmm); stRPHmm_pruneBackwards(hmm); } bool stRPHmm_overlapOnReference(stRPHmm *hmm1, stRPHmm *hmm2) { /* * Return non-zero iff hmm1 and hmm2 have the same reference sequence and overlapping * coordinates intervals on that reference sequence. */ // If either interval is zero length this is not a well defined comparison if(hmm1->refLength <= 0 || hmm2->refLength <= 0) { st_errAbort("Trying to compare HMMs with a zero length coordinate interval"); } // Check if on the same reference sequence if(!stString_eq(hmm1->referenceName, hmm2->referenceName)) { return 0; } // Check if intervals overlap // If hmm1 starts after hmm2's start coordinate then switch hmm1 for hmm2 if(hmm1->refStart > hmm2->refStart) { return stRPHmm_overlapOnReference(hmm2, hmm1); } // The coordinates of the first interval overlap the second return hmm1->refStart + hmm1->refLength > hmm2->refStart; } static stRPColumn *getColumn(stRPColumn *column, int64_t site) { /* * Returns column containing the given reference position, starting from the linked, preceding column "column". */ assert(column != NULL); while(1) { assert(site >= column->refStart); if(site < column->refStart + column->length) { return column; } if(column->nColumn == NULL) { break; } column = column->nColumn->nColumn; } st_errAbort("Site: %" PRIi64 " not contained in hmm\n", site); return column; } void stRPHmm_resetColumnNumberAndDepth(stRPHmm *hmm) { /* * Walk through the hmm calculate and set the maxDepth and column number. */ hmm->columnNumber = 0; hmm->maxDepth = 0; stRPColumn *column = hmm->firstColumn; while(1) { hmm->columnNumber++; if(hmm->maxDepth < column->depth) { hmm->maxDepth = column->depth; } if(column->nColumn == NULL) { break; } column = column->nColumn->nColumn; } } stRPHmm *stRPHmm_split(stRPHmm *hmm, int64_t splitPoint) { /* * Splits the hmm into two at the specified point, given by the reference coordinate splitPiunt. The return value * is the suffix of the split, whose reference start is splitPoint. * The prefix of the split is the input hmm, which has its suffix cleaved off. Its length is then splitPoint-hmm->refStart. 
*/ if(splitPoint <= hmm->refStart) { st_errAbort("The split point is at or before the start of the reference interval\n"); } assert(splitPoint < hmm->refStart + hmm->refLength); if(splitPoint >= hmm->refStart + hmm->refLength) { st_errAbort("The split point %" PRIi64 " is after the last position of the reference interval\n", splitPoint); } stRPHmm *suffixHmm = st_calloc(1, sizeof(stRPHmm)); // Set the reference interval for the two hmms suffixHmm->referenceName = stString_copy(hmm->referenceName); suffixHmm->refStart = splitPoint; suffixHmm->refLength = hmm->refLength + hmm->refStart - splitPoint; hmm->refLength = splitPoint - hmm->refStart; assert(hmm->refLength > 0); assert(suffixHmm->refLength > 0); // Parameters suffixHmm->parameters = hmm->parameters; // Reference prior probabilities suffixHmm->referencePriorProbs = hmm->referencePriorProbs; // Divide the profile sequences between the two hmms (some may end in both if they span the interval) suffixHmm->profileSeqs = stList_construct(); stList *prefixProfileSeqs = stList_construct(); for(int64_t i=0; i<stList_length(hmm->profileSeqs); i++) { stProfileSeq *pSeq = stList_get(hmm->profileSeqs, i); if(pSeq->refStart < splitPoint) { stList_append(prefixProfileSeqs, pSeq); } if(pSeq->refStart + pSeq->length > splitPoint) { stList_append(suffixHmm->profileSeqs, pSeq); } } stList_destruct(hmm->profileSeqs); hmm->profileSeqs = prefixProfileSeqs; // Get the column containing the split point stRPColumn *splitColumn = getColumn(hmm->firstColumn, splitPoint); assert(splitColumn != NULL); assert(splitColumn->refStart <= splitPoint); assert(splitPoint < splitColumn->refStart + splitColumn->length); // If the split point is within the column, split the column if(splitPoint > splitColumn->refStart) { stRPColumn_split(splitColumn, splitPoint-splitColumn->refStart, hmm); splitColumn = splitColumn->nColumn->nColumn; assert(splitPoint == splitColumn->refStart); } // Set links between columns suffixHmm->firstColumn = splitColumn; suffixHmm->lastColumn = hmm->lastColumn; hmm->lastColumn = splitColumn->pColumn->pColumn; hmm->lastColumn->nColumn = NULL; stRPMergeColumn_destruct(splitColumn->pColumn); // Cleanup the merge column that is deleted by this pointer setting splitColumn->pColumn = NULL; // Set depth and column numbers stRPHmm_resetColumnNumberAndDepth(hmm); stRPHmm_resetColumnNumberAndDepth(suffixHmm); return suffixHmm; } static bool sitesLinkageIsWellSupported(stRPHmm *hmm, int64_t leftSite, int64_t rightSite) { /* * Returns true if the two sites, specified by reference coordinates leftSite and rightSite, are linked by * hmm->parameters->minReadCoverageToSupportPhasingBetweenHeterozygousSites, otherwise false. */ stRPColumn *leftColumn = getColumn(hmm->firstColumn, leftSite); stRPColumn *rightColumn = getColumn(leftColumn, rightSite); stSet *sequencesInCommon = stRPColumn_getSequencesInCommon(leftColumn, rightColumn); // Condition to determine if well supported by reads bool wellSupported = stSet_size(sequencesInCommon) >= hmm->parameters->minReadCoverageToSupportPhasingBetweenHeterozygousSites; // Cleanup stSet_destruct(sequencesInCommon); return wellSupported; } stList *stRPHMM_splitWherePhasingIsUncertain(stRPHmm *hmm) { /* * Takes the input hmm and splits into a sequence of contiguous fragments covering the same reference interval, * returned as an ordered list of hmm fragments. * Hmms are split where there is insufficient support between heterozygous * sites to support phasing between the two haplotypes. 
* See sitesLinkageIsWellSupported for details. */ // Run the forward-backward algorithm stRPHmm_forwardBackward(hmm); // Now compute a high probability path through the hmm stList *path = stRPHmm_forwardTraceBack(hmm); // Get two haplotypes for the path through the HMM stGenomeFragment *gF = stGenomeFragment_construct(hmm, path); // Find high confidence heterozygous sites stList *hetSites = stList_construct3(0, (void (*)(void *))stIntTuple_destruct); for(int64_t i=0; i<gF->length; i++) { // If heterozygous site if(gF->haplotypeString1[i] != gF->haplotypeString2[i]) { stList_append(hetSites, stIntTuple_construct1(gF->refStart + i)); } } // Split hmms stList *splitHmms = stList_construct3(0, (void (*)(void *))stRPHmm_destruct2); // For each pair of contiguous het sites if not supported by sufficient reads split the hmm for(int64_t i=0; i<stList_length(hetSites)-1; i++) { int64_t j = stIntTuple_get(stList_get(hetSites, i), 0); int64_t k = stIntTuple_get(stList_get(hetSites, i+1), 0); assert(k > j); // If not well supported by reads if(!sitesLinkageIsWellSupported(hmm, j, k)) { // Split hmm int64_t splitPoint = j+(k-j+1)/2; stRPHmm *rightHmm = stRPHmm_split(hmm, splitPoint); assert(rightHmm->refStart == splitPoint); assert(hmm->refStart + hmm->refLength == splitPoint); // Add prefix of hmm to list of split hmms stList_append(splitHmms, hmm); // Set hmm as right hmm hmm = rightHmm; } } // Add the remaining part of the hmm to split hmms stList_append(splitHmms, hmm); // Cleanup stList_destruct(hetSites); stList_destruct(path); stGenomeFragment_destruct(gF); return splitHmms; }
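The forward, backward and posterior calculations above combine probabilities in log space via logAddP, with hmm->parameters->maxNotSumTransitions switching between exact summation and a max approximation. logAddP itself is defined elsewhere in the library, so the following is only a minimal sketch of the standard log-sum-exp identity such a helper typically implements; logAddSketch and cellPosteriorSketch are hypothetical names, not the project's API.

#include <algorithm>
#include <cmath>

// Stable log-space addition: log(exp(a) + exp(b)), or simply max(a, b) when the
// caller asks for the max-instead-of-sum approximation (cf. maxNotSumTransitions).
static double logAddSketch(double a, double b, bool maxNotSum) {
    if (maxNotSum) {
        return std::max(a, b);
    }
    const double hi = std::max(a, b);
    const double lo = std::min(a, b);
    if (std::isinf(hi) && hi < 0) {
        return hi; // both operands are log(0)
    }
    return hi + std::log1p(std::exp(lo - hi)); // log-sum-exp trick
}

// A cell posterior is forward + backward minus the column total, taken back out of log space.
static double cellPosteriorSketch(double forwardLogProb, double backwardLogProb,
                                  double columnTotalLogProb) {
    return std::exp(forwardLogProb + backwardLogProb - columnTotalLogProb);
}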
preemptive.h
#include <algorithm> #include <vector> #include <cstdint> #include "fast-slic-common.h" #include "simd-helper.hpp" #include "parallel.h" #include "timer.h" struct PreemptiveTile { int sy, sx, ey, ex; }; class PreemptiveGrid { private: bool enabled; int H; int W; int K; int S; int CW; int CH; float thres; int stride; std::vector<Cluster> old_clusters; std::vector<std::vector<uint16_t>> cluster_grid; std::vector<int> active_grid; bool b_all_active; std::vector<int> y_to_cell_y; std::vector<int> x_to_cell_x; int cooldown = 2; public: PreemptiveGrid(int H, int W, int K, int S) : H(H), W(W), K(K), S(S), stride(1), y_to_cell_y(H), x_to_cell_x(W) { CW = ceil_int(W, 2 * S); CH = ceil_int(H, 2 * S); old_clusters.resize(K); cluster_grid.resize(CH * CW); active_grid.resize(CH * CW); b_all_active = true; for (int ci = 0; ci < CH; ci++) { int i_start = ci * 2 * S; int i_end = my_min(i_start + 2 * S, H); for (int i = i_start; i < i_end; i++) { y_to_cell_y[i] = ci; } } for (int cj = 0; cj < CW; cj++) { int j_start = cj * 2 * S; int j_end = my_min(j_start + 2 * S, W); for (int j = j_start; j < j_end; j++) { x_to_cell_x[j] = cj; } } }; void initialize(Cluster* clusters, bool enabled, float thres, int stride) { this->enabled = enabled; this->thres = thres; this->stride = stride; b_all_active = true; for (int k = 0; k < K; k++) { clusters[k].is_updatable = cooldown; } } void finalize(Cluster* clusters) { b_all_active = true; for (int k = 0; k < K; k++) { clusters[k].is_active = 1; } } bool all_active() { return !enabled || b_all_active; } std::vector<PreemptiveTile> get_active_tiles() const { std::vector<PreemptiveTile> result; if (!b_all_active) { for (int ci = 0; ci < CH; ci++) { for (int cj = 0; cj < CW; cj++) { int cell_index = CW * ci + cj; if (!active_grid[cell_index]) continue; PreemptiveTile tile; tile.sy = ci * 2 * S; tile.sx = cj * 2 * S; tile.ey = my_min(tile.sy + 2 * S, H); tile.ex = my_min(tile.sx + 2 * S, W); result.push_back(tile); } } } else { PreemptiveTile tile; tile.sx = tile.sy = 0; tile.ey = H; tile.ex = W; result.push_back(tile); } return result; } void set_old_clusters(const Cluster* clusters) { if (!enabled) return; std::copy(clusters, clusters + K, old_clusters.begin()); } int& get_active_cell(int y, int x) { return active_grid[CW * y_to_cell_y[y] + x_to_cell_x[x]]; } void set_new_clusters(Cluster* clusters) { fstimer::Scope s("set_new_clusters"); if (!enabled) return; for (auto &list : cluster_grid) list.clear(); for (int k = 0; k < K; k++) { cluster_grid[CW * y_to_cell_y[(int)clusters[k].y] + x_to_cell_x[(int)clusters[k].x]].push_back(k); clusters[k].is_active = 0; } std::fill(active_grid.begin(), active_grid.end(), 0); float l1_thres = my_max(roundf(2 * S * thres), 1.0f); const int dir[3] = {-1, 0, 1}; int num_active = 0; #pragma omp parallel num_threads(fsparallel::nth()) { #pragma omp for for (int k = 0; k < K; k++) { if (!clusters[k].is_updatable) continue; float l1_diff = abs(old_clusters[k].x - clusters[k].x) + abs(old_clusters[k].y - clusters[k].y); if (l1_diff < l1_thres) { clusters[k].is_updatable--; } else { clusters[k].is_updatable = cooldown; } } #pragma omp for for (int k = 0; k < K; k++) { const Cluster* cluster = &clusters[k]; if (!cluster->is_updatable) continue; int y = cluster->y, x = cluster->x; int cy = y_to_cell_y[y], cx = x_to_cell_x[x]; bool any_active = false; for (int dy : dir) { int ny = cy + dy; if (!(ny >= 0 && ny < CH)) continue; for (int dx : dir) { int nx = cx + dx; if (!(nx >= 0 && nx < CW)) continue; for (uint16_t neighbor_no : cluster_grid[CW * ny 
+ nx]) { Cluster* neighbor = &clusters[neighbor_no]; int neighbor_y = neighbor->y, neighbor_x = neighbor->x; if (fast_abs(neighbor_y - y) <= 2 * S && fast_abs(neighbor_x - x) <= 2 * S) { neighbor->is_active = 1; get_active_cell(neighbor_y, neighbor_x) = 1; } } } } } #pragma omp for for (int k = 0; k < K; k++) { #pragma omp atomic num_active += (int)clusters[k].is_active; } #pragma omp single b_all_active = num_active == K; } } };
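A hedged usage sketch for the grid above: a caller can restrict the per-pixel assignment step to the tiles returned by get_active_tiles(), which degrades to a single full-image tile whenever all_active() holds. Tile mirrors PreemptiveTile; process_pixel and assign_active_tiles_only are hypothetical stand-ins, not fast-slic's actual assignment code.

#include <vector>

struct Tile { int sy, sx, ey, ex; };   // mirrors PreemptiveTile above

// Hypothetical per-pixel work, e.g. re-assigning a pixel to its nearest cluster.
static void process_pixel(int y, int x) { (void)y; (void)x; }

static void assign_active_tiles_only(const std::vector<Tile> &active_tiles) {
    // Iterating tiles rather than the whole image is what skips regions whose
    // clusters have stopped moving; the parallel for mirrors the header's use of OpenMP.
    #pragma omp parallel for schedule(dynamic)
    for (int t = 0; t < (int)active_tiles.size(); t++) {
        const Tile &tile = active_tiles[t];
        for (int y = tile.sy; y < tile.ey; y++) {
            for (int x = tile.sx; x < tile.ex; x++) {
                process_pixel(y, x);
            }
        }
    }
}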
algebra_lineare_par.c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

#define NUM_BLOCCHI 156
#define K 6000
#define NUM_PRIMI 128
#define N_BITS 64
#define TYPE unsigned long

typedef TYPE word;

struct row_stats {
    // rightmost bit
    long unsigned b_dx;
    // number of bits set to 1
    long unsigned n_bit;
};

struct row_stats wt[K];

// Returns the i-th bit of the k-th row
unsigned int get_k_i(unsigned long M[][NUM_BLOCCHI], unsigned long k, unsigned long i)
{
    unsigned long I = i / N_BITS;
    unsigned long n_shift = N_BITS - ((i % N_BITS ) + 1);
    //printf("i=%lu, I=%lu, n_shift=%lu, get=%lu, ", i, I, n_shift, (M[k][I] >> n_shift) & 1);
    //print_bits((M[k][I] >> n_shift));
    //printf(" \n");
    return (M[k][I] >> n_shift) & 1;
}

// Replace row k with row k + j:
// k = k + j
// Bitwise XOR is used, which corresponds
// to addition modulo 2.
// The XOR is applied to every block of the vectors
void add_j_to_k(unsigned long M [][NUM_BLOCCHI], unsigned long k, unsigned long j, unsigned long n_blocchi)
{
    //#pragma omp parallel for schedule(dynamic, n_blocchi/2)
    for(unsigned long I = 0; I < n_blocchi; ++I)
        M[k][I] = M[k][I] ^ M[j][I];
}

void get_wt_k(unsigned long M[][NUM_BLOCCHI], unsigned long n_blocchi, unsigned long k, struct row_stats * wt)
{
    wt->b_dx = 128;
    wt->n_bit = 0;
    unsigned long i = 0;
    while(get_k_i(M, k, i) == 0 && i < (n_blocchi * N_BITS))
        ++i;
    if(i >= (n_blocchi * N_BITS))
        return;
    //printf("i=%lu get=%lu, ", i, get_k_i(M, k, i));
    wt->b_dx = i;
    //#pragma omp parallel for schedule(dynamic, (n_blocchi * N_BITS)/2)
    for(i = i; i < (n_blocchi * N_BITS); ++i)
        if(get_k_i(M, k, i))
            wt->n_bit++;
    //printf("b_dx=%lu n_bit=%lu\n", wt->b_dx, wt->n_bit);
}

/*
void get_wt_k(unsigned long M[][NUM_BLOCCHI], unsigned long n_blocchi, unsigned long k, struct row_stats * wt)
{
    unsigned long b;                  // scratch variable for local copies
    unsigned long last_bit_pos = 0;   // position of the last 1 bit in the block
    unsigned long bit_1_count = 0;    // count of the bits set to 1
    unsigned long last_block_pos = 0; // block containing the last bit

    for(unsigned long I = n_blocchi; I > 0; --I) {
        b = M[k][I-1];
        //printf("%lu, I=%lu\n", M[k][I-1], I-1);
        for(unsigned i = 0; i < N_BITS; ++i) {
            if(b == 0)
                break;
            bit_1_count += b & 1; // add one if the i-th bit is 1
            b = b >> 1;
            //printf("b=%lu, ", b);
            //printf("bit=%lu\n", bit_1_count);
            last_bit_pos = i;
            last_block_pos = I-1;
            //printf("last_bit=%lu, last_block=%lu\n", last_bit_pos, last_block_pos);
        }
    }
    printf("n_b=%lu b_dx=%lu I=%lu\n", n_blocchi*64-1, last_bit_pos, last_block_pos);
    (*wt).b_dx = (n_blocchi*64 - 1) - (last_bit_pos + last_block_pos*N_BITS);
    (*wt).n_bit = bit_1_count;
}
*/

void bit_gaussian_elimination_mod_2(unsigned long M[][NUM_BLOCCHI], unsigned long n_row, unsigned long n_col, unsigned long n_blocks, struct row_stats wt[])
{
    for(unsigned long i = 0; i < n_col; ++i) {
        unsigned long j;
        for(j = 0; j < n_row && wt[j].b_dx != i; ++j)
            //printf("wt[%d].b_dx=%d ==? %d\n", j, wt[j].b_dx, i)
            ;// just advance
        //printf("j=%d\n", j);
        // #pragma omp parallel for schedule(auto)
        for(unsigned k = j + 1; k < n_row; ++k) {
            //printf("wt[%d].b_dx=%d ==? %d\n", j, wt[j].b_dx, i);
            //printf("get=%d\n", get_k_i(M, k, i));
            //getchar();
            if(get_k_i(M, k, i)) { // bit v(k)(i) must be 1
                add_j_to_k(M, k, j, n_blocks); // v(k) = v(k) + v(j)
                //printf("add: %lu = %lu + %lu\n", k, k, j);
                // sum the rows of the exponent matrix over Z
                // multiply the Q(A) values
                // update the wt info: rightmost bit and number of bits set to 1
                get_wt_k(M, n_blocks, k, & wt[k]);
            }
        }
    }
}

void print_bits(unsigned long a)
{
    unsigned int bits[N_BITS];
    for(unsigned int i = 0; i < N_BITS; ++i)
        bits[i] = (a >> i) & 1U;
    for(int i = 63; i >= 0; --i)
        printf("%d", bits[i]);
}

void print_all(unsigned long M[][NUM_BLOCCHI], int righe){
    for(int i = 0; i < righe; ++i) {
        for(int j = 0; j < NUM_BLOCCHI; ++j) {
            print_bits(M[i][j]);
            printf(" ");
        }
        printf("\n");
    }
}

int main()
{
    // static: roughly 7.5 MB, too large to place on the stack
    static unsigned long M[K][NUM_BLOCCHI];
    /*
         N_BITS      N_BITS
     1) 000 ... 001 000 ... 001
     2) 000 ... 000 000 ... 010
    */
    int n_threads = omp_get_num_threads();
    int chunck = K/n_threads;

    double t1 = omp_get_wtime();
    #pragma omp parallel for schedule(dynamic, K/4)
    for(int i = 0; i < K; ++i)
        get_wt_k(M, NUM_BLOCCHI, i, & wt[i]);
    double t2 = omp_get_wtime();
    double t_set_up = t2 - t1;

    //get_wt_k(M, 2, 1, & wt[1]);
    //for(int i = 0; i < 6; ++i)
    //printf("wt[].b_dx=%lu, wt[].n_bit=%lu\n", wt[i].b_dx, wt[i].n_bit);
    //print_all(M, K);
    //printf("\n\n");

    double t3 = omp_get_wtime();
    bit_gaussian_elimination_mod_2(M, K, NUM_BLOCCHI*N_BITS, NUM_BLOCCHI, wt);
    double t4 = omp_get_wtime();
    double t_gauss = t4 - t3;

    printf("#time_gauss time_set_up time_totale\n");
    printf("%.6f ", t_gauss);
    printf("%.6f ", t_set_up);
    printf("%.6f\n", t_gauss + t_set_up);

    //print_all(M, K);
    //for(int i=0; i<2; i++){
    //print_bits(M[i][0]);
    //printf(" ");
    //print_bits(M[i][1]);
    //printf("\n");
    //get_wt_k(i, & wt);
    //printf("wt[].b_dx=%lu, wt[].n_bit=%lu\n", wt.b_dx, wt.n_bit);
    //}
    //add_k_to_j(0, 1);
    //printf("\n");
    //print_bits(M[0][0]);
    //printf(" ");
    //print_bits(M[0][1]);
    //printf("\n");
    //for(int i = 0; i < 64; ++i)
    //get_k_i(0, i);
    //printf("%d", get_k_i(0, i));
    //printf("\n");
}
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! * \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int32_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint32_t AType; \ 
{__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } /*! * \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) /* \brief Compute flattened index given coordinates and shape. */ template<int ndim> MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i]; } return ret; } /* Compute coordinates from flattened index given shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) { Shape<ndim> ret; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret[i] = j - tmp*shape[i]; j = tmp; } return ret; } /* Compute dot product of two vector */ template<int ndim> MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (int i = 0; i < ndim; ++i) { ret += coord[i] * stride[i]; } return ret; } /* Combining unravel and dot */ template<int ndim> MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape, const Shape<ndim>& stride) { index_t ret = 0; #pragma unroll for (index_t i = ndim-1, j = idx; i >=0; --i) { auto tmp = j / shape[i]; ret += (j - tmp*shape[i])*stride[i]; j = tmp; } return ret; } /* Calculate stride of each dim from shape */ template<int ndim> MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) { Shape<ndim> stride; index_t cumprod = 1; #pragma unroll for (int i = ndim - 1; i >= 0; --i) { stride[i] = (shape[i] > 1) ? 
cumprod : 0; cumprod *= shape[i]; } return stride; } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx, const Shape<ndim>& stride) { ++(*coord)[ndim-1]; *idx += stride[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx = *idx + stride[i-1] - shape[i] * stride[i]; } } /* Increment coordinates and modify index */ template<int ndim> MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape, index_t* idx1, const Shape<ndim>& stride1, index_t* idx2, const Shape<ndim>& stride2) { ++(*coord)[ndim-1]; *idx1 += stride1[ndim-1]; *idx2 += stride2[ndim-1]; #pragma unroll for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) { (*coord)[i] -= shape[i]; ++(*coord)[i-1]; *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i]; *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i]; } } /*! * \brief Simple copy data from one blob to another * \param to Destination blob * \param from Source blob */ template <typename xpu> MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to, const TBlob& from) { CHECK_EQ(from.Size(), to.Size()); CHECK_EQ(from.dev_mask(), to.dev_mask()); MSHADOW_TYPE_SWITCH(to.type_flag_, DType, { if (to.type_flag_ == from.type_flag_) { mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s); } else { MSHADOW_TYPE_SWITCH(from.type_flag_, SrcDType, { to.FlatTo1D<xpu, DType>(s) = mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s)); }) } }) } /*! \brief Binary op backward gradient OP wrapper */ template<typename GRAD_OP> struct backward_grad { /* \brief Backward calc with grad * \param a - output grad * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies) * \return input grad */ template<typename DType, typename ...Args> MSHADOW_XINLINE static DType Map(DType a, Args... args) { return DType(a * GRAD_OP::Map(args...)); } }; /*! \brief Binary op backward gradient OP wrapper (tuned) */ template<typename GRAD_OP> struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable { using backward_grad<GRAD_OP>::Map; }; /*! \brief Select assignment operation based upon the req value * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch */ template<typename OP, int req> struct op_with_req { typedef OP Operation; /*! \brief input is one tensor */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i])); } /*! \brief inputs are two tensors */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) { KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i])); } /*! \brief input is tensor and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value)); } /*! \brief input is tensor and two scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value_1, const DType value_2) { KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2)); } /*! \brief No inputs (ie fill to constant value) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { KERNEL_ASSIGN(out[i], req, OP::Map()); } /*! 
\brief input is single scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(value)); } /*! \brief inputs are two tensors and a scalar value */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType value) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value)); } /*! \brief inputs are three tensors (ie backward grad with binary grad function) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *input_1, const DType *input_2, const DType *input_3) { KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i])); } }; template<typename OP, typename xpu> struct Kernel; /*! * \brief CPU Kernel launcher * \tparam OP Operator to launch */ template<typename OP> struct Kernel<OP, cpu> { /*! * \brief Launch a generic CPU kernel. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended * for irregular workloads such as spmv. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function */ template<typename ...Args> inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false); if (omp_threads < 2) { for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) schedule(dynamic) for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } } #else for (int64_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif return true; } /*! * \brief Launch CPU kernel which has OMP tuning data available. * When using this for a new kernel op, add declaration and tuning objects to * operator_tune.cc * \tparam PRIMITIVE_OP The primitive operation to use for tuning * \tparam DType Data type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param dest Destination pointer (used to infer DType) * \param args Varargs to eventually pass to the OP::Map() function */ template<typename PRIMITIVE_OP, typename DType, typename ...Args> static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... 
args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP( N, static_cast<size_t>(omp_threads))) { for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } } else { #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); ++i) { OP::Map(i, args...); } } #else for (size_t i = 0; i < N; ++i) { OP::Map(i, args...); } #endif } /*! * \brief Launch custom-tuned kernel where each thread is set to * operate on a contiguous partition * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param N Number of iterations * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions */ template<typename ...Args> inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) { #ifdef _OPENMP const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); if (omp_threads < 2) { OP::Map(0, N, args...); } else { const auto length = (N + omp_threads - 1) / omp_threads; #pragma omp parallel for num_threads(omp_threads) for (index_t i = 0; i < static_cast<index_t>(N); i += length) { OP::Map(i, i + length > N ? N - i : length, args...); } } #else OP::Map(0, N, args...); #endif } /*! * \brief Launch a tunable OP with implicitly-supplied data type * \tparam DType Data type * \tparam T OP type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<T, DType>(s, N, dest, args...); return true; } /*! * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req) * \tparam DType Data type * \tparam T Wrapper type * \tparam Args Varargs type to eventually pass to the OP::Map() function * \param s Stream (usually null for CPU) * \param N Number of iterations * \param args Varargs to eventually pass to the OP::Map() function * \return Always true */ template<typename DType, typename T = OP, typename ...Args> static MSHADOW_CINLINE typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) { LaunchTuned<typename T::Operation, DType>(s, N, dest, args...); return true; } }; #ifdef __CUDACC__ template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, args...); } } template<typename OP, typename ...Args> __global__ void mxnet_generic_kernel_ex(int N, Args... args) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { OP::Map(i, 1, args...); } } template<typename OP> struct Kernel<OP, gpu> { /*! \brief Launch GPU kernel */ template<typename ...Args> inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... 
args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel); } template<typename ...Args> inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) { if (0 == N) return; using namespace mshadow::cuda; int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); mxnet_generic_kernel_ex<OP, Args...> <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>( N, args...); MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex); } }; #endif // __CUDACC__ /*! * \brief Set to immediate scalar value kernel * \tparam val Scalar immediate */ template<int val> struct set_to_int : public tunable { // mxnet_op version (when used directly with Kernel<>::Launch()) */ template<typename DType> MSHADOW_XINLINE static void Map(index_t i, DType *out) { out[i] = DType(val); } // mshadow_op version (when used with op_with_req<>) MSHADOW_XINLINE static int Map() { return val; } }; /*! * \brief Special-case kernel shortcut for setting to zero and one */ using set_zero = set_to_int<0>; using set_one = set_to_int<1>; } // namespace mxnet_op } // namespace op } // namespace mxnet #endif // MXNET_OPERATOR_MXNET_OP_H_
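A standalone analogue of the Kernel<OP, cpu>::Launch pattern above: the operator exposes a static Map(i, ...) that computes element i, and the launcher picks between a serial loop and an OpenMP parallel for. The engine::OpenMP thread-count query and the tuning machinery are omitted; square_op, launch_cpu and the 1024-element cutoff are illustrative assumptions, not mxnet's API.

#include <cstddef>
#include <vector>

struct square_op {
    // One unit of work, following the OP::Map(index_t i, ...) convention.
    static void Map(std::ptrdiff_t i, float *out, const float *in) {
        out[i] = in[i] * in[i];
    }
};

template <typename OP, typename... Args>
void launch_cpu(std::size_t N, Args... args) {
    if (N < 1024) {
        // Small problems: a serial loop avoids OpenMP fork/join overhead.
        for (std::size_t i = 0; i < N; ++i) OP::Map((std::ptrdiff_t)i, args...);
    } else {
        #pragma omp parallel for
        for (std::ptrdiff_t i = 0; i < (std::ptrdiff_t)N; ++i) OP::Map(i, args...);
    }
}

int main_sketch() {
    std::vector<float> in(1 << 20, 2.0f), out(in.size());
    launch_cpu<square_op>(in.size(), out.data(), in.data());   // analogous to Kernel<square_op, cpu>::Launch
    return out[0] == 4.0f ? 0 : 1;
}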
libdwt.c
/** * @file * @author David Barina <[email protected]> * @brief Fast wavelet transform implemented via lifting scheme. */ #include "libdwt.h" #define MEASURE_PER_PIXEL //#define DEBUG_VERBOSE //#define DISABLE_MEMCPY //#define ENABLE_LAZY_MEMCPY //#define DISABLE_Y //#define DISABLE_X //#define MEASURE_FACTOR 1000 #define MEASURE_FACTOR 1 //#define FV_ON_MAGNITUDES #define STRING(x) #x #define GCC_VERSION (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) #if (GCC_VERSION >= 30200) && (GCC_VERSION < 30300) #warning "GCC 3.2" #endif #if (GCC_VERSION >= 30300) && (GCC_VERSION < 30400) #warning "GCC 3.3" #endif #if (GCC_VERSION >= 30400) && (GCC_VERSION < 40000) #warning "GCC 3.4" #endif #if (GCC_VERSION >= 40000) && (GCC_VERSION < 40100) #warning "GCC 4.0" #endif #if (GCC_VERSION >= 40100) && (GCC_VERSION < 40200) #warning "GCC 4.1" #endif #if (GCC_VERSION >= 40200) && (GCC_VERSION < 40300) #warning "GCC 4.2" #endif #if (GCC_VERSION >= 40300) && (GCC_VERSION < 40400) #warning "GCC 4.3" #endif #if (GCC_VERSION >= 40400) && (GCC_VERSION < 40500) #warning "GCC 4.4" #endif #if (GCC_VERSION >= 40500) && (GCC_VERSION < 40600) #warning "GCC 4.5" #endif #if (GCC_VERSION >= 40600) && (GCC_VERSION < 40700) #warning "GCC 4.6" #endif #if (GCC_VERSION >= 40700) && (GCC_VERSION < 40800) #warning "GCC 4.7" #endif #if (GCC_VERSION >= 40800) && (GCC_VERSION < 40900) #warning "GCC 4.8" #endif #if (GCC_VERSION >= 40900) #warning "GCC 4.9+" #endif #if (GCC_VERSION < 40300) #warning Missing GCC 4.3+ #warning Missing __builtin___clear_cache function #define __builtin___clear_cache(begin,end) #endif #ifdef NDEBUG /* Release build */ #undef DEBUG #define FUNC_BEGIN #define FUNC_END #define dbg(msg, ...) #else /* Debug build */ #ifndef DEBUG #define DEBUG #endif #ifdef DEBUG_VERBOSE #define FUNC_BEGIN dwt_util_log(LOG_DBG, "%s ENTRY\n", __FUNCTION__) #define FUNC_END dwt_util_log(LOG_DBG, "%s EXIT\n", __FUNCTION__) #else #define FUNC_BEGIN #define FUNC_END #endif #define dbg(msg, ...) 
dwt_util_log(LOG_DBG, ("%s: " msg), __FUNCTION__, ##__VA_ARGS__) #endif /** UTIA ASVP/EdkDSP specific code */ #ifdef __asvp__ #define WAL_NATIVE_DMA #include <wal.h> #include <wal_bce_dma.h> #include <bce_dma_config.h> #ifndef BCE_DMA_CFGTABLE_NUM_ITEMS #warning BCE_DMA_CFGTABLE_NUM_ITEMS was not defined, using default value of 2 #define BCE_DMA_CFGTABLE_NUM_ITEMS 2 #endif WAL_REGISTER_WORKER(worker0, BCE_DMA_GENERIC_4D, bce_dma_cfgtable, 0, 1, 0); WAL_REGISTER_WORKER(worker1, BCE_DMA_GENERIC_4D, bce_dma_cfgtable, 1, 1, 0); WAL_REGISTER_WORKER(worker2, BCE_DMA_GENERIC_4D, bce_dma_cfgtable, 2, 1, 0); WAL_REGISTER_WORKER(worker3, BCE_DMA_GENERIC_4D, bce_dma_cfgtable, 3, 1, 0); wal_worker_t *worker[BCE_DMA_CFGTABLE_NUM_ITEMS] = { &worker0_data_structure, #if BCE_DMA_CFGTABLE_NUM_ITEMS > 1 &worker1_data_structure, #endif #if BCE_DMA_CFGTABLE_NUM_ITEMS > 2 &worker2_data_structure, #endif #if BCE_DMA_CFGTABLE_NUM_ITEMS > 3 &worker3_data_structure, #endif }; #include "firmware/fw_fp01_lift4sa.h" #include "firmware/fw_fp01_lift4sb.h" #define BANK_SIZE 1024 #define WAL_BANK_POS(off) ( off ) #define WAL_DMA_MASK(ch) ( 1<<(ch) ) #ifdef NDEBUG /* Release build */ #define WAL_CHECK(expr) (expr) #else /* Debug build */ #define WAL_CHECK(expr) ( wal_abort(STRING(expr), expr) ) #endif #endif /** UNUSED macro */ #include "inline.h" #ifndef BANK_SIZE #define BANK_SIZE 4096 #endif /** disable timers when using Par4All tool */ #if !defined(P4A) #define USE_TIME_CLOCK #define USE_TIME_CLOCK_GETTIME #define USE_TIME_CLOCK_GETTIME_REALTIME #define USE_TIME_CLOCK_GETTIME_MONOTONIC #define USE_TIME_CLOCK_GETTIME_MONOTONIC_RAW #define USE_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID #define USE_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID #define USE_TIME_TIMES #define USE_TIME_IOCTL_RTC #define USE_TIME_GETTIMEOFDAY #define USE_TIME_GETRUSAGE #define USE_TIME_GETRUSAGE_SELF #define USE_TIME_GETRUSAGE_CHILDREN #define USE_TIME_GETRUSAGE_THREAD #endif // FIXME: glibc only #include <features.h> /** include LINUX_VERSION_CODE and KERNEL_VERSION macros */ #if defined(__linux) && !defined(microblaze) #include <linux/version.h> #endif /** define HAVE_TIME_* macros when corresponding timers are available */ #if defined(_GNU_SOURCE) || defined(_ISOC99_SOURCE) || defined(_POSIX_C_SOURCE) #define HAVE_TIME_CLOCK #endif #if _POSIX_C_SOURCE >= 199309L || _XOPEN_SOURCE >= 500 #define HAVE_TIME_CLOCK_GETTIME #ifdef _POSIX_C_SOURCE #include <unistd.h> // _POSIX_TIMERS, _POSIX_MONOTONIC_CLOCK, _POSIX_CPUTIME, _POSIX_THREAD_CPUTIME #ifdef _POSIX_TIMERS #define HAVE_TIME_CLOCK_GETTIME_REALTIME #ifdef _POSIX_MONOTONIC_CLOCK #define HAVE_TIME_CLOCK_GETTIME_MONOTONIC #endif #if defined(__linux) && !defined(microblaze) #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28) // FIXME: glibc 2.12.1+ #if defined(__GLIBC__) #if __GLIBC_PREREQ(2,12) #pragma message "INFO: Have glibc 2.12+" #define HAVE_TIME_CLOCK_GETTIME_MONOTONIC_RAW #endif #endif #endif #endif #ifdef _POSIX_CPUTIME #define HAVE_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID #endif #ifdef _POSIX_THREAD_CPUTIME #define HAVE_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID #endif #endif #else #define HAVE_TIME_CLOCK_GETTIME_REALTIME #define HAVE_TIME_CLOCK_GETTIME_MONOTONIC #define HAVE_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID #define HAVE_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID #endif #endif #if defined(_GNU_SOURCE) || defined(_SVID_SOURCE) || defined(_BSD_SOURCE) || defined(_POSIX_C_SOURCE) #define HAVE_TIME_TIMES #endif #if defined(__linux) && !defined(microblaze) #define HAVE_TIME_IOCTL_RTC #endif #if 
defined(_GNU_SOURCE) || defined(_SVID_SOURCE) || defined(_BSD_SOURCE) || defined(_POSIX_C_SOURCE) #define HAVE_TIME_GETTIMEOFDAY #endif #if defined(_GNU_SOURCE) || defined(_SVID_SOURCE) || defined(_BSD_SOURCE) || defined(_POSIX_C_SOURCE) #define HAVE_TIME_GETRUSAGE #define HAVE_TIME_GETRUSAGE_SELF #define HAVE_TIME_GETRUSAGE_CHILDREN #if defined(__linux) && !defined(microblaze) #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26) // FIXME: glibc 2.15+ needed #if defined(__GLIBC__) #if __GLIBC_PREREQ(2,15) #pragma message "INFO: Have glibc 2.15+" #define HAVE_TIME_GETRUSAGE_THREAD #endif #endif #endif #endif #endif /** define ENABLE_TIME_* macros when they are available and intended for use */ #if defined(USE_TIME_CLOCK) && defined(HAVE_TIME_CLOCK) #define ENABLE_TIME_CLOCK #endif #if defined(USE_TIME_CLOCK_GETTIME) && defined(HAVE_TIME_CLOCK_GETTIME) #define ENABLE_TIME_CLOCK_GETTIME #endif #if defined(USE_TIME_CLOCK_GETTIME_REALTIME) && defined(HAVE_TIME_CLOCK_GETTIME_REALTIME) #define ENABLE_TIME_CLOCK_GETTIME_REALTIME #endif #if defined(USE_TIME_CLOCK_GETTIME_MONOTONIC) && defined(HAVE_TIME_CLOCK_GETTIME_MONOTONIC) #define ENABLE_TIME_CLOCK_GETTIME_MONOTONIC #endif #if defined(USE_TIME_CLOCK_GETTIME_MONOTONIC_RAW) && defined(HAVE_TIME_CLOCK_GETTIME_MONOTONIC_RAW) #define ENABLE_TIME_CLOCK_GETTIME_MONOTONIC_RAW #endif #if defined(USE_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID) && defined(HAVE_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID) #define ENABLE_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID #endif #if defined(USE_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID) && defined(HAVE_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID) #define ENABLE_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID #endif #if defined(USE_TIME_TIMES) && defined(HAVE_TIME_TIMES) #define ENABLE_TIME_TIMES #endif #if defined(USE_TIME_GETRUSAGE) && defined(HAVE_TIME_GETRUSAGE) #define ENABLE_TIME_GETRUSAGE #endif #if defined(USE_TIME_IOCTL_RTC) && defined(HAVE_TIME_IOCTL_RTC) #define ENABLE_TIME_IOCTL_RTC #endif #if defined(USE_TIME_GETTIMEOFDAY) && defined(HAVE_TIME_GETTIMEOFDAY) #define ENABLE_TIME_GETTIMEOFDAY #endif #if defined(USE_TIME_GETRUSAGE_SELF) && defined(HAVE_TIME_GETRUSAGE_SELF) #define ENABLE_TIME_GETRUSAGE_SELF #endif #if defined(USE_TIME_GETRUSAGE_CHILDREN) && defined(HAVE_TIME_GETRUSAGE_CHILDREN) #define ENABLE_TIME_GETRUSAGE_CHILDREN #endif #if defined(USE_TIME_GETRUSAGE_THREAD) && defined(HAVE_TIME_GETRUSAGE_THREAD) #define ENABLE_TIME_GETRUSAGE_THREAD #endif #pragma message "Enabled timers:" #ifdef ENABLE_TIME_CLOCK #pragma message "TIME_CLOCK: enabled" #else #pragma message "TIME_CLOCK: disabled" #endif #ifdef ENABLE_TIME_CLOCK_GETTIME #pragma message "TIME_CLOCK_GETTIME: enabled" #else #pragma message "TIME_CLOCK_GETTIME: disabled" #endif #ifdef ENABLE_TIME_CLOCK_GETTIME_REALTIME #pragma message "TIME_CLOCK_GETTIME_REALTIME: enabled" #else #pragma message "TIME_CLOCK_GETTIME_REALTIME: disabled" #endif #ifdef ENABLE_TIME_CLOCK_GETTIME_MONOTONIC #pragma message "TIME_CLOCK_GETTIME_MONOTONIC: enabled" #else #pragma message "TIME_CLOCK_GETTIME_MONOTONIC: disabled" #endif #ifdef ENABLE_TIME_CLOCK_GETTIME_MONOTONIC_RAW #pragma message "TIME_CLOCK_GETTIME_MONOTONIC_RAW: enabled" #else #pragma message "TIME_CLOCK_GETTIME_MONOTONIC_RAW: disabled" #endif #ifdef ENABLE_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID #pragma message "TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID: enabled" #else #pragma message "TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID: disabled" #endif #ifdef ENABLE_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID #pragma message "TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID: 
enabled" #else #pragma message "TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID: disabled" #endif #ifdef ENABLE_TIME_TIMES #pragma message "TIME_TIMES: enabled" #else #pragma message "TIME_TIMES: disabled" #endif #ifdef ENABLE_TIME_GETRUSAGE #pragma message "TIME_GETRUSAGE: enabled" #else #pragma message "TIME_GETRUSAGE: disabled" #endif #ifdef ENABLE_TIME_IOCTL_RTC #pragma message "TIME_IOCTL_RTC: enabled" #else #pragma message "TIME_IOCTL_RTC: disabled" #endif #ifdef ENABLE_TIME_GETTIMEOFDAY #pragma message "TIME_GETTIMEOFDAY: enabled" #else #pragma message "TIME_GETTIMEOFDAY: disabled" #endif #ifdef ENABLE_TIME_GETRUSAGE_SELF #pragma message "TIME_GETRUSAGE_SELF: enabled" #else #pragma message "TIME_GETRUSAGE_SELF: disabled" #endif #ifdef ENABLE_TIME_GETRUSAGE_CHILDREN #pragma message "TIME_GETRUSAGE_CHILDREN: enabled" #else #pragma message "TIME_GETRUSAGE_CHILDREN: disabled" #endif #ifdef ENABLE_TIME_GETRUSAGE_THREAD #pragma message "TIME_GETRUSAGE_THREAD: enabled" #else #pragma message "TIME_GETRUSAGE_THREAD: disabled" #endif /** include necessary headers for selected timers */ #if defined(ENABLE_TIME_CLOCK_GETTIME) \ || defined(ENABLE_TIME_CLOCK_GETTIME_REALTIME) \ || defined(ENABLE_TIME_CLOCK_GETTIME_MONOTONIC) \ || defined(ENABLE_TIME_CLOCK_GETTIME_MONOTONIC_RAW) \ || defined(ENABLE_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID) \ || defined(ENABLE_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID) // NOTE: -lrt #include <time.h> // struct timespec, clock_gettime, CLOCK_REALTIME, CLOCK_MONOTONIC, CLOCK_PROCESS_CPUTIME_ID, CLOCK_THREAD_CPUTIME_ID #endif #ifdef ENABLE_TIME_CLOCK #include <time.h> // clock, CLOCKS_PER_SEC #endif #ifdef ENABLE_TIME_TIMES #include <sys/times.h> // struct tms, times #include <unistd.h> // sysconf, _SC_CLK_TCK #endif #ifdef ENABLE_TIME_IOCTL_RTC #include <sys/ioctl.h> // ioctl #include <linux/rtc.h> // struct rtc_time, RTC_RD_TIME #include <fcntl.h> // open, O_NONBLOCK, O_RDONLY #include <unistd.h> // close #include <time.h> // struct tm, mktime #endif #ifdef ENABLE_TIME_GETTIMEOFDAY #include <sys/time.h> // struct timeval, gettimeofday #endif #if defined(ENABLE_TIME_GETRUSAGE) \ || defined(ENABLE_TIME_GETRUSAGE_SELF) \ || defined(ENABLE_TIME_GETRUSAGE_CHILDREN) \ || defined(ENABLE_TIME_GETRUSAGE_THREAD) #include <time.h> // struct timespec #include <unistd.h> #include <sys/resource.h> // getrusage, RUSAGE_SELF, struct rusage #include <sys/time.h> // struct timeval, TIMEVAL_TO_TIMESPEC #endif /** other headers */ #include <assert.h> // assert #include <stddef.h> // NULL, size_t #include <stdlib.h> // abort, malloc, free, qsort #include <limits.h> // CHAR_BIT // NOTE: -lm #include <math.h> // fabs, fabsf, isnan, isinf, powf #include <stdio.h> // FILE, fopen, fprintf, fclose #include <string.h> // memcpy #include <stdarg.h> // va_start, va_end #include <malloc.h> // memalign #include <unistd.h> // sysconf, _SC_HOST_NAME_MAX, _SC_PAGESIZE, _SC_LEVEL1_DCACHE_SIZE, _SC_LEVEL1_DCACHE_ASSOC, _SC_LEVEL1_DCACHE_LINESIZE #include <float.h> // FLT_EPSILON, DBL_EPSILON #include <stddef.h> // ptrdiff_t #include <ctype.h> // isspace /** SSE intrinsics */ #ifdef __SSE__ #pragma message "INFO: Using SSE" #include <xmmintrin.h> #endif /** OpenMP header when used */ #ifdef _OPENMP #pragma message "INFO: Using OpenMP" #include <omp.h> #endif #ifdef microblaze inline float powf( float x, float y ) { return __builtin_powf(x, y); } #endif #define ASM_MARKER __asm volatile ("# MARKER: " QUOTE(__LINE__)) #ifdef __asvp__ /** total number of workers available */ const int dwt_util_global_total_workers = 
BCE_DMA_CFGTABLE_NUM_ITEMS; static int get_total_workers() { return dwt_util_global_total_workers; } #endif /** how many workers use for computation (can be less than total number of workers) */ #ifdef __asvp__ int dwt_util_global_active_workers = BCE_DMA_CFGTABLE_NUM_ITEMS; #else int dwt_util_global_active_workers = 1; #endif static int get_active_workers() { return dwt_util_global_active_workers; } static void set_active_workers( int active_workers ) { dwt_util_global_active_workers = active_workers; } static size_t alignment( size_t type_size ) { assert( type_size ); #ifdef microblaze // DMA memory transfers seems to need alignment of 2*sizeof(float) = 8 // http://www.xilinx.com/support/documentation/sw_manuals/mb_ref_guide.pdf => Memory Architecture UNUSED(type_size); return 8; #endif #ifdef __x86_64__ // due to SSE memory access sizeof(__m128) = 16 // FIXME: this should return proper value according to accel_type (not for each implementation the SSE alignment is needed) UNUSED(type_size); return 16; #endif #ifdef __arm__ // http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.ddi0301h/Cdfifaec.html // FIXME: according to manual, should be "type_size" only. however, this fails on RasPi atleast (for floats, doubles and int are OK) return 2*type_size; #endif // fallback: unaligned data return 1; } size_t dwt_util_alignment( size_t type_size ) { return alignment(type_size); } static int is_aligned_s( const void *ptr ) { const size_t alignment = dwt_util_alignment(sizeof(float)); return ( (intptr_t)ptr & (intptr_t)(alignment-1) ) ? 0 : 1; } static void *align( void *ptr, size_t alignment ) { return (void *)( ((intptr_t)ptr+(alignment-1)) & (~(alignment-1)) ); } /** in bytes; offset in src[] and dst[] is given by worker_id * dwt_util_global_data_step */ ptrdiff_t dwt_util_global_data_step = 0; static ptrdiff_t get_data_step_s() { return dwt_util_global_data_step; } static void set_data_step_s( ptrdiff_t data_step ) { dwt_util_global_data_step = data_step; } /** in elements; offset in temp[] is given by worker_id * dwt_util_global_temp_step */ int dwt_util_global_temp_step = 0; static int get_temp_step() { return dwt_util_global_temp_step; } static void set_temp_step( int temp_step ) { dwt_util_global_temp_step = temp_step; } /** active firmware in all ASVP acceleration units */ enum dwt_op dwt_util_global_active_op = DWT_OP_NONE; /** this PACKAGE_STRING macro must be defined via compiler's command line */ #ifndef PACKAGE_STRING #error PACKAGE_STRING is not defined #endif /** quoting macros */ #define QUOTE(x) STRING(x) /** Calc offset in src[] or dst[] array for current worker. */ static float *calc_data_offset_s( float *addr, ///< pointer to array assigned to worker 0 int worker_id ///< identifier of current worker ) { return (float *)( (intptr_t)addr + (get_data_step_s() * worker_id) ); } /** Calc offset in src[] or dst[] array for current worker. 
*/ static const float *calc_data_offset_const_s( const float *addr, ///< pointer to array assigned to worker 0 int worker_id ///< identifier of current worker ) { return (const float *)( (intptr_t)addr + (get_data_step_s() * worker_id) ); } #ifdef microblaze static inline void flush_cache( void *addr, ///< base address size_t size ///< length of memory in bytes ) { FUNC_BEGIN; // FIXME(ASVP): 4 or 8, should be detected const size_t dcache_line_len = 4; intptr_t tmp = size + (dcache_line_len * 4); do { __asm volatile ( "wdc %0, %1;" : : "r" ((intptr_t)addr+tmp), "r" (0) : "memory" ); tmp -= dcache_line_len * 4; } while( tmp >= 0 ); FUNC_END; } #endif #ifdef __x86_64__ static inline void flush_cache( void *addr, ///< base address size_t size ///< length of memory in bytes ) { FUNC_BEGIN; const intptr_t begin = (intptr_t)addr; const intptr_t end = (intptr_t)addr + size; for(intptr_t p = begin; p < end; p++) { __asm volatile ("clflush (%0)" : : "r"((void *)p) : "memory"); } __asm volatile ("mfence"); FUNC_END; } #endif #ifdef __arm__ static inline void flush_cache( void *addr, ///< base address size_t size ///< length of memory in bytes ) { __builtin___clear_cache(addr, (char *)addr+size); } #endif void dwt_util_flush_cache( void *addr, size_t size ) { flush_cache(addr, size); } static inline void flush_cache_s( float *addr, size_t size ) { flush_cache( (void *)addr, size * sizeof(float) ); } #ifdef __asvp__ void wal_abort( const char *str, int res ) { #ifdef DEBUG_VERBOSE dwt_util_log(LOG_DBG, "%s = ", str); #else UNUSED(str); #endif switch(res) { case WAL_RES_OK: #ifdef DEBUG_VERBOSE printf("WAL_RES_OK (all is OK)\n"); #endif return; break; case WAL_RES_WNULL: printf("WAL_RES_WNULL (argument is a NULL)\n"); return; break; case WAL_RES_ERR: printf("WAL_RES_ERR (generic error)\n"); break; case WAL_RES_ENOINIT: printf("WAL_RES_ENOINIT (not initiated)\n"); break; case WAL_RES_ENULL: printf("WAL_RES_ENULL (null pointer)\n"); break; case WAL_RES_ERUNNING: printf("WAL_RES_ERUNNING (worker is running)\n"); break; case WAL_RES_ERANGE: printf("WAL_RES_ERANGE (index/value is out of range)\n"); break; default: printf("(unknown error)\n"); } dwt_util_abort(); } #endif const char *dwt_util_version() { return QUOTE(PACKAGE_STRING); } const char *dwt_util_arch() { #ifdef microblaze // HACK: ugly buggy gcc workaround return "microblaze"; #endif return QUOTE(ARCH); } int dwt_util_global_accel_type = 0; static void set_accel_type( int accel_type ) { dwt_util_global_accel_type = accel_type; } static int get_accel_type() { return dwt_util_global_accel_type; } int dwt_util_get_accel() { return get_accel_type(); } #include "inline.h" int dwt_util_ceil_log2( int x ) { return ceil_log2(x); } int dwt_util_pow2_ceil_log2( int x ) { return pow2_ceil_log2(x); } #include "inline.h" int dwt_util_ceil_div( int x, int y ) { return ceil_div(x, y); } int dwt_util_floor_div( int x, int y ) { return floor_div(x, y); } #include "inline.h" int dwt_util_to_even( int x ) { return to_even(x); } int dwt_util_up_to_even( int x ) { return up_to_even(x); } int dwt_util_up_to_mul4( int x ) { return up_to_mul4(x); } int dwt_util_to_odd( int x ) { return to_odd(x); } static int is_aligned_4( const void *ptr ) { return ( (intptr_t)ptr&(intptr_t)(4-1) ) ? 0 : 1; } static int is_aligned_8( const void *ptr ) { return ( (intptr_t)ptr&(intptr_t)(8-1) ) ? 0 : 1; } static int is_aligned_16( const void *ptr ) { return ( (intptr_t)ptr&(intptr_t)(16-1) ) ? 
0 : 1; } static intptr_t align_4( intptr_t p ) { return (p+(4-1))&(~(4-1)); } static intptr_t align_8( intptr_t p ) { return (p+(8-1))&(~(8-1)); } static intptr_t align_16( intptr_t p ) { return (p+(16-1))&(~(16-1)); } static intptr_t align_64( intptr_t p ) { return (p+(64-1))&(~(64-1)); } static intptr_t align_4096( intptr_t p ) { return (p+(4096-1))&(~(4096-1)); } intptr_t dwt_util_align_4( intptr_t p ) { return align_4(p); } intptr_t dwt_util_align_8( intptr_t p ) { return align_8(p); } intptr_t dwt_util_align_16( intptr_t p ) { return align_16(p); } static intptr_t align_int( intptr_t addr, size_t alignment ) { return (intptr_t)align((void *)addr, alignment); } static int temp_calc_internal( size_t alignment, ///< alignment (bytes) size_t elem_size, ///< element size (bytes) int offset, ///< offset (elements) int elements, ///< number of elements (elements) int worker ///< worker_id or total_workers (workers) ) { const int padding = 1; // elements const int offset1 = offset*elem_size; // bytes const int offset2 = (alignment-offset1) + align_int(padding*elem_size, alignment); // bytes const int size = elements*elem_size; // bytes const int block_size = align_int(offset2 + size + padding*elem_size, alignment); // bytes const int total_bytes = block_size * worker + offset2; // bytes const int total_elems = total_bytes/elem_size; // elements return total_elems; } /** * @note should return in bytes, only for compatibility in elements * @returns in elements */ static int calc_and_set_temp_size_s( int elements, ///< number of elements (floats) int offset ///< in elements, e.g. +1 float ) { const size_t elem_size = sizeof(float); // bytes const int workers = get_active_workers(); // workers const size_t alignment = dwt_util_alignment(sizeof(float)); // bytes set_temp_step(elements); // elements return temp_calc_internal(alignment, elem_size, offset, elements, workers); } #include "system.h" // is_aligned static void *ptralign_down( void *ptr, size_t alignment ) { return (void *)( (intptr_t)ptr & ~(alignment-1) ); } static float *calc_temp_offset2_s( float *addr, ///< pointer to temp[] or temp[]+offset int worker_id, ///< identifier of current worker int offset ///< offset ) { const size_t elem_size = sizeof(float); // bytes const size_t alignment = dwt_util_alignment(sizeof(float)); // bytes int return_offset = 0; // elements // correct the alignment if( !is_aligned(addr, alignment) ) { float *old = addr; addr = (float *)ptralign_down(addr, alignment); ptrdiff_t ptrdiff = (intptr_t)old - (intptr_t)addr; return_offset = ptrdiff / elem_size; } // if requested with offset then offset cannot be zero if( 0 == offset && 0 != return_offset ) { offset = return_offset; } const int elements = get_temp_step(); // elements return addr + temp_calc_internal(alignment, elem_size, offset, elements, worker_id) + return_offset; } #include "inline.h" int *dwt_util_addr_coeff_i( void *ptr, int y, int x, int stride_x, int stride_y ) { return addr2_i(ptr, y, x, stride_x, stride_y); } int *dwt_util_addr_coeff_i16( void *ptr, int y, int x, int stride_x, int stride_y ) { return addr2_i16(ptr, y, x, stride_x, stride_y); } const int *dwt_util_addr_coeff_const_i( const void *ptr, int y, int x, int stride_x, int stride_y ) { return addr2_const_i(ptr, y, x, stride_x, stride_y); } const int16_t *dwt_util_addr_coeff_const_i16( const void *ptr, int y, int x, int stride_x, int stride_y ) { return addr2_const_i16(ptr, y, x, stride_x, stride_y); } float *dwt_util_addr_coeff_s( void *ptr, int y, int x, int stride_x, int stride_y 
) { return addr2_s(ptr, y, x, stride_x, stride_y); } const float *dwt_util_addr_coeff_const_s( const void *ptr, int y, int x, int stride_x, int stride_y ) { return addr2_const_s(ptr, y, x, stride_x, stride_y); } double *dwt_util_addr_coeff_d( void *ptr, int y, int x, int stride_x, int stride_y ) { return addr2_d(ptr, y, x, stride_x, stride_y); } void *dwt_util_addr_coeff( void *ptr, int y, int x, int stride_x, int stride_y ) { return addr2(ptr, y, x, stride_x, stride_y); } /** * @brief Pixel value of test image. */ static void dwt_util_test_image_value_i_d( double *dest, int x, int y, int rand, int type ) { switch(type) { case 0: x >>= rand; *dest = 2*x*y / (double)(x*x + y*y + 1); break; #ifdef __SSE__ case 1: x >>= rand; *dest = 2*x*y / (double)(x*x + y*y + 1) * fabs(sin(x/10.)) * fabs(cos(y*x/5.)); break; #endif default: { dwt_util_log(LOG_ERR, "Unknown test image type.\n"); dwt_util_abort(); } } } static void dwt_util_test_image_value_i_i( int *dest, int x, int y, int rand, int type ) { switch(type) { case 0: x >>= rand; *dest = 255 * (2*x*y) / (x*x + y*y + 1); break; case 2: *dest = x^y; *dest &= 0xff; break; default: { dwt_util_log(LOG_ERR, "Unknown test image type.\n"); dwt_util_abort(); } } } static void dwt_util_test_image_value_i_i16( int16_t *dest, int x, int y, int rand, int type ) { switch(type) { case 0: x >>= rand; *dest = (int16_t)( 255 * (2*x*y) / (x*x + y*y + 1) ); break; case 2: *dest = (int16_t)( x^y ); *dest &= (int16_t)( 0xff ); break; default: { dwt_util_log(LOG_ERR, "Unknown test image type.\n"); dwt_util_abort(); } } } /** * @brief Pixel value of test image. */ static void dwt_util_test_image_value_i_s( float *dest, int x, int y, int rand, int type ) { x++; y++; switch(type) { case 0: x >>= rand; *dest = 2*x*y / (float)(x*x + y*y + 1); break; #ifdef __SSE__ case 1: x >>= rand; *dest = 2*x*y / (float)(x*x + y*y + 1) * fabsf(sinf(x/10.f)) * fabsf(cosf(y*x/5.f)); break; #endif case 2: { int i = x^y; i &= 0xff; *dest = (float)i/32; break; } case 3: { int v = (((x&1)<<1)|(y&1))+1; *dest = v/4.f; break; } default: { dwt_util_log(LOG_ERR, "Unknown test image type.\n"); dwt_util_abort(); } } } // TODO: propagate type of test image void dwt_util_test_image_fill_d( void *ptr, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y, int rand ) { assert( NULL != ptr ); for(int y = 0; y < size_i_big_y; y++) for(int x = 0; x < size_i_big_x; x++) dwt_util_test_image_value_i_d( addr2_d(ptr, y, x, stride_x, stride_y), x, y, rand, 0 ); } // TODO: propagate type of test image void dwt_util_test_image_fill_i( void *ptr, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y, int rand ) { assert( NULL != ptr ); for(int y = 0; y < size_i_big_y; y++) for(int x = 0; x < size_i_big_x; x++) dwt_util_test_image_value_i_i( addr2_i(ptr, y, x, stride_x, stride_y), x, y, rand, 0 ); } void dwt_util_test_image_fill2_i( void *ptr, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y, int rand, int type ) { assert( NULL != ptr ); for(int y = 0; y < size_i_big_y; y++) for(int x = 0; x < size_i_big_x; x++) dwt_util_test_image_value_i_i( addr2_i(ptr, y, x, stride_x, stride_y), x, y, rand, type ); } void dwt_util_test_image_fill2_i16( void *ptr, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y, int rand, int type ) { assert( NULL != ptr ); for(int y = 0; y < size_i_big_y; y++) for(int x = 0; x < size_i_big_x; x++) dwt_util_test_image_value_i_i16( addr2_i16(ptr, y, x, stride_x, stride_y), x, y, rand, type ); } void dwt_util_test_image_fill_s( void *ptr, int 
stride_x, int stride_y, int size_i_big_x, int size_i_big_y, int rand ) { FUNC_BEGIN; assert( NULL != ptr ); const int type = 0; for(int y = 0; y < size_i_big_y; y++) for(int x = 0; x < size_i_big_x; x++) dwt_util_test_image_value_i_s( addr2_s(ptr, y, x, stride_x, stride_y), x, y, rand, type ); FUNC_END; } void dwt_util_test_image_fill2_s( void *ptr, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y, int rand, int type ) { FUNC_BEGIN; assert( NULL != ptr ); for(int y = 0; y < size_i_big_y; y++) for(int x = 0; x < size_i_big_x; x++) dwt_util_test_image_value_i_s( addr2_s(ptr, y, x, stride_x, stride_y), x, y, rand, type ); FUNC_END; } void dwt_util_test_image_zero_s( void *ptr, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y ) { assert( NULL != ptr ); for(int y = 0; y < size_i_big_y; y++) for(int x = 0; x < size_i_big_x; x++) *addr2_s(ptr, y, x, stride_x, stride_y) = 0.0f; } static size_t image_size( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y ) { UNUSED(stride_y); UNUSED(size_o_big_x); return stride_x * size_o_big_y; } size_t dwt_util_image_size( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y ) { return image_size( stride_x, stride_y, size_o_big_x, size_o_big_y); } // TODO: this function should return a pointer void dwt_util_alloc_image( void **pptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y ) { FUNC_BEGIN; assert( NULL != pptr ); UNUSED(stride_y); UNUSED(size_o_big_x); *pptr = (void *)memalign(16, stride_x*size_o_big_y); if(NULL == *pptr) { dwt_util_log(LOG_ERR, "Unable to allocate memory.\n"); dwt_util_abort(); } FUNC_END; } void *dwt_util_alloc_image2( int stride_x, int stride_y, int size_x, int size_y ) { void *ptr; dwt_util_alloc_image( &ptr, stride_x, stride_y, size_x, size_y ); return ptr; } void dwt_util_free_image( void **pptr ) { assert( pptr != NULL ); free(*pptr); *pptr = NULL; } static int is_nan_or_inf_d(double x) { #ifdef microblaze return ( ((*(uint32_t *)(void *)&x)>>20) & 0x7ff ) == 0x7ff; #else return isnan(x) || isinf(x); #endif } int dwt_util_compare_d( void *ptr1, void *ptr2, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y ) { assert( ptr1 != NULL && ptr2 != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 ); const double eps = 1e-6; for(int y = 0; y < size_i_big_y; y++) for(int x = 0; x < size_i_big_x; x++) { const double a = *addr2_d(ptr1, y, x, stride_x, stride_y); const double b = *addr2_d(ptr2, y, x, stride_x, stride_y); if( is_nan_or_inf_d(a) || is_nan_or_inf_d(b) ) return 1; if( fabs(a - b) > eps ) return 1; } return 0; } int dwt_util_compare_i( void *ptr1, void *ptr2, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y ) { assert( ptr1 != NULL && ptr2 != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 ); for(int y = 0; y < size_i_big_y; y++) for(int x = 0; x < size_i_big_x; x++) { const int a = *addr2_i(ptr1, y, x, stride_x, stride_y); const int b = *addr2_i(ptr2, y, x, stride_x, stride_y); if( abs(a - b) > 0 ) { #ifdef DEBUG dwt_util_log(LOG_DBG, "%s: %i != %i at (x=%i, y=%i)\n", __FUNCTION__, a, b, x, y); #endif return 1; } } return 0; } int dwt_util_compare2_i( void *ptr1, void *ptr2, int stride1_x, int stride1_y, int stride2_x, int stride2_y, int size_x, int size_y ) { assert( ptr1 != NULL && ptr2 != NULL && size_x >= 0 && size_y >= 0 ); for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { const int a = *addr2_i(ptr1, y, x, stride1_x, stride1_y); const int b = *addr2_i(ptr2, y, x, stride2_x, stride2_y); if( abs(a - b) > 0 ) { #ifdef DEBUG 
dwt_util_log(LOG_DBG, "%s: %i != %i at (x=%i, y=%i)\n", __FUNCTION__, a, b, x, y); #endif return 1; } } } return 0; } int dwt_util_compare_s( void *ptr1, void *ptr2, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y ) { assert( ptr1 != NULL && ptr2 != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 ); const float eps = 1e-3; for(int y = 0; y < size_i_big_y; y++) for(int x = 0; x < size_i_big_x; x++) { const float a = *addr2_s(ptr1, y, x, stride_x, stride_y); const float b = *addr2_s(ptr2, y, x, stride_x, stride_y); if( isnan(a) || isinf(a) || isnan(b) || isinf(b) ) return 1; if( fabsf(a - b) > eps ) return 1; } return 0; } int dwt_util_compare2_s( void *ptr1, void *ptr2, int stride1_x, int stride1_y, int stride2_x, int stride2_y, int size_x, int size_y ) { assert( ptr1 != NULL && ptr2 != NULL && size_x >= 0 && size_y >= 0 ); int ret = 0; const float eps = 1.e-3f; for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { const float a = *addr2_s(ptr1, y, x, stride1_x, stride1_y); const float b = *addr2_s(ptr2, y, x, stride2_x, stride2_y); if( isnan(a) || isinf(a) || isnan(b) || isinf(b) ) { #ifdef COMPARE_DESTROY *addr2_s(ptr1, y, x, stride1_x, stride1_y) = 1.f; #endif ret = 1; } if( fabsf(a - b) > eps ) { #ifdef COMPARE_DESTROY *addr2_s(ptr1, y, x, stride1_x, stride1_y) = 1.f; #endif ret = 1; } else { #ifdef COMPARE_DESTROY *addr2_s(ptr1, y, x, stride1_x, stride1_y) = 0.f; #endif } } } return ret; } int dwt_util_compare2_destructive_s( void *ptr1, const void *ptr2, int stride1_x, int stride1_y, int stride2_x, int stride2_y, int size_x, int size_y ) { assert( ptr1 != NULL && ptr2 != NULL && size_x >= 0 && size_y >= 0 ); int ret = 0; const float eps = 1.e-3f; for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { const float a = *addr2_s (ptr1, y, x, stride1_x, stride1_y); const float b = *addr2_const_s(ptr2, y, x, stride2_x, stride2_y); float *dest = addr2_s(ptr1, y, x, stride1_x, stride1_y); *dest = 0.f; if( isnan(a) || isinf(a) || isnan(b) || isinf(b) ) { *dest = 1.f; ret = 1; } if( fabsf(a - b) > eps ) { *dest = 1.f; ret = 1; } } } return ret; } int dwt_util_compare2_destructive2_s( void *ptr1, const void *ptr2, void *map, int stride1_x, int stride1_y, int stride2_x, int stride2_y, int map_stride_x, int map_stride_y, int size_x, int size_y ) { assert( ptr1 && ptr2 && map && size_x >= 0 && size_y >= 0 ); int ret = 0; const float eps = 1.e-3f; for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { const float a = *addr2_s (ptr1, y, x, stride1_x, stride1_y); const float b = *addr2_const_s(ptr2, y, x, stride2_x, stride2_y); float *dest = addr2_s(map, y, x, map_stride_x, map_stride_y); *dest = 0.f; if( isnan(a) || isinf(a) || isnan(b) || isinf(b) ) { *dest = 1.f; ret = 1; } if( fabsf(a - b) > eps ) { *dest = 1.f; ret = 1; } } } return ret; } int dwt_util_compare2_destructive2_i16( void *ptr1, const void *ptr2, void *map, int stride1_x, int stride1_y, int stride2_x, int stride2_y, int map_stride_x, int map_stride_y, int size_x, int size_y ) { assert( ptr1 && ptr2 && map && size_x >= 0 && size_y >= 0 ); int ret = 0; for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { const int16_t a = *addr2_i16 (ptr1, y, x, stride1_x, stride1_y); const int16_t b = *addr2_const_i16(ptr2, y, x, stride2_x, stride2_y); int16_t *dest = addr2_i16(map, y, x, map_stride_x, map_stride_y); *dest = 0; if( abs(a - b) > 0 ) { *dest = 255; ret = 1; } } } return ret; } void dwt_cdf97_f_d( const double *src, double *dst, double *tmp, int N ) { dwt_cdf97_f_ex_d( 
src, dst, dst + ceil_div2(N), tmp, N ); } void dwt_cdf53_f_d( const double *src, double *dst, double *tmp, int N ) { dwt_cdf53_f_ex_d( src, dst, dst + ceil_div2(N), tmp, N ); } void dwt_cdf97_f_s( const float *src, float *dst, float *tmp, int N ) { dwt_cdf97_f_ex_s( src, dst, dst + ceil_div2(N), tmp, N ); } void dwt_cdf53_f_s( const float *src, float *dst, float *tmp, int N ) { dwt_cdf53_f_ex_s( src, dst, dst + ceil_div2(N), tmp, N ); } void dwt_cdf97_i_d( const double *src, double *dst, double *tmp, int N ) { dwt_cdf97_i_ex_d( src, src + ceil_div2(N), dst, tmp, N ); } void dwt_cdf53_i_d( const double *src, double *dst, double *tmp, int N ) { dwt_cdf53_i_ex_d( src, src + ceil_div2(N), dst, tmp, N ); } void dwt_cdf97_i_s( const float *src, float *dst, float *tmp, int N ) { dwt_cdf97_i_ex_s( src, src + ceil_div2(N), dst, tmp, N ); } void dwt_cdf53_i_s( const float *src, float *dst, float *tmp, int N ) { dwt_cdf53_i_ex_s( src, src + ceil_div2(N), dst, tmp, N ); } void dwt_cdf97_f_ex_d( const double *src, double *dst_l, double *dst_h, double *tmp, int N ) { dwt_cdf97_f_ex_stride_d( src, dst_l, dst_h, tmp, N, sizeof(double) ); } void dwt_cdf53_f_ex_d( const double *src, double *dst_l, double *dst_h, double *tmp, int N ) { dwt_cdf53_f_ex_stride_d( src, dst_l, dst_h, tmp, N, sizeof(double) ); } void dwt_cdf97_f_ex_s( const float *src, float *dst_l, float *dst_h, float *tmp, int N ) { dwt_cdf97_f_ex_stride_s( src, dst_l, dst_h, tmp, N, sizeof(float) ); } void dwt_cdf53_f_ex_i( const int *src, int *dst_l, int *dst_h, int *tmp, int N ) { dwt_cdf53_f_ex_stride_i( src, dst_l, dst_h, tmp, N, sizeof(int) ); } void dwt_cdf53_f_ex_s( const float *src, float *dst_l, float *dst_h, float *tmp, int N ) { dwt_cdf53_f_ex_stride_s( src, dst_l, dst_h, tmp, N, sizeof(float) ); } void dwt_cdf97_f_ex_stride_d( const double *src, double *dst_l, double *dst_h, double *tmp, int N, int stride ) { assert( N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst_l[0] = src[0] * dwt_cdf97_s1_d; return; } // copy src into tmp dwt_util_memcpy_stride_d(tmp, sizeof(double), src, stride, N); // predict 1 + update 1 for(int i=1; i<N-2+(N&1); i+=2) tmp[i] -= dwt_cdf97_p1_d * (tmp[i-1] + tmp[i+1]); if(is_odd(N)) tmp[N-1] += 2 * dwt_cdf97_u1_d * tmp[N-2]; else tmp[N-1] -= 2 * dwt_cdf97_p1_d * tmp[N-2]; tmp[0] += 2 * dwt_cdf97_u1_d * tmp[1]; for(int i=2; i<N-(N&1); i+=2) tmp[i] += dwt_cdf97_u1_d * (tmp[i-1] + tmp[i+1]); // predict 2 + update 2 for(int i=1; i<N-2+(N&1); i+=2) tmp[i] -= dwt_cdf97_p2_d * (tmp[i-1] + tmp[i+1]); if(is_odd(N)) tmp[N-1] += 2 * dwt_cdf97_u2_d * tmp[N-2]; else tmp[N-1] -= 2 * dwt_cdf97_p2_d * tmp[N-2]; tmp[0] += 2 * dwt_cdf97_u2_d * tmp[1]; for(int i=2; i<N-(N&1); i+=2) tmp[i] += dwt_cdf97_u2_d * (tmp[i-1] + tmp[i+1]); // scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf97_s1_d; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf97_s2_d; // copy tmp into dst dwt_util_memcpy_stride_d(dst_l, stride, tmp+0, 2*sizeof(double), ceil_div2(N)); dwt_util_memcpy_stride_d(dst_h, stride, tmp+1, 2*sizeof(double), floor_div2(N)); } void dwt_cdf53_f_ex_stride_d( const double *src, double *dst_l, double *dst_h, double *tmp, int N, int stride ) { assert( N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst_l[0] = src[0] * dwt_cdf53_s1_d; return; } // copy src into tmp dwt_util_memcpy_stride_d(tmp, sizeof(double), src, stride, N); // predict 1 + update 1 for(int 
i=1; i<N-2+(N&1); i+=2) tmp[i] -= dwt_cdf53_p1_d * (tmp[i-1] + tmp[i+1]); if(is_odd(N)) tmp[N-1] += 2 * dwt_cdf53_u1_d * tmp[N-2]; else tmp[N-1] -= 2 * dwt_cdf53_p1_d * tmp[N-2]; tmp[0] += 2 * dwt_cdf53_u1_d * tmp[1]; for(int i=2; i<N-(N&1); i+=2) tmp[i] += dwt_cdf53_u1_d * (tmp[i-1] + tmp[i+1]); // scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s1_d; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s2_d; // copy tmp into dst dwt_util_memcpy_stride_d(dst_l, stride, tmp+0, 2*sizeof(double), ceil_div2(N)); dwt_util_memcpy_stride_d(dst_h, stride, tmp+1, 2*sizeof(double), floor_div2(N)); } #ifdef __x86_64__ static void accel_lift_op4s_main_nosse_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) // this long string disables SSE support (only no-sse is not enough) __attribute__ ((__target__ ("no-mmx,no-sse,no-sse2,no-sse3,no-sse4,no-sse4.1"))); #endif static void accel_lift_op4s_main_nosse_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { assert( steps >= 0 ); if( scaling < 0 ) { // inv for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = calc_temp_offset2_s(arr, w, 0); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // descale float *out = arr_local+4; for(int s = 0; s < steps; s++) { out[0] *= v[0]; out[1] *= v[1]; out += 2; } // operations for(int off = 4; off >= 1; off--) { float *out = arr_local+off; const float c = w[off-1]; for(int s = 0; s < steps; s++) { out[0] += c * (out[-1] + out[+1]); out += 2; } } } } else if( scaling > 0 ) { // fwd for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = calc_temp_offset2_s(arr, w, 0); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // operations for(int off = 4; off >= 1; off--) { float *out = arr_local+off; const float c = w[off-1]; for(int s = 0; s < steps; s++) { out[0] += c * (out[-1] + out[+1]); out += 2; } } // scale float *out = arr_local+0; for(int s = 0; s < steps; s++) { out[0] *= v[0]; out[1] *= v[1]; out += 2; } } } else { // uni dwt_util_abort(); } } #if __GNUC__ >= 4 && __GNUC_MINOR__ >= 7 #pragma message "INFO: Running on GCC 4.7+" #define ASSUME_ALIGNED(lvalueptr, align) __builtin_assume_aligned((lvalueptr), (align)) #else #pragma message "INFO: Missing GCC 4.7+" #define ASSUME_ALIGNED(lvalueptr, align) (lvalueptr) #endif #define ASSUME_ALIGNED_S(lvalueptr) ASSUME_ALIGNED((lvalueptr), alignment(sizeof(float))) #define ASSUME_ALIGNED_D(lvalueptr) ASSUME_ALIGNED((lvalueptr), alignment(sizeof(double))) #define ASSUME_ALIGNED_I(lvalueptr) ASSUME_ALIGNED((lvalueptr), alignment(sizeof(int))) /** * @brief Non-accelerated PicoBlaze operation. * * Two pairs (predict and update) of lifting steps and coefficients scaling * merged together. * * @param[in] scaling Perform scaling of coefficients. Possible values are: * @li s = 0 : without scaling, * @li s > 0 : scaling after lifting, * @li s < 0 : scaling before lifting. 
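 *
 * As implemented below, the four coefficients are applied as two
 * predict/update pairs in the order alpha, beta, gamma, delta; zeta enters
 * only through the scaling phase, which multiplies even samples by 1/zeta
 * and odd samples by zeta (after the lifting steps when scaling > 0, as a
 * descaling before them when scaling < 0).
 *
 * A minimal calling sketch; the coefficient names are placeholders for
 * whatever lifting constants the caller uses for its wavelet:
 *
 * @code
 * // per-worker temp[] buffer holds 4 + 2*steps floats
 * accel_lift_op4s_main_s(temp, steps, alpha, beta, gamma, delta, zeta, +1); // forward
 * accel_lift_op4s_main_s(temp, steps, alpha, beta, gamma, delta, zeta, -1); // inverse
 * @endcode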
*/ static void accel_lift_op4s_main_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { FUNC_BEGIN; assert( steps >= 0 ); if( scaling < 0 ) { // inv for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = ASSUME_ALIGNED_S(calc_temp_offset2_s(arr, w, 0)); assert( is_aligned_s(arr_local) ); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // descale float *out = arr_local+4; for(int s = 0; s < steps; s++) { out[0] *= v[0]; out[1] *= v[1]; out += 2; } // operations for(int off = 4; off >= 1; off--) { float *out = arr_local+off; const float c = w[off-1]; for(int s = 0; s < steps; s++) { out[0] += c * (out[-1] + out[+1]); out += 2; } } } } else if( scaling > 0 ) { // fwd for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = ASSUME_ALIGNED_S(calc_temp_offset2_s(arr, w, 0)); assert( is_aligned_s(arr_local) ); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // operations for(int off = 4; off >= 1; off--) { float *out = arr_local+off; const float c = w[off-1]; for(int s = 0; s < steps; s++) { out[0] += c * (out[-1] + out[+1]); out += 2; } } // scale float *out = arr_local+0; for(int s = 0; s < steps; s++) { out[0] *= v[0]; out[1] *= v[1]; out += 2; } } } else { // uni dwt_util_abort(); } FUNC_END; } static void accel_lift_op4s_main_stride_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { FUNC_BEGIN; assert( steps >= 0 ); if( scaling < 0 ) { // inv assert( 1 == dwt_util_get_num_workers() ); { // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // descale float *out = addr1_s(arr, 4, stride); for(int s = 0; s < steps; s++) { *addr1_s(out, 0, stride) *= v[0]; *addr1_s(out, 1, stride) *= v[1]; out = addr1_s(out, 2, stride); } // operations for(int off = 4; off >= 1; off--) { float *out = addr1_s(arr, off, stride); const float c = w[off-1]; for(int s = 0; s < steps; s++) { *addr1_s(out, 0, stride) += c * (*addr1_s(out, -1, stride) + *addr1_s(out, +1, stride)); out = addr1_s(out, 2, stride); } } } } else if( scaling > 0 ) { // fwd assert( 1 == dwt_util_get_num_workers() ); { // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // operations for(int off = 4; off >= 1; off--) { float *out = addr1_s(arr, off, stride); const float c = w[off-1]; for(int s = 0; s < steps; s++) { *addr1_s(out, 0, stride) += c * (*addr1_s(out, -1, stride) + *addr1_s(out, +1, stride)); out = addr1_s(out, 2, stride); } } // scale float *out = addr1_s(arr, 0, stride); for(int s = 0; s < steps; s++) { *addr1_s(out, 0, stride) *= v[0]; *addr1_s(out, 1, stride) *= v[1]; out = addr1_s(out, 2, stride); } } } else { // uni dwt_util_abort(); } FUNC_END; } /** * horizontal vectorisation (multi-loop approach), forward transform */ static void accel_lift_op4s_fwd_main_stride_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { FUNC_BEGIN; assert( steps >= 0 ); assert( scaling > 0 ); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // operations for(int off = 4; off >= 1; off--) { float *out = addr1_s(arr, off, stride); const float c = w[off-1]; for(int s = 0; s < steps; s++) { *addr1_s(out, 0, stride) += c * (*addr1_s(out, -1, stride) + *addr1_s(out, +1, stride)); out = addr1_s(out, 
2, stride); } } // scale float *out = addr1_s(arr, 0, stride); for(int s = 0; s < steps; s++) { *addr1_s(out, 0, stride) *= v[0]; *addr1_s(out, 1, stride) *= v[1]; out = addr1_s(out, 2, stride); } FUNC_END; } /** * horizontal vectorisation (multi-loop approach), inverse transform */ static void accel_lift_op4s_inv_main_stride_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { FUNC_BEGIN; assert( steps >= 0 ); assert( scaling < 0 ); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // descale float *out = addr1_s(arr, 4, stride); for(int s = 0; s < steps; s++) { *addr1_s(out, 0, stride) *= v[0]; *addr1_s(out, 1, stride) *= v[1]; out = addr1_s(out, 2, stride); } // operations for(int off = 4; off >= 1; off--) { float *out = addr1_s(arr, off, stride); const float c = w[off-1]; for(int s = 0; s < steps; s++) { *addr1_s(out, 0, stride) += c * (*addr1_s(out, -1, stride) + *addr1_s(out, +1, stride)); out = addr1_s(out, 2, stride); } } FUNC_END; } static void accel_lift_op2s_inv_main_stride_s( float *arr, int steps, float alpha, float beta, float zeta, int scaling, int stride ) { FUNC_BEGIN; assert( steps >= 0 ); assert( scaling < 0 ); // constants const float w[2] = { beta, alpha }; const float v[2] = { 1/zeta, zeta }; // descale float *out = addr1_s(arr, 2, stride); for(int s = 0; s < steps; s++) { *addr1_s(out, 0, stride) *= v[0]; *addr1_s(out, 1, stride) *= v[1]; out = addr1_s(out, 2, stride); } // operations for(int off = 2; off >= 1; off--) { float *out = addr1_s(arr, off, stride); const float c = w[off-1]; for(int s = 0; s < steps; s++) { *addr1_s(out, 0, stride) += c * (*addr1_s(out, -1, stride) + *addr1_s(out, +1, stride)); out = addr1_s(out, 2, stride); } } FUNC_END; } #ifdef __SSE__ /** * multi-loop algorithm with 4 workers */ static void accel_lift_op4s_main_ml4_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { assert( steps >= 0 ); if( scaling < 0 ) { // inv assert( 4 == dwt_util_get_num_workers() ); // constants __m128 w[4] = { { delta, delta, delta, delta }, { gamma, gamma, gamma, gamma }, { beta, beta, beta, beta }, { alpha, alpha, alpha, alpha } }; __m128 v[2] = { { 1/zeta, 1/zeta, 1/zeta, 1/zeta }, { zeta, zeta, zeta, zeta } }; float *arr_local[4]; // pointers for(int worker = 0; worker < 4; worker++) { arr_local[worker] = calc_temp_offset2_s(arr, worker, 0); } // buffer const int buff_size = 4 + 2*steps; __m128 buff[buff_size]; // FIXME(x86): huge array on the stack __m128 *out; // load buffer assert( is_aligned_16(arr_local[0]) ); const int t4 = buff_size >> 2; const int t3 = buff_size & ~3; for(int t = 0; t < t4; t++) { // FIXME(x86): access to image data directly instead of storing it into temp[] __m128 s0 = _mm_load_ps(&arr_local[0][4*t]); __m128 s1 = _mm_load_ps(&arr_local[1][4*t]); __m128 s2 = _mm_load_ps(&arr_local[2][4*t]); __m128 s3 = _mm_load_ps(&arr_local[3][4*t]); _MM_TRANSPOSE4_PS(s0, s1, s2, s3); buff[4*t+0] = s0; buff[4*t+1] = s1; buff[4*t+2] = s2; buff[4*t+3] = s3; } for(int t = t3; t < buff_size; t++) { // FIXME(x86): access to image data directly instead of storing it into temp[] buff[t][0] = arr_local[0][t]; buff[t][1] = arr_local[1][t]; buff[t][2] = arr_local[2][t]; buff[t][3] = arr_local[3][t]; } out = buff + 4; // descale for(int s = 0; s < steps; s++) { out[0] *= v[0]; out[1] *= v[1]; out += 2; } for(int off = 4; off >= 1; off--) { const __m128 coeff = w[off-1]; out = buff + off; // 
operation for(int s = 0; s < steps; s++) { out[0] += coeff * (out[-1] + out[+1]); out += 2; } } // store buffer for(int t = 0; t < t4; t++) { __m128 s0 = buff[4*t+0]; __m128 s1 = buff[4*t+1]; __m128 s2 = buff[4*t+2]; __m128 s3 = buff[4*t+3]; _MM_TRANSPOSE4_PS(s0, s1, s2, s3); _mm_store_ps(&arr_local[0][4*t], s0); _mm_store_ps(&arr_local[1][4*t], s1); _mm_store_ps(&arr_local[2][4*t], s2); _mm_store_ps(&arr_local[3][4*t], s3); } for(int t = t3; t < buff_size; t++) { arr_local[0][t] = buff[t][0]; arr_local[1][t] = buff[t][1]; arr_local[2][t] = buff[t][2]; arr_local[3][t] = buff[t][3]; } } else if( scaling > 0 ) { // fwd assert( 4 == dwt_util_get_num_workers() ); // constants __m128 w[4] = { { delta, delta, delta, delta }, { gamma, gamma, gamma, gamma }, { beta, beta, beta, beta }, { alpha, alpha, alpha, alpha } }; __m128 v[2] = { { 1/zeta, 1/zeta, 1/zeta, 1/zeta }, { zeta, zeta, zeta, zeta } }; float *arr_local[4]; // pointers for(int worker = 0; worker < 4; worker++) { arr_local[worker] = calc_temp_offset2_s(arr, worker, 0); } // buffer const int buff_size = 4 + 2*steps; __m128 buff[buff_size]; // FIXME(x86): huge array on the stack __m128 *out; // load buffer assert( is_aligned_16(arr_local[0]) ); const int t4 = buff_size >> 2; const int t3 = buff_size & ~3; for(int t = 0; t < t4; t++) { // FIXME(x86): access to image data directly instead of storing it into temp[] __m128 s0 = _mm_load_ps(&arr_local[0][4*t]); __m128 s1 = _mm_load_ps(&arr_local[1][4*t]); __m128 s2 = _mm_load_ps(&arr_local[2][4*t]); __m128 s3 = _mm_load_ps(&arr_local[3][4*t]); _MM_TRANSPOSE4_PS(s0, s1, s2, s3); buff[4*t+0] = s0; buff[4*t+1] = s1; buff[4*t+2] = s2; buff[4*t+3] = s3; } for(int t = t3; t < buff_size; t++) { // FIXME(x86): access to image data directly instead of storing it into temp[] buff[t][0] = arr_local[0][t]; buff[t][1] = arr_local[1][t]; buff[t][2] = arr_local[2][t]; buff[t][3] = arr_local[3][t]; } for(int off = 4; off >= 1; off--) { const __m128 coeff = w[off-1]; out = buff + off; // operation for(int s = 0; s < steps; s++) { out[0] += coeff * (out[-1] + out[+1]); out += 2; } } out = buff + 0; // descale for(int s = 0; s < steps; s++) { out[0] *= v[0]; out[1] *= v[1]; out += 2; } // store buffer for(int t = 0; t < t4; t++) { __m128 s0 = buff[4*t+0]; __m128 s1 = buff[4*t+1]; __m128 s2 = buff[4*t+2]; __m128 s3 = buff[4*t+3]; _MM_TRANSPOSE4_PS(s0, s1, s2, s3); _mm_store_ps(&arr_local[0][4*t], s0); _mm_store_ps(&arr_local[1][4*t], s1); _mm_store_ps(&arr_local[2][4*t], s2); _mm_store_ps(&arr_local[3][4*t], s3); } for(int t = t3; t < buff_size; t++) { arr_local[0][t] = buff[t][0]; arr_local[1][t] = buff[t][1]; arr_local[2][t] = buff[t][2]; arr_local[3][t] = buff[t][3]; } } else { // uni dwt_util_abort(); } } #endif static void op4s_sdl2_import_preload_s_ref(float *out, const float *restrict addr) { out[0] = addr[0]; out[1] = addr[1]; out[2] = addr[2]; out[3] = addr[3]; } #ifdef __SSE__ #define op4s_sdl2_import_preload_s_sse(out, addr) \ do { \ (out) = _mm_load_ps(addr); \ } while(0) #endif static void op4s_sdl2_import_s_ref(float *l, int idx, const float *out) { l[idx] = out[idx]; } #ifdef __SSE__ #define op4s_sdl2_import_s_sse(l, idx, out) \ do { \ (out) = _mm_shuffle_ps((out), (out), _MM_SHUFFLE(2,1,0,3)); \ (l) = _mm_move_ss((l), (out)); \ (l) = _mm_shuffle_ps((l), (l), _MM_SHUFFLE((3==idx)?0:3,(2==idx)?0:2,(1==idx)?0:1,(0==idx)?0:0)); \ } while(0) #endif static void op4s_sdl6_import_s_ref(float *l, int idx, const float *out) { l[idx] = out[idx]; } #ifdef __SSE__ #define op4s_sdl6_import_s_sse(l, idx, out) \ 
do { \ (out) = _mm_shuffle_ps((out), (out), _MM_SHUFFLE(2,1,0,3)); \ (l) = _mm_move_ss((l), (out)); \ (l) = _mm_shuffle_ps((l), (l), _MM_SHUFFLE((3==idx)?0:3,(2==idx)?0:2,(1==idx)?0:1,(0==idx)?0:0)); \ } while(0) #endif static void op4s_sdl2_load_s_ref(float *in, const float *addr) { in[0] = addr[0]; in[1] = addr[1]; in[2] = addr[2]; in[3] = addr[3]; } #ifdef __SSE__ #define op4s_sdl2_load_s_sse(in, addr) \ do { \ (in) = _mm_load_ps((const float *)(addr)); \ } while(0) #endif static void op4s_sdl2_shuffle_s_ref(float *c, float *r) { c[0]=c[1]; c[1]=c[2]; c[2]=c[3]; r[0]=r[1]; r[1]=r[2]; r[2]=r[3]; } #ifdef __SSE__ #define op4s_sdl2_shuffle_s_sse(c, r) \ do { \ (c) = _mm_shuffle_ps((c), (c), _MM_SHUFFLE(0,3,2,1)); \ (r) = _mm_shuffle_ps((r), (r), _MM_SHUFFLE(0,3,2,1)); \ } while(0) #endif static void op4s_sdl2_input_low_s_ref(const float *in, float *c, float *r) { c[3] = in[0]; r[3] = in[1]; } #ifdef __SSE__ #define op4s_sdl2_input_low_s_sse(in, c, r) \ do { \ __m128 t; \ (t) = (c); \ (t) = _mm_shuffle_ps((t), (in), _MM_SHUFFLE(1,0,3,2)); \ (c) = _mm_shuffle_ps((c), (t), _MM_SHUFFLE(2,0,1,0)); \ (t) = _mm_shuffle_ps((t), (r), _MM_SHUFFLE(3,2,3,2)); \ (r) = _mm_shuffle_ps((r), (t), _MM_SHUFFLE(1,2,1,0)); \ } while(0) #endif static void op4s_sdl2_input_high_s_ref(const float *in, float *c, float *r) { c[3] = in[2]; r[3] = in[3]; } static void op4s_sdl2_shuffle_input_low_s_ref(const float *in, float *c, float *r) { op4s_sdl2_shuffle_s_ref(c, r); op4s_sdl2_input_low_s_ref(in, c, r); } #ifdef __SSE__ #define op4s_sdl2_shuffle_input_low_s_sse(in, c, r) \ do { \ __m128 t; \ (t) = (in); \ (t) = _mm_shuffle_ps((t), (c), _MM_SHUFFLE(3,2,1,0)); \ (c) = _mm_shuffle_ps((c), (t), _MM_SHUFFLE(0,3,2,1)); \ (t) = _mm_shuffle_ps((t), (r), _MM_SHUFFLE(3,2,1,0)); \ (r) = _mm_shuffle_ps((r), (t), _MM_SHUFFLE(1,3,2,1)); \ } while(0) #endif static void op4s_sdl2_shuffle_input_high_s_ref(const float *in, float *c, float *r) { op4s_sdl2_shuffle_s_ref(c, r); op4s_sdl2_input_high_s_ref(in, c, r); } #ifdef __SSE__ #define op4s_sdl2_shuffle_input_high_s_sse(in, c, r) \ do { \ (in) = _mm_shuffle_ps( (in), (c), _MM_SHUFFLE(3,2,3,2) ); \ (c) = _mm_shuffle_ps( (c), (in), _MM_SHUFFLE(0,3,2,1) ); \ (in) = _mm_shuffle_ps( (in), (r), _MM_SHUFFLE(3,2,1,0) ); \ (r) = _mm_shuffle_ps( (r), (in), _MM_SHUFFLE(1,3,2,1) ); \ } while(0) #endif static void op4s_sdl2_op_s_ref(float *z, const float *c, const float *w, const float *l, const float *r) { z[3] = c[3] + w[3] * ( l[3] + r[3] ); z[2] = c[2] + w[2] * ( l[2] + r[2] ); z[1] = c[1] + w[1] * ( l[1] + r[1] ); z[0] = c[0] + w[0] * ( l[0] + r[0] ); } #ifdef __SSE__ #define op4s_sdl2_op_s_sse(z, c, w, l, r) \ do { \ (z) = (l); \ (z) = _mm_add_ps((z), (r)); \ (z) = _mm_mul_ps((z), (w)); \ (z) = _mm_add_ps((z), (c)); \ } while(0) #endif static void op4s_sdl6_op_s_ref(float *z, const float *w, const float *l, const float *r) { z[3] = z[3] + w[3] * ( l[3] + r[3] ); z[2] = z[2] + w[2] * ( l[2] + r[2] ); z[1] = z[1] + w[1] * ( l[1] + r[1] ); z[0] = z[0] + w[0] * ( l[0] + r[0] ); } #ifdef __SSE__ #define op4s_sdl6_op_s_sse(z, w, l, r) \ do { \ __m128 t; \ (t) = (l); \ (t) = _mm_add_ps((t), (r)); \ (t) = _mm_mul_ps((t), (w)); \ (z) = _mm_add_ps((z), (t)); \ } while(0) #endif static void op4s_sdl2_update_s_ref(float *c, float *l, float *r, const float *z) { c[0] = l[0]; c[1] = l[1]; c[2] = l[2]; c[3] = l[3]; l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; r[0] = z[0]; r[1] = z[1]; r[2] = z[2]; r[3] = z[3]; } #ifdef __SSE__ #define op4s_sdl2_update_s_sse(c, l, r, z) \ do { \ (c) = (l); \ (l) = 
(r); \ (r) = (z); \ } while(0) #endif static void op4s_sdl6_update_s_ref(float *z, float *l, float *r) { float t[4]; t[0] = z[0]; t[1] = z[1]; t[2] = z[2]; t[3] = z[3]; z[0] = l[0]; z[1] = l[1]; z[2] = l[2]; z[3] = l[3]; l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; r[0] = t[0]; r[1] = t[1]; r[2] = t[2]; r[3] = t[3]; } #ifdef __SSE__ #define op4s_sdl6_update_s_sse(z, l, r) \ do { \ __m128 t; \ (t) = (z); \ (z) = (l); \ (l) = (r); \ (r) = (t); \ } while(0) #endif static void op4s_sdl2_output_low_s_ref(float *out, const float *l, const float *z) { out[0] = l[0]; out[1] = z[0]; } #ifdef __SSE__ #define op4s_sdl2_output_low_s_sse(out, l, z) \ do { \ (out) = (l); \ (out) = _mm_unpacklo_ps((out), (z)); \ } while(0) #endif static void op4s_sdl2_output_high_s_ref(float *out, const float *l, const float *z) { out[2] = l[0]; out[3] = z[0]; } #ifdef __SSE__ #define op4s_sdl2_output_high_s_sse(out, l, z) \ do { \ __m128 t; \ (t) = (l); \ (t) = _mm_unpacklo_ps((t), (z)); \ (out) = _mm_shuffle_ps((out), t, _MM_SHUFFLE(1,0,1,0)); \ } while(0) #endif static void op4s_sdl2_scale_s_ref(float *out, const float *v) { out[0] *= v[0]; out[1] *= v[1]; out[2] *= v[2]; out[3] *= v[3]; } #ifdef __SSE__ #define op4s_sdl2_scale_s_sse(out, v) \ do { \ (out) = _mm_mul_ps((out), (v)); \ } while(0) #endif static void op4s_sdl2_descale_s_ref(float *in, const float *v) { in[0] *= v[0]; in[1] *= v[1]; in[2] *= v[2]; in[3] *= v[3]; } #ifdef __SSE__ #define op4s_sdl2_descale_s_sse(in, v) \ do { \ (in) = _mm_mul_ps((in), (v)); \ } while(0) #endif static void op4s_sdl2_save_s_ref(float *out, float *addr) { addr[0] = out[0]; addr[1] = out[1]; } #ifdef __SSE__ #define op4s_sdl2_save_s_sse(out, addr) \ do { \ _mm_storel_pi((__m64 *)(addr), (out)); \ } while(0) #endif static void op4s_sdl2_save_shift_s_ref(float *out, float *addr) { addr[0] = out[0]; addr[1] = out[1]; addr[2] = out[2]; addr[3] = out[3]; } #ifdef __SSE__ #define op4s_sdl2_save_shift_s_sse(out, addr) \ do { \ _mm_store_ps((float *)(addr), (out)); \ } while(0) #endif static void op4s_sdl2_export_s_ref(const float *l, float *addr, int idx) { addr[idx] = l[idx]; } #ifdef __SSE__ #define op4s_sdl2_export_s_sse(l, addr, idx) \ do { \ (addr)[(idx)] = (l)[(idx)]; \ } while(0) #endif static void op4s_sdl6_export_s_ref(const float *l, float *addr, int idx) { addr[idx] = l[idx]; } #ifdef __SSE__ #define op4s_sdl6_export_s_sse(l, addr, idx) \ do { \ (addr)[(idx)] = (l)[(idx)]; \ } while(0) #endif static void op4s_sdl_import_s_ref(float *l, const float *restrict addr, int idx) { l[idx] = addr[idx]; } static void op4s_sdl_import_stride_s_ref(float *l, const float *restrict addr, int idx, int stride) { l[idx] = *addr1_const_s(addr, idx, stride); } #ifdef __SSE__ #define op4s_sdl_import_stride_s_sse(l, addr, idx, stride) \ do { \ l[idx] = *addr1_const_s(addr, idx, stride); \ } while(0) #endif static void op4s_sdl_shuffle_s_ref(float *c, float *r) { c[0]=c[1]; c[1]=c[2]; c[2]=c[3]; r[0]=r[1]; r[1]=r[2]; r[2]=r[3]; } static void op4s_sdl_load_s_ref(float *in, const float *restrict addr) { in[0] = addr[0]; in[1] = addr[1]; } static void op4s_sdl_load_stride_s_ref(float *in, const float *restrict addr, int stride) { in[0] = *addr1_const_s(addr,0,stride); in[1] = *addr1_const_s(addr,1,stride); } #ifdef __SSE__ #define op4s_sdl_load_stride_s_sse(in, addr, stride) \ do { \ in[0] = *addr1_const_s(addr,0,stride); \ in[1] = *addr1_const_s(addr,1,stride); \ } while(0) #endif static void op4s_sdl_input_s_ref(const float *in, float *c, float *r) { c[3] = in[0]; r[3] = in[1]; } static void 
op4s_sdl_op_s_ref(float *z, const float *c, const float *w, const float *l, const float *r) { z[3] = c[3] + w[3] * ( l[3] + r[3] ); z[2] = c[2] + w[2] * ( l[2] + r[2] ); z[1] = c[1] + w[1] * ( l[1] + r[1] ); z[0] = c[0] + w[0] * ( l[0] + r[0] ); } static void op4s_sdl_update_s_ref(float *c, float *l, float *r, const float *z) { c[0] = l[0]; c[1] = l[1]; c[2] = l[2]; c[3] = l[3]; l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; r[0] = z[0]; r[1] = z[1]; r[2] = z[2]; r[3] = z[3]; } static void op4s_sdl_output_s_ref(float *out, const float *l, const float *z) { out[0] = l[0]; out[1] = z[0]; } static void op4s_sdl_scale_s_ref(float *out, const float *v) { out[0] *= v[0]; out[1] *= v[1]; } static void op4s_sdl_descale_s_ref(float *in, const float *v) { in[0] *= v[0]; in[1] *= v[1]; } static void op4s_sdl_save_s_ref(float *out, float *restrict addr) { addr[0] = out[0]; addr[1] = out[1]; } static void op4s_sdl_save_stride_s_ref(float *out, float *restrict addr, int stride) { *addr1_s(addr,0,stride) = out[0]; *addr1_s(addr,1,stride) = out[1]; } #ifdef __SSE__ #define op4s_sdl_save_stride_s_sse(out, addr, stride) \ do { \ *addr1_s(addr,0,stride) = out[0]; \ *addr1_s(addr,1,stride) = out[1]; \ } while(0) #endif static void op4s_sdl_export_s_ref(const float *l, float *restrict addr, int idx) { addr[idx] = l[idx]; } static void op4s_sdl_export_stride_s_ref(const float *l, float *restrict addr, int idx, int stride) { *addr1_s(addr,idx,stride) = l[idx]; } #ifdef __SSE__ #define op4s_sdl_export_stride_s_sse(l,addr,idx,stride) \ do { \ *addr1_s(addr,idx,stride) = l[idx]; \ } while(0) #endif static void op4s_sdl2_preload_prolog_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(w); UNUSED(v); UNUSED(l); UNUSED(c); UNUSED(r); UNUSED(z); UNUSED(in); op4s_sdl2_import_preload_s_ref(out, (*addr)); (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl2_preload_prolog_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_import_preload_s_sse((out), (*(addr))); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl6_preload_prolog_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(w); UNUSED(v); UNUSED(l); UNUSED(r); UNUSED(z); UNUSED(in); op4s_sdl2_import_preload_s_ref(out, (*addr)); (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl6_preload_prolog_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_import_preload_s_sse((out), (*(addr))); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl2_pass_fwd_prolog_full_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(out); // load op4s_sdl2_load_s_ref(in, (*addr)); // shuffle + input-low op4s_sdl2_shuffle_input_low_s_ref(in, c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // update op4s_sdl2_update_s_ref(c, l, r, z); // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl2_pass_fwd_prolog_full_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_load_s_sse((in), (*(addr))); \ op4s_sdl2_shuffle_input_low_s_sse((in), (c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl2_pass_inv_prolog_full_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(out); // load op4s_sdl2_load_s_ref(in, (*addr)); // descale 
op4s_sdl2_descale_s_ref(in, v); // shuffle + input-low op4s_sdl2_shuffle_input_low_s_ref(in, c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // update op4s_sdl2_update_s_ref(c, l, r, z); // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl2_pass_inv_prolog_full_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_load_s_sse((in), (*(addr))); \ op4s_sdl2_descale_s_sse((in), (v)); \ op4s_sdl2_shuffle_input_low_s_sse((in), (c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl6_pass_inv_prolog_full_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(out); // load op4s_sdl2_load_s_ref(in, (*addr)); // descale op4s_sdl2_descale_s_ref(in, v); // shuffle + input-low op4s_sdl2_shuffle_input_low_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // update op4s_sdl6_update_s_ref(z, l, r); // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl6_pass_inv_prolog_full_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_load_s_sse((in), (*(addr))); \ op4s_sdl2_descale_s_sse((in), (v)); \ op4s_sdl2_shuffle_input_low_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl6_pass_fwd_prolog_full_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(out); // load op4s_sdl2_load_s_ref(in, (*addr)); // (descale) // shuffle + input-low op4s_sdl2_shuffle_input_low_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // update op4s_sdl6_update_s_ref(z, l, r); // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl6_pass_fwd_prolog_full_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_load_s_sse((in), (*(addr))); \ op4s_sdl2_shuffle_input_low_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl2_pass_fwd_prolog_light_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(out); UNUSED(addr); // shuffle + input-high op4s_sdl2_shuffle_input_high_s_ref(in, c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // update op4s_sdl2_update_s_ref(c, l, r, z); } #ifdef __SSE__ #define op4s_sdl2_pass_fwd_prolog_light_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_input_high_s_sse((in), (c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ } while(0) #endif static void op4s_sdl2_pass_inv_prolog_light_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(out); UNUSED(addr); // shuffle + input-high op4s_sdl2_shuffle_input_high_s_ref(in, c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // update op4s_sdl2_update_s_ref(c, l, r, z); } #ifdef __SSE__ #define op4s_sdl2_pass_inv_prolog_light_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_input_high_s_sse((in), (c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ } while(0) #endif static void op4s_sdl6_pass_inv_prolog_light_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float 
*restrict *addr) { UNUSED(v); UNUSED(out); UNUSED(addr); // shuffle + input-high op4s_sdl2_shuffle_input_high_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // update op4s_sdl6_update_s_ref(z, l, r); } #ifdef __SSE__ #define op4s_sdl6_pass_inv_prolog_light_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_input_high_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ } while(0) #endif static void op4s_sdl6_pass_fwd_prolog_light_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(out); UNUSED(addr); // shuffle + input-high op4s_sdl2_shuffle_input_high_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // update op4s_sdl6_update_s_ref(z, l, r); } #ifdef __SSE__ #define op4s_sdl6_pass_fwd_prolog_light_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_input_high_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ } while(0) #endif static void op4s_sdl2_pass_fwd_core_light_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(addr); // shuffle + input-high op4s_sdl2_shuffle_input_high_s_ref(in, c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // update op4s_sdl2_update_s_ref(c, l, r, z); } #ifdef __SSE__ #define op4s_sdl2_pass_fwd_core_light_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_input_high_s_sse((in), (c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ } while(0) #endif static void op4s_sdl2_pass_inv_core_light_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(addr); // shuffle + input-high op4s_sdl2_shuffle_input_high_s_ref(in, c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // update op4s_sdl2_update_s_ref(c, l, r, z); } #ifdef __SSE__ #define op4s_sdl2_pass_inv_core_light_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_input_high_s_sse((in), (c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ } while(0) #endif static void op4s_sdl6_pass_inv_core_light_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(addr); // shuffle + input-high op4s_sdl2_shuffle_input_high_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // (update) } #ifdef __SSE__ #define op4s_sdl6_pass_inv_core_light_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_input_high_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ } while(0) #endif static void op4s_sdl6_pass_fwd_core_light_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(addr); // shuffle + input-high op4s_sdl2_shuffle_input_high_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // (update) } #ifdef __SSE__ #define 
op4s_sdl6_pass_fwd_core_light_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_input_high_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ } while(0) #endif static void op4s_sdl6_pass_inv_postcore_light_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(addr); // shuffle + input-high op4s_sdl2_shuffle_input_high_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // update op4s_sdl6_update_s_ref(z, l, r); } #ifdef __SSE__ #define op4s_sdl6_pass_inv_postcore_light_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_input_high_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ } while(0) #endif static void op4s_sdl6_pass_fwd_postcore_light_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(addr); // shuffle + input-high op4s_sdl2_shuffle_input_high_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // update op4s_sdl6_update_s_ref(z, l, r); } #ifdef __SSE__ #define op4s_sdl6_pass_fwd_postcore_light_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_input_high_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ } while(0) #endif static void op4s_sdl2_pass_fwd_core_full_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { // load op4s_sdl2_load_s_ref(in, (*addr)); // shuffle + input-low op4s_sdl2_shuffle_input_low_s_ref(in, c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // output-high op4s_sdl2_output_high_s_ref(out, l, z); // scale op4s_sdl2_scale_s_ref(out, v); // save-shift op4s_sdl2_save_shift_s_ref(out, (*addr)-12); // update op4s_sdl2_update_s_ref(c, l, r, z); // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl2_pass_fwd_core_full_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_load_s_sse((in), (*(addr))); \ op4s_sdl2_shuffle_input_low_s_sse((in), (c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_output_high_s_sse((out), (l), (z)); \ op4s_sdl2_scale_s_sse((out), (v)); \ op4s_sdl2_save_shift_s_sse((out), (*(addr))-12); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl2_pass_inv_core_full_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { // load op4s_sdl2_load_s_ref(in, (*addr)); // descale op4s_sdl2_descale_s_ref(in, v); // shuffle + input-low op4s_sdl2_shuffle_input_low_s_ref(in, c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // output-high op4s_sdl2_output_high_s_ref(out, l, z); // (scale) // save-shift op4s_sdl2_save_shift_s_ref(out, (*addr)-12); // update op4s_sdl2_update_s_ref(c, l, r, z); // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl2_pass_inv_core_full_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_load_s_sse((in), (*(addr))); \ op4s_sdl2_descale_s_sse((in), (v)); \ op4s_sdl2_shuffle_input_low_s_sse((in), (c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_output_high_s_sse((out), (l), (z)); \ 
op4s_sdl2_save_shift_s_sse((out), (*(addr))-12); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl6_pass_inv_core_full_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { // load op4s_sdl2_load_s_ref(in, (*addr)); // descale op4s_sdl2_descale_s_ref(in, v); // shuffle + input-low op4s_sdl2_shuffle_input_low_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-high op4s_sdl2_output_high_s_ref(out, l, z); // (scale) // save-shift op4s_sdl2_save_shift_s_ref(out, (*addr)-12); // (update) // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl6_pass_inv_core_full_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_load_s_sse((in), (*(addr))); \ op4s_sdl2_descale_s_sse((in), (v)); \ op4s_sdl2_shuffle_input_low_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_high_s_sse((out), (l), (z)); \ op4s_sdl2_save_shift_s_sse((out), (*(addr))-12); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl6_pass_fwd_core_full_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { // load op4s_sdl2_load_s_ref(in, (*addr)); // (descale) // shuffle + input-low op4s_sdl2_shuffle_input_low_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-high op4s_sdl2_output_high_s_ref(out, l, z); // (scale) op4s_sdl2_scale_s_ref(out, v); // save-shift op4s_sdl2_save_shift_s_ref(out, (*addr)-12); // (update) // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl6_pass_fwd_core_full_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_load_s_sse((in), (*(addr))); \ op4s_sdl2_shuffle_input_low_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_high_s_sse((out), (l), (z)); \ op4s_sdl2_scale_s_sse((out), (v)); \ op4s_sdl2_save_shift_s_sse((out), (*(addr))-12); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl6_pass_inv_postcore_full_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { // load op4s_sdl2_load_s_ref(in, (*addr)); // descale op4s_sdl2_descale_s_ref(in, v); // shuffle + input-low op4s_sdl2_shuffle_input_low_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-high op4s_sdl2_output_high_s_ref(out, l, z); // (scale) // save-shift op4s_sdl2_save_shift_s_ref(out, (*addr)-12); // update op4s_sdl6_update_s_ref(z, l, r); // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl6_pass_inv_postcore_full_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_load_s_sse((in), (*(addr))); \ op4s_sdl2_descale_s_sse((in), (v)); \ op4s_sdl2_shuffle_input_low_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_high_s_sse((out), (l), (z)); \ op4s_sdl2_save_shift_s_sse((out), (*(addr))-12); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl6_pass_fwd_postcore_full_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { // load op4s_sdl2_load_s_ref(in, (*addr)); // (descale) // shuffle + input-low op4s_sdl2_shuffle_input_low_s_ref(in, z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-high op4s_sdl2_output_high_s_ref(out, l, z); // (scale) op4s_sdl2_scale_s_ref(out, v); // save-shift op4s_sdl2_save_shift_s_ref(out, (*addr)-12); // update op4s_sdl6_update_s_ref(z, l, r); // pointers (*addr) += 4; } 
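/*
 * NOTE: most of the op4s_sdl*_..._s_ref pass helpers are mirrored by
 * *_s_sse macros that perform the same step sequence (load, shuffle + input,
 * lifting op, output, optional scale/descale, save-shift, update) on __m128
 * registers. The "full" passes also touch memory through *addr and advance
 * it by 4 floats, whereas the "light" passes only work on the l/c/r/z
 * registers; prolog, core, postcore and epilog cover the fill, steady-state
 * and drain phases of this small software pipeline.
 */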
#ifdef __SSE__ #define op4s_sdl6_pass_fwd_postcore_full_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_load_s_sse((in), (*(addr))); \ op4s_sdl2_shuffle_input_low_s_sse((in), (z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_high_s_sse((out), (l), (z)); \ op4s_sdl2_scale_s_sse((out), (v)); \ op4s_sdl2_save_shift_s_sse((out), (*(addr))-12); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl2_pass_fwd_epilog_full_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(in); // shuffle op4s_sdl2_shuffle_s_ref(c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // output-high op4s_sdl2_output_high_s_ref(out, l, z); // scale op4s_sdl2_scale_s_ref(out, v); // save-shift op4s_sdl2_save_shift_s_ref(out, (*addr)-12); // update op4s_sdl2_update_s_ref(c, l, r, z); // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl2_pass_fwd_epilog_full_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_output_high_s_sse((out), (l), (z)); \ op4s_sdl2_scale_s_sse((out), (v)); \ op4s_sdl2_save_shift_s_sse((out), (*(addr))-12); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl2_pass_inv_epilog_full_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(in); // shuffle op4s_sdl2_shuffle_s_ref(c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // output-high op4s_sdl2_output_high_s_ref(out, l, z); // (scale) // save-shift op4s_sdl2_save_shift_s_ref(out, (*addr)-12); // update op4s_sdl2_update_s_ref(c, l, r, z); // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl2_pass_inv_epilog_full_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_output_high_s_sse((out), (l), (z)); \ op4s_sdl2_save_shift_s_sse((out), (*(addr))-12); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl6_pass_inv_epilog_full_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(in); // shuffle op4s_sdl2_shuffle_s_ref(z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-high op4s_sdl2_output_high_s_ref(out, l, z); // (scale) // save-shift op4s_sdl2_save_shift_s_ref(out, (*addr)-12); // update op4s_sdl6_update_s_ref(z, l, r); // pointers (*addr) += 4; } #ifdef __SSE__ #define op4s_sdl6_pass_inv_epilog_full_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_high_s_sse((out), (l), (z)); \ op4s_sdl2_save_shift_s_sse((out), (*(addr))-12); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl6_pass_fwd_epilog_full_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(in); // shuffle op4s_sdl2_shuffle_s_ref(z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-high op4s_sdl2_output_high_s_ref(out, l, z); // (scale) op4s_sdl2_scale_s_ref(out, v); // save-shift op4s_sdl2_save_shift_s_ref(out, (*addr)-12); // update op4s_sdl6_update_s_ref(z, l, r); // pointers (*addr) += 4; } #ifdef __SSE__ #define 
op4s_sdl6_pass_fwd_epilog_full_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_high_s_sse((out), (l), (z)); \ op4s_sdl2_scale_s_sse((out), (v)); \ op4s_sdl2_save_shift_s_sse((out), (*(addr))-12); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ (*(addr)) += 4; \ } while(0) #endif static void op4s_sdl2_pass_fwd_epilog_light_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(in); UNUSED(addr); // shuffle op4s_sdl2_shuffle_s_ref(c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // update op4s_sdl2_update_s_ref(c, l, r, z); } #ifdef __SSE__ #define op4s_sdl2_pass_fwd_epilog_light_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ } while(0) #endif static void op4s_sdl2_pass_inv_epilog_light_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(in); UNUSED(addr); // shuffle op4s_sdl2_shuffle_s_ref(c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // update op4s_sdl2_update_s_ref(c, l, r, z); } #ifdef __SSE__ #define op4s_sdl2_pass_inv_epilog_light_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ } while(0) #endif static void op4s_sdl6_pass_inv_epilog_light_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(in); UNUSED(addr); // shuffle op4s_sdl2_shuffle_s_ref(z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // update op4s_sdl6_update_s_ref(z, l, r); } #ifdef __SSE__ #define op4s_sdl6_pass_inv_epilog_light_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ } while(0) #endif static void op4s_sdl6_pass_fwd_epilog_light_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(in); UNUSED(addr); // shuffle op4s_sdl2_shuffle_s_ref(z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // update op4s_sdl6_update_s_ref(z, l, r); } #ifdef __SSE__ #define op4s_sdl6_pass_fwd_epilog_light_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ } while(0) #endif static void op4s_sdl2_pass_fwd_epilog_flush_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(in); // shuffle op4s_sdl2_shuffle_s_ref(c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // scale op4s_sdl2_scale_s_ref(out, v); // save op4s_sdl2_save_s_ref(out, (*addr)-12); // update op4s_sdl2_update_s_ref(c, l, r, z); } 
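/*
 * Illustrative sketch (hypothetical helper, not part of the library): the
 * scale/descale steps used by the passes amount to an elementwise multiply by
 * v = { 1/zeta, zeta, 1/zeta, zeta }.  Forward passes scale the produced
 * output, inverse passes descale the freshly loaded input before the lifting
 * operation.
 */
static void example_op4_scale_s(
	float *vec,		// four packed coefficients (two even/odd pairs)
	const float *v)		// { 1/zeta, zeta, 1/zeta, zeta }
{
	for(int i = 0; i < 4; i++)
		vec[i] *= v[i];
}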
#ifdef __SSE__ #define op4s_sdl2_pass_fwd_epilog_flush_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl2_scale_s_sse((out), (v)); \ op4s_sdl2_save_s_sse((out), (*(addr))-12); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ } while(0) #endif static void op4s_sdl2_pass_inv_epilog_flush_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(in); // shuffle op4s_sdl2_shuffle_s_ref(c, r); // operation op4s_sdl2_op_s_ref(z, c, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // (scale) // save op4s_sdl2_save_s_ref(out, (*addr)-12); // update op4s_sdl2_update_s_ref(c, l, r, z); } #ifdef __SSE__ #define op4s_sdl2_pass_inv_epilog_flush_s_sse(w, v, l, c, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((c), (r)); \ op4s_sdl2_op_s_sse((z), (c), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl2_save_s_sse((out), (*(addr))-12); \ op4s_sdl2_update_s_sse((c), (l), (r), (z)); \ } while(0) #endif static void op4s_sdl6_pass_inv_epilog_flush_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(in); // shuffle op4s_sdl2_shuffle_s_ref(z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // (scale) // save op4s_sdl2_save_s_ref(out, (*addr)-12); // update op4s_sdl6_update_s_ref(z, l, r); } #ifdef __SSE__ #define op4s_sdl6_pass_inv_epilog_flush_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl2_save_s_sse((out), (*(addr))-12); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ } while(0) #endif static void op4s_sdl6_pass_fwd_epilog_flush_s_ref(const float *w, const float *v, float *l, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(in); // shuffle op4s_sdl2_shuffle_s_ref(z, r); // operation op4s_sdl6_op_s_ref(z, w, l, r); // output-low op4s_sdl2_output_low_s_ref(out, l, z); // (scale) op4s_sdl2_scale_s_ref(out, v); // save op4s_sdl2_save_s_ref(out, (*addr)-12); // update op4s_sdl6_update_s_ref(z, l, r); } #ifdef __SSE__ #define op4s_sdl6_pass_fwd_epilog_flush_s_sse(w, v, l, r, z, in, out, addr) \ do { \ op4s_sdl2_shuffle_s_sse((z), (r)); \ op4s_sdl6_op_s_sse((z), (w), (l), (r)); \ op4s_sdl2_output_low_s_sse((out), (l), (z)); \ op4s_sdl2_scale_s_sse((out), (v)); \ op4s_sdl2_save_s_sse((out), (*(addr))-12); \ op4s_sdl6_update_s_sse((z), (l), (r)); \ } while(0) #endif static void op4s_sdl_pass_fwd_prolog_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(out); // shuffle op4s_sdl_shuffle_s_ref(c, r); // load op4s_sdl_load_s_ref(in, *addr+4); // (descale) // input op4s_sdl_input_s_ref(in, c, r); // operation op4s_sdl_op_s_ref(z, c, w, l, r); // (output) // (scale) // (save) // update op4s_sdl_update_s_ref(c, l, r, z); // pointers (*addr) += 2; } static void op4s_sdl_pass_fwd_prolog_stride_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr, int stride) { UNUSED(v); UNUSED(out); // shuffle op4s_sdl_shuffle_s_ref(c, r); // load op4s_sdl_load_stride_s_ref(in, addr1_s(*addr,4,stride), stride); // (descale) 
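	// NOTE: the strided variants address memory through addr1_s(); the
	// element offsets (+4 for the load, -6 for the save, +2 to advance)
	// match the contiguous passes above, presumably with addr1_s()
	// translating them into byte-stride arithmetic.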
// input op4s_sdl_input_s_ref(in, c, r); // operation op4s_sdl_op_s_ref(z, c, w, l, r); // (output) // (scale) // (save) // update op4s_sdl_update_s_ref(c, l, r, z); // pointers *addr = addr1_s(*addr,2,stride); } #ifdef __SSE__ #define op4s_sdl_pass_fwd_prolog_stride_s_sse(w, v, l, c, r, z, in, out, addr, stride) \ do { \ op4s_sdl2_shuffle_s_sse(c, r); \ op4s_sdl_load_stride_s_sse(in, addr1_s(*addr,4,stride), stride); \ op4s_sdl2_input_low_s_sse(in, c, r); \ op4s_sdl2_op_s_sse(z, c, w, l, r); \ op4s_sdl2_update_s_sse(c, l, r, z); \ *addr = addr1_s(*addr,2,stride); \ } while(0) #endif static void op4s_sdl_pass_inv_prolog_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(out); // shuffle op4s_sdl_shuffle_s_ref(c, r); // load op4s_sdl_load_s_ref(in, *addr+4); // descale op4s_sdl_descale_s_ref(in, v); // input op4s_sdl_input_s_ref(in, c, r); // operation op4s_sdl_op_s_ref(z, c, w, l, r); // (output) // (scale) // (save) // update op4s_sdl_update_s_ref(c, l, r, z); // pointers (*addr) += 2; } static void op4s_sdl_pass_fwd_core_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { // shuffle op4s_sdl_shuffle_s_ref(c, r); // load op4s_sdl_load_s_ref(in, *addr+4); // (descale) // input op4s_sdl_input_s_ref(in, c, r); // operation op4s_sdl_op_s_ref(z, c, w, l, r); // output op4s_sdl_output_s_ref(out, l, z); // scale op4s_sdl_scale_s_ref(out, v); // save op4s_sdl_save_s_ref(out, *addr-6); // update op4s_sdl_update_s_ref(c, l, r, z); // pointers (*addr) += 2; } static void op4s_sdl_pass_fwd_core_stride_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr, int stride) { // shuffle op4s_sdl_shuffle_s_ref(c, r); // load op4s_sdl_load_stride_s_ref(in, addr1_s(*addr,4,stride), stride); // (descale) // input op4s_sdl_input_s_ref(in, c, r); // operation op4s_sdl_op_s_ref(z, c, w, l, r); // output op4s_sdl_output_s_ref(out, l, z); // scale op4s_sdl_scale_s_ref(out, v); // save op4s_sdl_save_stride_s_ref(out, addr1_s(*addr,-6,stride), stride); // update op4s_sdl_update_s_ref(c, l, r, z); // pointers *addr = addr1_s(*addr,2,stride); } #ifdef __SSE__ #define op4s_sdl_pass_fwd_core_stride_s_sse(w, v, l, c, r, z, in, out, addr, stride) \ do { \ op4s_sdl2_shuffle_s_sse(c, r); \ op4s_sdl_load_stride_s_sse(in, addr1_s(*addr,4,stride), stride); \ op4s_sdl2_input_low_s_sse(in, c, r); \ op4s_sdl2_op_s_sse(z, c, w, l, r); \ op4s_sdl2_output_low_s_sse(out, l, z); \ op4s_sdl2_scale_s_sse(out, v); \ op4s_sdl_save_stride_s_sse(out, addr1_s(*addr,-6,stride), stride); \ op4s_sdl2_update_s_sse(c, l, r, z); \ *addr = addr1_s(*addr,2,stride); \ } while(0) #endif static void op4s_sdl_pass_inv_core_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { // shuffle op4s_sdl_shuffle_s_ref(c, r); // load op4s_sdl_load_s_ref(in, *addr+4); // descale op4s_sdl_descale_s_ref(in, v); // input op4s_sdl_input_s_ref(in, c, r); // operation op4s_sdl_op_s_ref(z, c, w, l, r); // output op4s_sdl_output_s_ref(out, l, z); // (scale) // save op4s_sdl_save_s_ref(out, *addr-6); // update op4s_sdl_update_s_ref(c, l, r, z); // pointers (*addr) += 2; } static void op4s_sdl_pass_fwd_epilog_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(in); // shuffle op4s_sdl_shuffle_s_ref(c, r); 
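	// no load/descale/input in the epilog: it only drains the coefficients
	// still held in the l/c/r registers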
// (load) // (descale) // (input) // operation op4s_sdl_op_s_ref(z, c, w, l, r); // output op4s_sdl_output_s_ref(out, l, z); // scale op4s_sdl_scale_s_ref(out, v); // save op4s_sdl_save_s_ref(out, *addr-6); // update op4s_sdl_update_s_ref(c, l, r, z); // pointers (*addr) += 2; } static void op4s_sdl_pass_fwd_epilog_stride_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr, int stride) { UNUSED(in); // shuffle op4s_sdl_shuffle_s_ref(c, r); // (load) // (descale) // (input) // operation op4s_sdl_op_s_ref(z, c, w, l, r); // output op4s_sdl_output_s_ref(out, l, z); // scale op4s_sdl_scale_s_ref(out, v); // save op4s_sdl_save_stride_s_ref(out, addr1_s(*addr,-6,stride), stride); // update op4s_sdl_update_s_ref(c, l, r, z); // pointers (*addr) = addr1_s(*addr,2,stride); } #ifdef __SSE__ #define op4s_sdl_pass_fwd_epilog_stride_s_sse(w,v,l,c,r,z,in,out,addr,stride) \ do { \ op4s_sdl2_shuffle_s_sse(c, r); \ op4s_sdl2_op_s_sse(z, c, w, l, r); \ op4s_sdl2_output_low_s_sse(out, l, z); \ op4s_sdl2_scale_s_sse(out, v); \ op4s_sdl_save_stride_s_sse(out, addr1_s(*addr,-6,stride), stride); \ op4s_sdl2_update_s_sse(c, l, r, z); \ (*addr) = addr1_s(*addr,2,stride); \ } while(0) #endif static void op4s_sdl_pass_inv_epilog_s_ref(const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float *restrict *addr) { UNUSED(v); UNUSED(in); // shuffle op4s_sdl_shuffle_s_ref(c, r); // (load) // (descale) // (input) // operation op4s_sdl_op_s_ref(z, c, w, l, r); // (output) op4s_sdl_output_s_ref(out, l, z); // (scale) // save op4s_sdl_save_s_ref(out, *addr-6); // update op4s_sdl_update_s_ref(c, l, r, z); // pointers (*addr) += 2; } #ifdef __SSE__ /** * @brief Shifted Double-Loop implementation of lifting scheme with 6 * iterations merged. * * i.e. 12 = (6)*(2) = (2*3)*(2) coefficients per one iteration. 
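 *
 * With S = steps-3, the core below runs U = S/6 iterations of six merged
 * passes, then T = (S%6)/2 post-core light/full pairs, plus one extra light
 * pass when S is odd; the prolog imports and the epilog exports take care of
 * the border coefficients.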
*/ static void accel_lift_op4s_main_sdl6_sse_s( float *restrict arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { // 6+ coeffs implies 3+ steps assert( steps >= 3 ); const __m128 w = { delta, gamma, beta, alpha }; const __m128 v = { 1/zeta, zeta, 1/zeta, zeta }; __m128 l; __m128 r; __m128 z; __m128 in; __m128 out; const int S = steps-3; const int U = S / 6; const int M = S % 6; const int T = M >> 1; if( scaling < 0 ) { // ****** inverse transform ****** for(int wrk = 0; wrk < dwt_util_get_num_workers(); wrk++) { // *** init *** float *addr = ASSUME_ALIGNED(calc_temp_offset2_s(arr, wrk, 0), 16); assert( is_aligned_16(addr) ); float *base = addr; // *** prolog2 *** // prolog2: import-preload op4s_sdl6_preload_prolog_s_sse(w, v, l, r, z, in, out, &addr); // prolog2: import(3) op4s_sdl6_import_s_sse(l, 3, out); // prolog2: pass-prolog-full op4s_sdl6_pass_inv_prolog_full_s_sse(w, v, l, r, z, in, out, &addr); // prolog2: import(2) op4s_sdl6_import_s_sse(l, 2, out); // prolog2: pass-prolog-light op4s_sdl6_pass_inv_prolog_light_s_sse(w, v, l, r, z, in, out, &addr); // prolog2: import(1) op4s_sdl6_import_s_sse(l, 1, out); // prolog2: pass-prolog-full op4s_sdl6_pass_inv_prolog_full_s_sse(w, v, l, r, z, in, out, &addr); // prolog2: import(0) op4s_sdl6_import_s_sse(l, 0, out); // *** core *** // core: for u = 0 to U for(int u = 0; u < U; u++) { // NOTE: l, r, z // core: pass1-core-light op4s_sdl6_pass_inv_core_light_s_sse(w, v, /*l*/l, /*r*/r, /*z*/z, in, out, &addr); // NOTE: z => l, l => r, r => z // core: pass1-core-full op4s_sdl6_pass_inv_core_full_s_sse(w, v, /*l*/r, /*r*/z, /*z*/l, in, out, &addr); // NOTE: (r => z) => l, (z => l) => r, (l => r) => z // core: pass2-core-light op4s_sdl6_pass_inv_core_light_s_sse(w, v, /*l*/z, /*r*/l, /*z*/r, in, out, &addr); // NOTE: ((l => r) => z) => l, ((r => z) => l) => r, ((z => l) => r) => z // core: pass2-core-full op4s_sdl6_pass_inv_core_full_s_sse(w, v, /*l*/l, /*r*/r, /*z*/z, in, out, &addr); // NOTE: z => l, l => r, r => z // core: pass3-core-light op4s_sdl6_pass_inv_core_light_s_sse(w, v, /*l*/r, /*r*/z, /*z*/l, in, out, &addr); // NOTE: (r => z) => l, (z => l) => r, (l => r) => z // core: pass3-core-full op4s_sdl6_pass_inv_core_full_s_sse(w, v, /*l*/z, /*r*/l, /*z*/r, in, out, &addr); // NOTE: ((l => r) => z) => l, ((r => z) => l) => r, ((z => l) => r) => z } // core: for t = 0 to T do for(int t = 0; t < T; t++) { // core: pass-core-light op4s_sdl6_pass_inv_postcore_light_s_sse(w, v, l, r, z, in, out, &addr); // core: pass-core-full op4s_sdl6_pass_inv_postcore_full_s_sse(w, v, l, r, z, in, out, &addr); } // core: if odd then if( is_odd(S) ) { // core: pass-core-light op4s_sdl6_pass_inv_postcore_light_s_sse(w, v, l, r, z, in, out, &addr); } // *** epilog2 *** if( is_odd(S) ) { // epilog2: export(3) op4s_sdl6_export_s_sse(l, &base[2*steps], 3); // epilog2: pass-epilog-full op4s_sdl6_pass_inv_epilog_full_s_sse(w, v, l, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl6_export_s_sse(l, &base[2*steps], 2); // epilog2: pass-epilog-light op4s_sdl6_pass_inv_epilog_light_s_sse(w, v, l, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl6_export_s_sse(l, &base[2*steps], 1); // epilog2: pass-epilog-full op4s_sdl6_pass_inv_epilog_full_s_sse(w, v, l, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl6_export_s_sse(l, &base[2*steps], 0); } else { // epilog2: export(3) op4s_sdl6_export_s_sse(l, &base[2*steps], 3); // epilog2: pass-epilog-light op4s_sdl6_pass_inv_epilog_light_s_sse(w, v, l, r, z, in, out, &addr); 
// epilog2: export(2) op4s_sdl6_export_s_sse(l, &base[2*steps], 2); // epilog2: pass-epilog-full op4s_sdl6_pass_inv_epilog_full_s_sse(w, v, l, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl6_export_s_sse(l, &base[2*steps], 1); // epilog2: pass-epilog-flush op4s_sdl6_pass_inv_epilog_flush_s_sse(w, v, l, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl6_export_s_sse(l, &base[2*steps], 0); } } } else if ( scaling > 0 ) { // ****** forward transform ****** for(int wrk = 0; wrk < dwt_util_get_num_workers(); wrk++) { // *** init *** float *addr = ASSUME_ALIGNED(calc_temp_offset2_s(arr, wrk, 0), 16); assert( is_aligned_16(addr) ); float *base = addr; // *** prolog2 *** // prolog2: import-preload op4s_sdl6_preload_prolog_s_sse(w, v, l, r, z, in, out, &addr); // prolog2: import(3) op4s_sdl6_import_s_sse(l, 3, out); // prolog2: pass-prolog-full op4s_sdl6_pass_fwd_prolog_full_s_sse(w, v, l, r, z, in, out, &addr); // prolog2: import(2) op4s_sdl6_import_s_sse(l, 2, out); // prolog2: pass-prolog-light op4s_sdl6_pass_fwd_prolog_light_s_sse(w, v, l, r, z, in, out, &addr); // prolog2: import(1) op4s_sdl6_import_s_sse(l, 1, out); // prolog2: pass-prolog-full op4s_sdl6_pass_fwd_prolog_full_s_sse(w, v, l, r, z, in, out, &addr); // prolog2: import(0) op4s_sdl6_import_s_sse(l, 0, out); // *** core *** // core: for u = 0 to U for(int u = 0; u < U; u++) { // NOTE: l, r, z // core: pass1-core-light op4s_sdl6_pass_fwd_core_light_s_sse(w, v, /*l*/l, /*r*/r, /*z*/z, in, out, &addr); // NOTE: z => l, l => r, r => z // core: pass1-core-full op4s_sdl6_pass_fwd_core_full_s_sse(w, v, /*l*/r, /*r*/z, /*z*/l, in, out, &addr); // NOTE: (r => z) => l, (z => l) => r, (l => r) => z // core: pass2-core-light op4s_sdl6_pass_fwd_core_light_s_sse(w, v, /*l*/z, /*r*/l, /*z*/r, in, out, &addr); // NOTE: ((l => r) => z) => l, ((r => z) => l) => r, ((z => l) => r) => z // core: pass2-core-full op4s_sdl6_pass_fwd_core_full_s_sse(w, v, /*l*/l, /*r*/r, /*z*/z, in, out, &addr); // NOTE: z => l, l => r, r => z // core: pass3-core-light op4s_sdl6_pass_fwd_core_light_s_sse(w, v, /*l*/r, /*r*/z, /*z*/l, in, out, &addr); // NOTE: (r => z) => l, (z => l) => r, (l => r) => z // core: pass3-core-full op4s_sdl6_pass_fwd_core_full_s_sse(w, v, /*l*/z, /*r*/l, /*z*/r, in, out, &addr); // NOTE: ((l => r) => z) => l, ((r => z) => l) => r, ((z => l) => r) => z } // core: for t = 0 to T do for(int t = 0; t < T; t++) { // core: pass-core-light op4s_sdl6_pass_fwd_postcore_light_s_sse(w, v, l, r, z, in, out, &addr); // core: pass-core-full op4s_sdl6_pass_fwd_postcore_full_s_sse(w, v, l, r, z, in, out, &addr); } // core: if odd then if( is_odd(S) ) { // core: pass-core-light op4s_sdl6_pass_fwd_postcore_light_s_sse(w, v, l, r, z, in, out, &addr); } // *** epilog2 *** if( is_odd(S) ) { // epilog2: export(3) op4s_sdl6_export_s_sse(l, &base[2*steps], 3); // epilog2: pass-epilog-full op4s_sdl6_pass_fwd_epilog_full_s_sse(w, v, l, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl6_export_s_sse(l, &base[2*steps], 2); // epilog2: pass-epilog-light op4s_sdl6_pass_fwd_epilog_light_s_sse(w, v, l, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl6_export_s_sse(l, &base[2*steps], 1); // epilog2: pass-epilog-full op4s_sdl6_pass_fwd_epilog_full_s_sse(w, v, l, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl6_export_s_sse(l, &base[2*steps], 0); } else { // epilog2: export(3) op4s_sdl6_export_s_sse(l, &base[2*steps], 3); // epilog2: pass-epilog-light op4s_sdl6_pass_fwd_epilog_light_s_sse(w, v, l, r, z, in, out, &addr); // epilog2: export(2) 
op4s_sdl6_export_s_sse(l, &base[2*steps], 2); // epilog2: pass-epilog-full op4s_sdl6_pass_fwd_epilog_full_s_sse(w, v, l, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl6_export_s_sse(l, &base[2*steps], 1); // epilog2: pass-epilog-flush op4s_sdl6_pass_fwd_epilog_flush_s_sse(w, v, l, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl6_export_s_sse(l, &base[2*steps], 0); } } } else { // ****** transform w/o scaling ****** // not implemented yet dwt_util_abort(); } } #endif /* __SSE__ */ /** * @brief Shifted Double-Loop implementation of lifting scheme with 6 * iterations merger. * * i.e. 12 = (6)*(2) = (2*3)*(2) coefficients per one iteration. */ static void accel_lift_op4s_main_sdl6_ref_s( float *restrict arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { // 6+ coeffs implies 3+ steps assert( steps >= 3 ); const float w[4] = { delta, gamma, beta, alpha }; const float v[4] = { 1/zeta, zeta, 1/zeta, zeta }; float l[4]; float r[4]; float z[4]; float in[4]; float out[4]; const int S = steps-3; const int U = S / 6; const int M = S % 6; const int T = M >> 1; if( scaling < 0 ) { // ****** inverse transform ****** for(int wrk = 0; wrk < dwt_util_get_num_workers(); wrk++) { // *** init *** float *addr = ASSUME_ALIGNED(calc_temp_offset2_s(arr, wrk, 0), 16); assert( is_aligned_s(addr) ); float *base = addr; // *** prolog2 *** // prolog2: import-preload op4s_sdl6_preload_prolog_s_ref(w, v, l, r, z, in, out, &addr); // prolog2: import(3) op4s_sdl6_import_s_ref(l, 3, out); // prolog2: pass-prolog-full op4s_sdl6_pass_inv_prolog_full_s_ref(w, v, l, r, z, in, out, &addr); // prolog2: import(2) op4s_sdl6_import_s_ref(l, 2, out); // prolog2: pass-prolog-light op4s_sdl6_pass_inv_prolog_light_s_ref(w, v, l, r, z, in, out, &addr); // prolog2: import(1) op4s_sdl6_import_s_ref(l, 1, out); // prolog2: pass-prolog-full op4s_sdl6_pass_inv_prolog_full_s_ref(w, v, l, r, z, in, out, &addr); // prolog2: import(0) op4s_sdl6_import_s_ref(l, 0, out); // *** core *** // core: for u = 0 to U for(int u = 0; u < U; u++) { // NOTE: l, r, z // core: pass1-core-light op4s_sdl6_pass_inv_core_light_s_ref(w, v, /*l*/l, /*r*/r, /*z*/z, in, out, &addr); // NOTE: z => l, l => r, r => z // core: pass1-core-full op4s_sdl6_pass_inv_core_full_s_ref(w, v, /*l*/r, /*r*/z, /*z*/l, in, out, &addr); // NOTE: (r => z) => l, (z => l) => r, (l => r) => z // core: pass2-core-light op4s_sdl6_pass_inv_core_light_s_ref(w, v, /*l*/z, /*r*/l, /*z*/r, in, out, &addr); // NOTE: ((l => r) => z) => l, ((r => z) => l) => r, ((z => l) => r) => z // core: pass2-core-full op4s_sdl6_pass_inv_core_full_s_ref(w, v, /*l*/l, /*r*/r, /*z*/z, in, out, &addr); // NOTE: z => l, l => r, r => z // core: pass3-core-light op4s_sdl6_pass_inv_core_light_s_ref(w, v, /*l*/r, /*r*/z, /*z*/l, in, out, &addr); // NOTE: (r => z) => l, (z => l) => r, (l => r) => z // core: pass3-core-full op4s_sdl6_pass_inv_core_full_s_ref(w, v, /*l*/z, /*r*/l, /*z*/r, in, out, &addr); // NOTE: ((l => r) => z) => l, ((r => z) => l) => r, ((z => l) => r) => z } // core: for t = 0 to T do for(int t = 0; t < T; t++) { // core: pass-core-light op4s_sdl6_pass_inv_postcore_light_s_ref(w, v, l, r, z, in, out, &addr); // core: pass-core-full op4s_sdl6_pass_inv_postcore_full_s_ref(w, v, l, r, z, in, out, &addr); } // core: if odd then if( is_odd(S) ) { // core: pass-core-light op4s_sdl6_pass_inv_postcore_light_s_ref(w, v, l, r, z, in, out, &addr); } // *** epilog2 *** if( is_odd(S) ) { // epilog2: export(3) op4s_sdl6_export_s_ref(l, &base[2*steps], 3); // 
epilog2: pass-epilog-full op4s_sdl6_pass_inv_epilog_full_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl6_export_s_ref(l, &base[2*steps], 2); // epilog2: pass-epilog-light op4s_sdl6_pass_inv_epilog_light_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl6_export_s_ref(l, &base[2*steps], 1); // epilog2: pass-epilog-full op4s_sdl6_pass_inv_epilog_full_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl6_export_s_ref(l, &base[2*steps], 0); } else { // epilog2: export(3) op4s_sdl6_export_s_ref(l, &base[2*steps], 3); // epilog2: pass-epilog-light op4s_sdl6_pass_inv_epilog_light_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl6_export_s_ref(l, &base[2*steps], 2); // epilog2: pass-epilog-full op4s_sdl6_pass_inv_epilog_full_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl6_export_s_ref(l, &base[2*steps], 1); // epilog2: pass-epilog-flush op4s_sdl6_pass_inv_epilog_flush_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl6_export_s_ref(l, &base[2*steps], 0); } } } else if ( scaling > 0 ) { // ****** forward transform ****** for(int wrk = 0; wrk < dwt_util_get_num_workers(); wrk++) { // *** init *** float *addr = ASSUME_ALIGNED(calc_temp_offset2_s(arr, wrk, 0), 16); assert( is_aligned_s(addr) ); float *base = addr; // *** prolog2 *** // prolog2: import-preload op4s_sdl6_preload_prolog_s_ref(w, v, l, r, z, in, out, &addr); // prolog2: import(3) op4s_sdl6_import_s_ref(l, 3, out); // prolog2: pass-prolog-full op4s_sdl6_pass_fwd_prolog_full_s_ref(w, v, l, r, z, in, out, &addr); // prolog2: import(2) op4s_sdl6_import_s_ref(l, 2, out); // prolog2: pass-prolog-light op4s_sdl6_pass_fwd_prolog_light_s_ref(w, v, l, r, z, in, out, &addr); // prolog2: import(1) op4s_sdl6_import_s_ref(l, 1, out); // prolog2: pass-prolog-full op4s_sdl6_pass_fwd_prolog_full_s_ref(w, v, l, r, z, in, out, &addr); // prolog2: import(0) op4s_sdl6_import_s_ref(l, 0, out); // *** core *** // core: for u = 0 to U for(int u = 0; u < U; u++) { // NOTE: l, r, z // core: pass1-core-light op4s_sdl6_pass_fwd_core_light_s_ref(w, v, /*l*/l, /*r*/r, /*z*/z, in, out, &addr); // NOTE: z => l, l => r, r => z // core: pass1-core-full op4s_sdl6_pass_fwd_core_full_s_ref(w, v, /*l*/r, /*r*/z, /*z*/l, in, out, &addr); // NOTE: (r => z) => l, (z => l) => r, (l => r) => z // core: pass2-core-light op4s_sdl6_pass_fwd_core_light_s_ref(w, v, /*l*/z, /*r*/l, /*z*/r, in, out, &addr); // NOTE: ((l => r) => z) => l, ((r => z) => l) => r, ((z => l) => r) => z // core: pass2-core-full op4s_sdl6_pass_fwd_core_full_s_ref(w, v, /*l*/l, /*r*/r, /*z*/z, in, out, &addr); // NOTE: z => l, l => r, r => z // core: pass3-core-light op4s_sdl6_pass_fwd_core_light_s_ref(w, v, /*l*/r, /*r*/z, /*z*/l, in, out, &addr); // NOTE: (r => z) => l, (z => l) => r, (l => r) => z // core: pass3-core-full op4s_sdl6_pass_fwd_core_full_s_ref(w, v, /*l*/z, /*r*/l, /*z*/r, in, out, &addr); // NOTE: ((l => r) => z) => l, ((r => z) => l) => r, ((z => l) => r) => z } // core: for t = 0 to T do for(int t = 0; t < T; t++) { // core: pass-core-light op4s_sdl6_pass_fwd_postcore_light_s_ref(w, v, l, r, z, in, out, &addr); // core: pass-core-full op4s_sdl6_pass_fwd_postcore_full_s_ref(w, v, l, r, z, in, out, &addr); } // core: if odd then if( is_odd(S) ) { // core: pass-core-light op4s_sdl6_pass_fwd_postcore_light_s_ref(w, v, l, r, z, in, out, &addr); } // *** epilog2 *** if( is_odd(S) ) { // epilog2: export(3) op4s_sdl6_export_s_ref(l, &base[2*steps], 3); // epilog2: pass-epilog-full 
op4s_sdl6_pass_fwd_epilog_full_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl6_export_s_ref(l, &base[2*steps], 2); // epilog2: pass-epilog-light op4s_sdl6_pass_fwd_epilog_light_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl6_export_s_ref(l, &base[2*steps], 1); // epilog2: pass-epilog-full op4s_sdl6_pass_fwd_epilog_full_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl6_export_s_ref(l, &base[2*steps], 0); } else { // epilog2: export(3) op4s_sdl6_export_s_ref(l, &base[2*steps], 3); // epilog2: pass-epilog-light op4s_sdl6_pass_fwd_epilog_light_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl6_export_s_ref(l, &base[2*steps], 2); // epilog2: pass-epilog-full op4s_sdl6_pass_fwd_epilog_full_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl6_export_s_ref(l, &base[2*steps], 1); // epilog2: pass-epilog-flush op4s_sdl6_pass_fwd_epilog_flush_s_ref(w, v, l, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl6_export_s_ref(l, &base[2*steps], 0); } } } else { // ****** transform w/o scaling ****** // not implemented yet dwt_util_abort(); } } /** * @brief SDL with 2 iterations merged. * * i.e. loads and stores 4 = (2)*(2) coefficients in every iteration. */ static void accel_lift_op4s_main_sdl2_ref_s( float *restrict arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { // 6+ coeffs implies 3+ steps assert( steps >= 3 ); const float w[4] = { delta, gamma, beta, alpha }; const float v[4] = { 1/zeta, zeta, 1/zeta, zeta }; float l[4]; float c[4]; float r[4]; float z[4]; float in[4]; float out[4]; const int S = steps-3; const int T = S >> 1; if( scaling < 0 ) { // ****** inverse transform ****** for(int wrk = 0; wrk < dwt_util_get_num_workers(); wrk++) { // *** init *** float *addr = ASSUME_ALIGNED(calc_temp_offset2_s(arr, wrk, 0), 16); assert( is_aligned_s(addr) ); float *base = addr; // *** prolog2 *** // prolog2: import-preload op4s_sdl2_preload_prolog_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(3) op4s_sdl2_import_s_ref(l, 3, out); // prolog2: pass-prolog-full op4s_sdl2_pass_inv_prolog_full_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(2) op4s_sdl2_import_s_ref(l, 2, out); // prolog2: pass-prolog-light op4s_sdl2_pass_inv_prolog_light_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(1) op4s_sdl2_import_s_ref(l, 1, out); // prolog2: pass-prolog-full op4s_sdl2_pass_inv_prolog_full_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(0) op4s_sdl2_import_s_ref(l, 0, out); // *** core *** // core: for t = 0 to T do for(int t = 0; t < T; t++) { // core: pass-core-light op4s_sdl2_pass_inv_core_light_s_ref(w, v, l, c, r, z, in, out, &addr); // core: pass-core-full op4s_sdl2_pass_inv_core_full_s_ref(w, v, l, c, r, z, in, out, &addr); } // core: if odd then if( is_odd(S) ) { // core: pass-core-light op4s_sdl2_pass_inv_core_light_s_ref(w, v, l, c, r, z, in, out, &addr); } // *** epilog2 *** if( is_odd(S) ) { // epilog2: export(3) op4s_sdl2_export_s_ref(l, &base[2*steps], 3); // epilog2: pass-epilog-full op4s_sdl2_pass_inv_epilog_full_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl2_export_s_ref(l, &base[2*steps], 2); // epilog2: pass-epilog-light op4s_sdl2_pass_inv_epilog_light_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl2_export_s_ref(l, &base[2*steps], 1); // epilog2: pass-epilog-full op4s_sdl2_pass_inv_epilog_full_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(0) 
op4s_sdl2_export_s_ref(l, &base[2*steps], 0); } else { // epilog2: export(3) op4s_sdl2_export_s_ref(l, &base[2*steps], 3); // epilog2: pass-epilog-light op4s_sdl2_pass_inv_epilog_light_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl2_export_s_ref(l, &base[2*steps], 2); // epilog2: pass-epilog-full op4s_sdl2_pass_inv_epilog_full_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl2_export_s_ref(l, &base[2*steps], 1); // epilog2: pass-epilog-flush op4s_sdl2_pass_inv_epilog_flush_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl2_export_s_ref(l, &base[2*steps], 0); } } } else if ( scaling > 0 ) { // ****** forward transform ****** for(int wrk = 0; wrk < dwt_util_get_num_workers(); wrk++) { // *** init *** float *addr = ASSUME_ALIGNED(calc_temp_offset2_s(arr, wrk, 0), 16); assert( is_aligned_s(addr) ); float *base = addr; // *** prolog2 *** // prolog2: import-preload op4s_sdl2_preload_prolog_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(3) op4s_sdl2_import_s_ref(l, 3, out); // prolog2: pass-prolog-full op4s_sdl2_pass_fwd_prolog_full_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(2) op4s_sdl2_import_s_ref(l, 2, out); // prolog2: pass-prolog-light op4s_sdl2_pass_fwd_prolog_light_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(1) op4s_sdl2_import_s_ref(l, 1, out); // prolog2: pass-prolog-full op4s_sdl2_pass_fwd_prolog_full_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(0) op4s_sdl2_import_s_ref(l, 0, out); // *** core *** // core: for t = 0 to T do for(int t = 0; t < T; t++) { // core: pass-core-light op4s_sdl2_pass_fwd_core_light_s_ref(w, v, l, c, r, z, in, out, &addr); // core: pass-core-full op4s_sdl2_pass_fwd_core_full_s_ref(w, v, l, c, r, z, in, out, &addr); } // core: if odd then if( is_odd(S) ) { // core: pass-core-light op4s_sdl2_pass_fwd_core_light_s_ref(w, v, l, c, r, z, in, out, &addr); } // *** epilog2 *** if( is_odd(S) ) { // epilog2: export(3) op4s_sdl2_export_s_ref(l, &base[2*steps], 3); // epilog2: pass-epilog-full op4s_sdl2_pass_fwd_epilog_full_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl2_export_s_ref(l, &base[2*steps], 2); // epilog2: pass-epilog-light op4s_sdl2_pass_fwd_epilog_light_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl2_export_s_ref(l, &base[2*steps], 1); // epilog2: pass-epilog-full op4s_sdl2_pass_fwd_epilog_full_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl2_export_s_ref(l, &base[2*steps], 0); } else { // epilog2: export(3) op4s_sdl2_export_s_ref(l, &base[2*steps], 3); // epilog2: pass-epilog-light op4s_sdl2_pass_fwd_epilog_light_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl2_export_s_ref(l, &base[2*steps], 2); // epilog2: pass-epilog-full op4s_sdl2_pass_fwd_epilog_full_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl2_export_s_ref(l, &base[2*steps], 1); // epilog2: pass-epilog-flush op4s_sdl2_pass_fwd_epilog_flush_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl2_export_s_ref(l, &base[2*steps], 0); } } } else { // ****** transform w/o scaling ****** // not implemented yet dwt_util_abort(); } } #ifdef __SSE__ static void accel_lift_op4s_main_sdl2_sse_s( float *restrict arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { // 6+ coeffs implies 3+ steps assert( steps >= 3 ); // FIXME: make global variables? 
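	// The packed constants mirror the reference version: w holds the lifting
	// weights reversed as { delta, gamma, beta, alpha } and v interleaves the
	// scaling factors { 1/zeta, zeta, 1/zeta, zeta }.  Brace-initializing an
	// __m128 like this is a GCC/Clang extension; _mm_setr_ps() would be the
	// portable spelling.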
const __m128 w = { delta, gamma, beta, alpha }; const __m128 v = { 1/zeta, zeta, 1/zeta, zeta }; __m128 l; __m128 c; __m128 r; __m128 z; __m128 in; __m128 out; const int S = steps-3; const int T = S >> 1; if( scaling < 0 ) { // ****** inverse transform ****** for(int wrk = 0; wrk < dwt_util_get_num_workers(); wrk++) { // *** init *** float *addr = ASSUME_ALIGNED(calc_temp_offset2_s(arr, wrk, 0), 16); assert( is_aligned_16(addr) ); float *base = addr; // *** prolog2 *** // prolog2: import-preload op4s_sdl2_preload_prolog_s_sse(w, v, l, c, r, z, in, out, &addr); // prolog2: import(3) op4s_sdl2_import_s_sse(l, 3, out); // prolog2: pass-prolog-full op4s_sdl2_pass_inv_prolog_full_s_sse(w, v, l, c, r, z, in, out, &addr); // prolog2: import(2) op4s_sdl2_import_s_sse(l, 2, out); // prolog2: pass-prolog-light op4s_sdl2_pass_inv_prolog_light_s_sse(w, v, l, c, r, z, in, out, &addr); // prolog2: import(1) op4s_sdl2_import_s_sse(l, 1, out); // prolog2: pass-prolog-full op4s_sdl2_pass_inv_prolog_full_s_sse(w, v, l, c, r, z, in, out, &addr); // prolog2: import(0) op4s_sdl2_import_s_sse(l, 0, out); // *** core *** // core: for t = 0 to T do for(int t = 0; t < T; t++) { // core: pass-core-light op4s_sdl2_pass_inv_core_light_s_sse(w, v, l, c, r, z, in, out, &addr); // core: pass-core-full op4s_sdl2_pass_inv_core_full_s_sse(w, v, l, c, r, z, in, out, &addr); } // core: if odd then if( is_odd(S) ) { // core: pass-core-light op4s_sdl2_pass_inv_core_light_s_sse(w, v, l, c, r, z, in, out, &addr); } // *** epilog2 *** if( is_odd(S) ) { // epilog2: export(3) op4s_sdl2_export_s_sse(l, &base[2*steps], 3); // epilog2: pass-epilog-full op4s_sdl2_pass_inv_epilog_full_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl2_export_s_sse(l, &base[2*steps], 2); // epilog2: pass-epilog-light op4s_sdl2_pass_inv_epilog_light_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl2_export_s_sse(l, &base[2*steps], 1); // epilog2: pass-epilog-full op4s_sdl2_pass_inv_epilog_full_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl2_export_s_sse(l, &base[2*steps], 0); } else { // epilog2: export(3) op4s_sdl2_export_s_sse(l, &base[2*steps], 3); // epilog2: pass-epilog-light op4s_sdl2_pass_inv_epilog_light_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl2_export_s_sse(l, &base[2*steps], 2); // epilog2: pass-epilog-full op4s_sdl2_pass_inv_epilog_full_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl2_export_s_sse(l, &base[2*steps], 1); // epilog2: pass-epilog-flush op4s_sdl2_pass_inv_epilog_flush_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl2_export_s_sse(l, &base[2*steps], 0); } } } else if ( scaling > 0 ) { // ****** forward transform ****** for(int wrk = 0; wrk < dwt_util_get_num_workers(); wrk++) { // *** init *** float *addr = ASSUME_ALIGNED(calc_temp_offset2_s(arr, wrk, 0), 16); assert( is_aligned_16(addr) ); float *base = addr; // *** prolog2 *** // prolog2: import-preload op4s_sdl2_preload_prolog_s_sse(w, v, l, c, r, z, in, out, &addr); // prolog2: import(3) op4s_sdl2_import_s_sse(l, 3, out); // prolog2: pass-prolog-full op4s_sdl2_pass_fwd_prolog_full_s_sse(w, v, l, c, r, z, in, out, &addr); // prolog2: import(2) op4s_sdl2_import_s_sse(l, 2, out); // prolog2: pass-prolog-light op4s_sdl2_pass_fwd_prolog_light_s_sse(w, v, l, c, r, z, in, out, &addr); // prolog2: import(1) op4s_sdl2_import_s_sse(l, 1, out); // prolog2: pass-prolog-full op4s_sdl2_pass_fwd_prolog_full_s_sse(w, v, l, c, r, z, in, out, &addr); 
// prolog2: import(0) op4s_sdl2_import_s_sse(l, 0, out); // *** core *** // core: for t = 0 to T do for(int t = 0; t < T; t++) { // core: pass-core-light op4s_sdl2_pass_fwd_core_light_s_sse(w, v, l, c, r, z, in, out, &addr); // core: pass-core-full op4s_sdl2_pass_fwd_core_full_s_sse(w, v, l, c, r, z, in, out, &addr); } // core: if odd then if( is_odd(S) ) { // core: pass-core-light op4s_sdl2_pass_fwd_core_light_s_sse(w, v, l, c, r, z, in, out, &addr); } // *** epilog2 *** if( is_odd(S) ) { // epilog2: export(3) op4s_sdl2_export_s_sse(l, &base[2*steps], 3); // epilog2: pass-epilog-full op4s_sdl2_pass_fwd_epilog_full_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl2_export_s_sse(l, &base[2*steps], 2); // epilog2: pass-epilog-light op4s_sdl2_pass_fwd_epilog_light_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl2_export_s_sse(l, &base[2*steps], 1); // epilog2: pass-epilog-full op4s_sdl2_pass_fwd_epilog_full_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl2_export_s_sse(l, &base[2*steps], 0); } else { // epilog2: export(3) op4s_sdl2_export_s_sse(l, &base[2*steps], 3); // epilog2: pass-epilog-light op4s_sdl2_pass_fwd_epilog_light_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl2_export_s_sse(l, &base[2*steps], 2); // epilog2: pass-epilog-full op4s_sdl2_pass_fwd_epilog_full_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl2_export_s_sse(l, &base[2*steps], 1); // epilog2: pass-epilog-flush op4s_sdl2_pass_fwd_epilog_flush_s_sse(w, v, l, c, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl2_export_s_sse(l, &base[2*steps], 0); } } } else { // ****** transform w/o scaling ****** // not implemented yet dwt_util_abort(); } } #endif /* __SSE__ */ /** * @brief Shifted double-loop algorithm. * * This function processes 2 coefficients (even + odd) per one iteration. 
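 *
 * The schedule is: a prolog that imports l[3..0] from the left border
 * (interleaved with three prolog passes), S = steps-3 core passes, and an
 * epilog that exports l[3..0] past the right border at &base[2*steps]
 * (again interleaved with three epilog passes).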
*/ static void accel_lift_op4s_main_sdl_ref_s( float *restrict arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { // 6+ coeffs implies 3+ steps assert( steps >= 3 ); const float w[4] = { delta, gamma, beta, alpha }; const float v[4] = { 1/zeta, zeta, 1/zeta, zeta }; float l[4]; float c[4]; float r[4]; float z[4]; float in[4]; float out[4]; const int S = steps-3; if( scaling < 0 ) { // ****** inverse transform ****** for(int wrk = 0; wrk < dwt_util_get_num_workers(); wrk++) { // *** init *** float *addr = ASSUME_ALIGNED(calc_temp_offset2_s(arr, wrk, 0), 16); float *base = addr+0; assert( is_aligned_s(addr) ); // *** prolog2 *** // prolog2: import(3) op4s_sdl_import_s_ref(l, base, 3); // prolog2: pass-prolog op4s_sdl_pass_inv_prolog_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(2) op4s_sdl_import_s_ref(l, base, 2); // prolog2: pass-prolog op4s_sdl_pass_inv_prolog_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(1) op4s_sdl_import_s_ref(l, base, 1); // prolog2: pass-prolog op4s_sdl_pass_inv_prolog_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(0) op4s_sdl_import_s_ref(l, base, 0); // *** core *** // core: for s = 0 to S do for(int s = 0; s < S; s++) { // core: pass-core op4s_sdl_pass_inv_core_s_ref(w, v, l, c, r, z, in, out, &addr); } // *** epilog2 *** // epilog2: export(3) op4s_sdl_export_s_ref(l, &base[2*steps], 3); // epilog2: pass-epilog op4s_sdl_pass_inv_epilog_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl_export_s_ref(l, &base[2*steps], 2); // epilog2: pass-epilog op4s_sdl_pass_inv_epilog_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl_export_s_ref(l, &base[2*steps], 1); // epilog2: pass-epilog op4s_sdl_pass_inv_epilog_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl_export_s_ref(l, &base[2*steps], 0); } } else if ( scaling > 0 ) { // ****** forward transform ****** for(int wrk = 0; wrk < dwt_util_get_num_workers(); wrk++) { // *** init *** float *addr = ASSUME_ALIGNED(calc_temp_offset2_s(arr, wrk, 0), 16); float *base = addr+0; assert( is_aligned_s(addr) ); // *** prolog2 *** // prolog2: import(3) op4s_sdl_import_s_ref(l, base, 3); // prolog2: pass-prolog op4s_sdl_pass_fwd_prolog_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(2) op4s_sdl_import_s_ref(l, base, 2); // prolog2: pass-prolog op4s_sdl_pass_fwd_prolog_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(1) op4s_sdl_import_s_ref(l, base, 1); // prolog2: pass-prolog op4s_sdl_pass_fwd_prolog_s_ref(w, v, l, c, r, z, in, out, &addr); // prolog2: import(0) op4s_sdl_import_s_ref(l, base, 0); // *** core *** // core: for s = 0 to S do for(int s = 0; s < S; s++) { // core: pass-core op4s_sdl_pass_fwd_core_s_ref(w, v, l, c, r, z, in, out, &addr); } // *** epilog2 *** // epilog2: export(3) op4s_sdl_export_s_ref(l, &base[2*steps], 3); // epilog2: pass-epilog op4s_sdl_pass_fwd_epilog_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(2) op4s_sdl_export_s_ref(l, &base[2*steps], 2); // epilog2: pass-epilog op4s_sdl_pass_fwd_epilog_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(1) op4s_sdl_export_s_ref(l, &base[2*steps], 1); // epilog2: pass-epilog op4s_sdl_pass_fwd_epilog_s_ref(w, v, l, c, r, z, in, out, &addr); // epilog2: export(0) op4s_sdl_export_s_ref(l, &base[2*steps], 0); } } else { // ****** transform w/o scaling ****** // not implemented yet dwt_util_abort(); } } static void accel_lift_op4s_fwd_main_sdl_stride_ref_part_prolog2_s( float 
*base, const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float **addr, int stride ) { // prolog2: import(3) op4s_sdl_import_stride_s_ref(l, base, 3, stride); // prolog2: pass-prolog op4s_sdl_pass_fwd_prolog_stride_s_ref(w, v, l, c, r, z, in, out, addr, stride); // prolog2: import(2) op4s_sdl_import_stride_s_ref(l, base, 2, stride); // prolog2: pass-prolog op4s_sdl_pass_fwd_prolog_stride_s_ref(w, v, l, c, r, z, in, out, addr, stride); // prolog2: import(1) op4s_sdl_import_stride_s_ref(l, base, 1, stride); // prolog2: pass-prolog op4s_sdl_pass_fwd_prolog_stride_s_ref(w, v, l, c, r, z, in, out, addr, stride); // prolog2: import(0) op4s_sdl_import_stride_s_ref(l, base, 0, stride); } #ifdef __SSE__ #define accel_lift_op4s_fwd_main_sdl_stride_sse_part_prolog2_s(base,w,v,l,c,r,z,in,out,addr,stride) \ do { \ op4s_sdl_import_stride_s_sse(l, base, 3, stride); \ op4s_sdl_pass_fwd_prolog_stride_s_sse(w, v, l, c, r, z, in, out, addr, stride); \ op4s_sdl_import_stride_s_sse(l, base, 2, stride); \ op4s_sdl_pass_fwd_prolog_stride_s_sse(w, v, l, c, r, z, in, out, addr, stride); \ op4s_sdl_import_stride_s_sse(l, base, 1, stride); \ op4s_sdl_pass_fwd_prolog_stride_s_sse(w, v, l, c, r, z, in, out, addr, stride); \ op4s_sdl_import_stride_s_sse(l, base, 0, stride); \ } while(0) #endif static void op4_fwd_sdl_prolog2_s( float *ptr0, float *ptr1, float *ptr2, float *ptr3, float *ptr4, float *ptr5, float *ptr6, float *ptr7, float *ptr8, float *ptr9, const float *w, const float *v, // unused float *l, float *c, float *r ) { UNUSED(v); float buff[2]; float t[4]; // part0 // prolog2: import(3) l[3] = *ptr3; // base+3 // part1 // prolog2: pass-prolog op4s_sdl_shuffle_s_ref(c, r); buff[0] = *ptr4; // base+0+4+0 buff[1] = *ptr5; // base+0+4+1 op4s_sdl_input_s_ref(buff, c, r); op4s_sdl_op_s_ref(t, c, w, l, r); op4s_sdl_update_s_ref(c, l, r, t); // part2 // prolog2: import(2) l[2] = *ptr2; // base+2 // part3 // prolog2: pass-prolog op4s_sdl_shuffle_s_ref(c, r); buff[0] = *ptr6; // base+2+4+0 buff[1] = *ptr7; // base+2+4+1 op4s_sdl_input_s_ref(buff, c, r); op4s_sdl_op_s_ref(t, c, w, l, r); op4s_sdl_update_s_ref(c, l, r, t); // part4 // prolog2: import(1) l[1] = *ptr1; // base+1 // part5 // prolog2: pass-prolog op4s_sdl_shuffle_s_ref(c, r); buff[0] = *ptr8; // base+4+4+0 buff[1] = *ptr9; // base+4+4+1 op4s_sdl_input_s_ref(buff, c, r); op4s_sdl_op_s_ref(t, c, w, l, r); op4s_sdl_update_s_ref(c, l, r, t); // part6 // prolog2: import(0) l[0] = *ptr0; // base+0 } // part0 static void op4_fwd_sdl_prolog2_part0_s( float *ptr3, const float *w, const float *v, // unused float *l, float *c, float *r ) { UNUSED(w); UNUSED(v); UNUSED(c); UNUSED(r); // prolog2: import(3) l[3] = *ptr3; // base+3 } static void op4_fwd_sdl_prolog2_import_s( float *ptr, float *lcr, int idx ) { // prolog2: import(i) (lcr+0)[idx] = *ptr; // base+i } // part2 static void op4_fwd_sdl_prolog2_part2_s( float *ptr2, const float *w, const float *v, // unused float *l, float *c, float *r ) { UNUSED(w); UNUSED(v); UNUSED(c); UNUSED(r); // prolog2: import(2) l[2] = *ptr2; // base+2 } // part4 static void op4_fwd_sdl_prolog2_part4_s( float *ptr1, const float *w, const float *v, // unused float *l, float *c, float *r ) { UNUSED(w); UNUSED(v); UNUSED(c); UNUSED(r); // prolog2: import(1) l[1] = *ptr1; // base+1 } // part6 static void op4_fwd_sdl_prolog2_part6_s( float *ptr0, const float *w, const float *v, // unused float *l, float *c, float *r ) { UNUSED(w); UNUSED(v); UNUSED(c); UNUSED(r); // prolog2: import(0) l[0] = *ptr0; 
// base+0 } // part_odd static void op4_fwd_sdl_prolog2_part_s( float *ptr0, float *ptr1, const float *w, const float *v, float *l, float *c, float *r ) { UNUSED(v); #ifdef __SSE__ __m128 buff, z; buff[0] = *ptr0; buff[1] = *ptr1; op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)c, *(__m128 *)r); op4s_sdl2_op_s_sse(z, *(__m128 *)c, *(__m128 *)w, *(__m128 *)l, *(__m128 *)r); op4s_sdl2_update_s_sse(*(__m128 *)c, *(__m128 *)l, *(__m128 *)r, z); #else // TODO: test this float buff[4], z[4]; buff[0] = *ptr0; buff[1] = *ptr1; op4s_sdl2_shuffle_input_low_s_ref(buff, c, r); op4s_sdl2_op_s_ref(z, c, w, l, r); op4s_sdl2_update_s_ref(c, l, r, z); #endif } static void accel_lift_op4s_fwd_main_sdl_stride_ref_part_epilog2_s( float *base, const float *w, const float *v, float *l, float *c, float *r, float *z, float *in, float *out, float **addr, int stride ) { // epilog2: export(3) op4s_sdl_export_stride_s_ref(l, base, 3, stride); // epilog2: pass-epilog op4s_sdl_pass_fwd_epilog_stride_s_ref(w, v, l, c, r, z, in, out, addr, stride); // epilog2: export(2) op4s_sdl_export_stride_s_ref(l, base, 2, stride); // epilog2: pass-epilog op4s_sdl_pass_fwd_epilog_stride_s_ref(w, v, l, c, r, z, in, out, addr, stride); // epilog2: export(1) op4s_sdl_export_stride_s_ref(l, base, 1, stride); // epilog2: pass-epilog op4s_sdl_pass_fwd_epilog_stride_s_ref(w, v, l, c, r, z, in, out, addr, stride); // epilog2: export(0) op4s_sdl_export_stride_s_ref(l, base, 0, stride); } #ifdef __SSE__ #define accel_lift_op4s_fwd_main_sdl_stride_sse_part_epilog2_s(base,w,v,l,c,r,z,in,out,addr,stride) \ do { \ op4s_sdl_export_stride_s_sse(l, base, 3, stride); \ op4s_sdl_pass_fwd_epilog_stride_s_sse(w, v, l, c, r, z, in, out, addr, stride); \ op4s_sdl_export_stride_s_sse(l, base, 2, stride); \ op4s_sdl_pass_fwd_epilog_stride_s_sse(w, v, l, c, r, z, in, out, addr, stride); \ op4s_sdl_export_stride_s_sse(l, base, 1, stride); \ op4s_sdl_pass_fwd_epilog_stride_s_sse(w, v, l, c, r, z, in, out, addr, stride); \ op4s_sdl_export_stride_s_sse(l, base, 0, stride); \ } while(0) #endif #if 1 static void op4_fwd_sdl_epilog2_part_s( float *ptr0, float *ptr1, const float *w, const float *v, float *l, float *c, float *r ) { #ifndef __SSE__ float buff[2]; float t[4]; op4s_sdl_shuffle_s_ref(c, r); op4s_sdl_op_s_ref(t, c, w, l, r); op4s_sdl_output_s_ref(buff, l, t); op4s_sdl_scale_s_ref(buff, v); op4s_sdl_update_s_ref(c, l, r, t); #else __m128 buff, z; op4s_sdl2_shuffle_s_sse(*(__m128 *)c, *(__m128 *)r); op4s_sdl2_op_s_sse(z, *(__m128 *)c, *(__m128 *)w, *(__m128 *)l, *(__m128 *)r); op4s_sdl2_output_low_s_sse(buff, *(__m128 *)l, z); op4s_sdl2_scale_s_sse(buff, *(__m128 *)v); op4s_sdl2_update_s_sse(*(__m128 *)c, *(__m128 *)l, *(__m128 *)r, z); #endif *ptr0 = buff[0]; *ptr1 = buff[1]; } #endif #ifdef __SSE__ static void op4_fwd_sdl_epilog2_s( float *ptr_6, float *ptr_5, float *ptr_4, float *ptr_3, float *ptr_2, float *ptr_1, float *ptr0, float *ptr1, float *ptr2, float *ptr3, const float *w, const float *v, float *l, float *c, float *r ) { // epilog2: export(3) *ptr3 = l[3]; // base+3 // epilog2: pass-epilog op4_fwd_sdl_epilog2_part_s( ptr_6, ptr_5, w, v, l, c, r ); // epilog2: export(2) *ptr2 = l[2]; // base+2 // epilog2: pass-epilog op4_fwd_sdl_epilog2_part_s( ptr_4, ptr_3, w, v, l, c, r ); // epilog2: export(1) *ptr1 = l[1]; // base+1 // epilog2: pass-epilog op4_fwd_sdl_epilog2_part_s( ptr_2, ptr_1, w, v, l, c, r ); // epilog2: export(0) *ptr0 = l[0]; // base+0 } #endif #if 1 static void op4_fwd_sdl_epilog2_fast_s( float *ptr0, float *ptr1, float *ptr2, float 
*ptr3, float *ptr4, float *ptr5, float *ptr6, float *ptr7, float *ptr8, float *ptr9, const float *w, const float *v, float *lcr ) { // epilog2: export(3) *ptr9 = (lcr+0)[3]; // base+3 // epilog2: pass-epilog op4_fwd_sdl_epilog2_part_s( ptr0, ptr1, w, v, (lcr+0), (lcr+4), (lcr+8) ); // epilog2: export(2) *ptr8 = (lcr+0)[2]; // base+2 // epilog2: pass-epilog op4_fwd_sdl_epilog2_part_s( ptr2, ptr3, w, v, (lcr+0), (lcr+4), (lcr+8) ); // epilog2: export(1) *ptr7 = (lcr+0)[1]; // base+1 // epilog2: pass-epilog op4_fwd_sdl_epilog2_part_s( ptr4, ptr5, w, v, (lcr+0), (lcr+4), (lcr+8) ); // epilog2: export(0) *ptr6 = (lcr+0)[0]; // base+0 } #endif static void accel_lift_op4s_fwd_main_dl_stride_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ); static void accel_lift_op4s_fwd_main_sdl_stride_ref_part_exception_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { if( steps < 3 ) { accel_lift_op4s_fwd_main_dl_stride_s( arr, steps, alpha, beta, gamma, delta, zeta, scaling, stride ); } } static void accel_lift_op4s_fwd_main_sdl_stride_ref_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { assert( scaling > 0 ); assert( 1 == dwt_util_get_num_workers() ); accel_lift_op4s_fwd_main_sdl_stride_ref_part_exception_s( arr, steps, alpha, beta, gamma, delta, zeta, scaling, stride ); if( steps < 3 ) return; const float w[4] = { delta, gamma, beta, alpha }; const float v[4] = { 1/zeta, zeta, 1/zeta, zeta }; float l[4]; float c[4]; float r[4]; float z[4]; float in[4]; float out[4]; const int S = steps-3; // *** init *** float *addr = arr; // *** prolog2 *** accel_lift_op4s_fwd_main_sdl_stride_ref_part_prolog2_s(arr, w, v, l, c, r, z, in, out, &addr, stride); // *** core *** for(int s = 0; s < S; s++) { // core: pass-core op4s_sdl_pass_fwd_core_stride_s_ref(w, v, l, c, r, z, in, out, &addr, stride); } // *** epilog2 *** accel_lift_op4s_fwd_main_sdl_stride_ref_part_epilog2_s(addr1_s(arr,2*steps,stride), w, v, l, c, r, z, in, out, &addr, stride); } #ifdef __SSE__ static void accel_lift_op4s_fwd_main_sdl_stride_sse_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { assert( scaling > 0 ); assert( 1 == dwt_util_get_num_workers() ); accel_lift_op4s_fwd_main_sdl_stride_ref_part_exception_s( arr, steps, alpha, beta, gamma, delta, zeta, scaling, stride ); if( steps < 3 ) return; const __m128 w = { delta, gamma, beta, alpha }; const __m128 v = { 1/zeta, zeta, 1/zeta, zeta }; __m128 l; __m128 c; __m128 r; __m128 z; __m128 in; __m128 out; const int S = steps-3; // *** init *** float *addr = arr; // *** prolog2 *** accel_lift_op4s_fwd_main_sdl_stride_sse_part_prolog2_s(arr, w, v, l, c, r, z, in, out, &addr, stride); // *** core *** for(int s = 0; s < S; s++) { // core: pass-core op4s_sdl_pass_fwd_core_stride_s_sse(w, v, l, c, r, z, in, out, &addr, stride); } // *** epilog2 *** accel_lift_op4s_fwd_main_sdl_stride_sse_part_epilog2_s(addr1_s(arr,2*steps,stride), w, v, l, c, r, z, in, out, &addr, stride); } #endif #ifdef __x86_64__ static void accel_lift_op4s_main_dl_nosse_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) // this long string disables SSE support (only no-sse is not enough) __attribute__ ((__target__ ("no-mmx,no-sse,no-sse2,no-sse3,no-sse4,no-sse4.1"))); #endif static void accel_lift_op4s_main_dl_nosse_s( float 
*arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { assert( steps >= 0 ); if( scaling < 0 ) { for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = calc_temp_offset2_s(arr, w, 0); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // aux. variables float in[2]; float out[2]; float r[4]; float c[4]; float l[4]; // values that have to be passed from iteration to iteration // slide in left border l[0] = arr_local[0]; l[1] = arr_local[1]; l[2] = arr_local[2]; l[3] = arr_local[3]; // init float *addr = arr_local + 4; // loop by pairs from left to right for(int s = 0; s < steps; s++) { // inputs in[0] = addr[0]; in[1] = addr[1]; // scales in[0] = in[0] * v[0]; in[1] = in[1] * v[1]; // shuffles out[0] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = in[0]; // operation z[] = c[] + w[] * ( l[] + r[] ) // by sequential computation from top/right to bottom/left r[3] = in[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); out[1] = c[0]+w[0]*(l[0]+r[0]); // outputs addr[0-4] = out[0]; addr[1-4] = out[1]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; // pointers addr += 2; } // slide out right border addr[0-4] = l[0]; addr[1-4] = l[1]; addr[2-4] = l[2]; addr[3-4] = l[3]; } } else if ( scaling > 0 ) { for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = calc_temp_offset2_s(arr, w, 0); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // aux. variables float in[2]; float out[2]; float r[4]; float c[4]; float l[4]; // values that have to be passed from iteration to iteration // slide in left border l[0] = arr_local[0]; l[1] = arr_local[1]; l[2] = arr_local[2]; l[3] = arr_local[3]; // init float *addr = arr_local + 4; // loop by pairs from left to right for(int s = 0; s < steps; s++) { // inputs in[0] = addr[0]; in[1] = addr[1]; // shuffles out[0] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = in[0]; // operation z[] = c[] + w[] * ( l[] + r[] ) // by sequential computation from top/right to bottom/left r[3] = in[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); out[1] = c[0]+w[0]*(l[0]+r[0]); // scales out[0] = out[0] * v[0]; out[1] = out[1] * v[1]; // outputs addr[0-4] = out[0]; addr[1-4] = out[1]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; // pointers addr += 2; } // slide out right border addr[0-4] = l[0]; addr[1-4] = l[1]; addr[2-4] = l[2]; addr[3-4] = l[3]; } } else { // fallback, not implemented accel_lift_op4s_main_s(arr, steps, alpha, beta, gamma, delta, zeta, scaling); } } /** * @brief Double-loop algorithm from Rade Kutil: A Single-Loop Approach to * SIMD Parallelization of 2-D Wavelet Lifting. */ static void accel_lift_op4s_main_dl_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { assert( steps >= 0 ); if( scaling < 0 ) { for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = calc_temp_offset2_s(arr, w, 0); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // aux. 
variables float in[2]; float out[2]; float r[4]; float c[4]; float l[4]; // values that have to be passed from iteration to iteration // slide in left border l[0] = arr_local[0]; l[1] = arr_local[1]; l[2] = arr_local[2]; l[3] = arr_local[3]; // init float *addr = ASSUME_ALIGNED(arr_local + 4, 16); // loop by pairs from left to right for(int s = 0; s < steps; s++) { // inputs in[0] = addr[0]; in[1] = addr[1]; // scales in[0] = in[0] * v[0]; in[1] = in[1] * v[1]; // shuffles out[0] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = in[0]; // operation z[] = c[] + w[] * ( l[] + r[] ) // by sequential computation from top/right to bottom/left r[3] = in[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); out[1] = c[0]+w[0]*(l[0]+r[0]); // outputs addr[0-4] = out[0]; addr[1-4] = out[1]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; // pointers addr += 2; } // slide out right border addr[0-4] = l[0]; addr[1-4] = l[1]; addr[2-4] = l[2]; addr[3-4] = l[3]; } } else if ( scaling > 0 ) { for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = calc_temp_offset2_s(arr, w, 0); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // aux. variables float in[2]; float out[2]; float r[4]; float c[4]; float l[4]; // values that have to be passed from iteration to iteration // slide in left border l[0] = arr_local[0]; l[1] = arr_local[1]; l[2] = arr_local[2]; l[3] = arr_local[3]; // init float *addr = ASSUME_ALIGNED(arr_local + 4, 16); // loop by pairs from left to right for(int s = 0; s < steps; s++) { // inputs in[0] = addr[0]; in[1] = addr[1]; // shuffles out[0] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = in[0]; // operation z[] = c[] + w[] * ( l[] + r[] ) // by sequential computation from top/right to bottom/left r[3] = in[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); out[1] = c[0]+w[0]*(l[0]+r[0]); // scales out[0] = out[0] * v[0]; out[1] = out[1] * v[1]; // outputs addr[0-4] = out[0]; addr[1-4] = out[1]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; // pointers addr += 2; } // slide out right border addr[0-4] = l[0]; addr[1-4] = l[1]; addr[2-4] = l[2]; addr[3-4] = l[3]; } } else { // fallback, not implemented accel_lift_op4s_main_s(arr, steps, alpha, beta, gamma, delta, zeta, scaling); } } static void accel_lift_op4s_fwd_main_dl_stride_pair_prolog0_s( float *ptr0, float *ptr1, float *out0, float *out1, float alpha, float beta, float gamma, float delta, float zeta, float *l // [4] ) { UNUSED(out0); UNUSED(out1); UNUSED(alpha); UNUSED(beta); UNUSED(gamma); UNUSED(delta); UNUSED(zeta); l[0] = *ptr0; l[1] = *ptr1; } static void accel_lift_op4s_fwd_main_dl_stride_pair_prolog1_s( float *ptr0, float *ptr1, float *out0, float *out1, float alpha, float beta, float gamma, float delta, float zeta, float *l // [4] ) { UNUSED(out0); UNUSED(out1); UNUSED(alpha); UNUSED(beta); UNUSED(gamma); UNUSED(delta); UNUSED(zeta); l[2] = *ptr0; l[3] = *ptr1; } static void accel_lift_op4s_fwd_main_dl_stride_pair_core_s( float *ptr0, float *ptr1, float *out0, float *out1, float alpha, float beta, float gamma, float delta, float zeta, float *l // [4] ) { // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // aux. 
variables float in[2]; float out[2]; float r[4]; float c[4]; // inputs in[0] = *ptr0; in[1] = *ptr1; // shuffles out[0] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = in[0]; // operation z[] = c[] + w[] * ( l[] + r[] ) // by sequential computation from top/right to bottom/left r[3] = in[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); out[1] = c[0]+w[0]*(l[0]+r[0]); // scales out[0] = out[0] * v[0]; out[1] = out[1] * v[1]; // outputs *out0 = out[0]; *out1 = out[1]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; } #ifdef __SSE__ static void cdf97_fwd_core_dl_sc_sse_2x2_s( float *ptr_y0_x0, // in float *ptr_y0_x1, // in float *ptr_y1_x0, // in float *ptr_y1_x1, // in float *out_y0_x0, // out float *out_y0_x1, // out float *out_y1_x0, // out float *out_y1_x1, // out float *buff_h0, // [4] float *buff_h1, // [4] float *buff_v0, // [4] float *buff_v1 // [4] ) { const __m128 w = { dwt_cdf97_u2_s, -dwt_cdf97_p2_s, dwt_cdf97_u1_s, -dwt_cdf97_p1_s }; const __m128 v_vertL = { 1/(dwt_cdf97_s1_s*dwt_cdf97_s1_s), 1.f, 0.f, 0.f }; const __m128 v_vertR = { 1.f, (dwt_cdf97_s1_s*dwt_cdf97_s1_s), 0.f, 0.f }; // temp __m128 t; // aux. variables __m128 x, y, r, c; // horiz 1 { float *l = buff_h0; // inputs x[0] = *ptr_y0_x0; x[1] = *ptr_y0_x1; // shuffles y[0] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = x[0]; // operation r[3] = x[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); y[1] = c[0]+w[0]*(l[0]+r[0]); // outputs t[0] = y[0]; t[1] = y[1]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; } // horiz 2 { float *l = buff_h1; // inputs x[0] = *ptr_y1_x0; x[1] = *ptr_y1_x1; // shuffles y[0] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = x[0]; // operation r[3] = x[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); y[1] = c[0]+w[0]*(l[0]+r[0]); // outputs t[2] = y[0]; t[3] = y[1]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; } // vert 1 { float *l = buff_v0; // inputs x[0] = t[0]; x[1] = t[2]; // shuffles y[0] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = x[0]; // operation r[3] = x[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); y[1] = c[0]+w[0]*(l[0]+r[0]); // scaling y[0] *= v_vertL[0]; y[1] *= v_vertL[1]; // outputs *out_y0_x0 = y[0]; *out_y1_x0 = y[1]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; } // vert 2 { float *l = buff_v1; // inputs x[0] = t[1]; x[1] = t[3]; // shuffles y[2] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = x[0]; // operation r[3] = x[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); y[3] = c[0]+w[0]*(l[0]+r[0]); // scaling y[2] *= v_vertR[0]; y[3] *= v_vertR[1]; // outputs *out_y0_x1 = y[2]; *out_y1_x1 = y[3]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; } } #endif static void accel_lift_op4s_fwd_main_dl_stride_pair_core_2x2_s( float *ptr_y0_x0, // in float *ptr_y0_x1, // in float *ptr_y1_x0, // in float *ptr_y1_x1, // in float *out_y0_x0, // out float *out_y0_x1, // out float *out_y1_x0, // out float *out_y1_x1, // out float alpha, // w float beta, // w float gamma, // w float delta, // w float zeta, // v float *buff_h0, // [4] float *buff_h1, // [4] float *buff_v0, // [4] float *buff_v1 // [4] ) { float tmp[4]; float *tmp_y0_x0 = tmp+0; float *tmp_y0_x1 = tmp+1; float *tmp_y1_x0 = tmp+2; float *tmp_y1_x1 = tmp+3; // horizontal 
0 // [y+0, x+0], [y+0, x+1] => [y+0, x+0-4], [y+0, x+1-4] accel_lift_op4s_fwd_main_dl_stride_pair_core_s( ptr_y0_x0, ptr_y0_x1, tmp_y0_x0, tmp_y0_x1, alpha, beta, gamma, delta, zeta, buff_h0 ); // horizontal 1 // [y+1, x+0], [y+1, x+1] => [y+1, x+0-4], [y+1, x+1-4] accel_lift_op4s_fwd_main_dl_stride_pair_core_s( ptr_y1_x0, ptr_y1_x1, tmp_y1_x0, tmp_y1_x1, alpha, beta, gamma, delta, zeta, buff_h1 ); // vertical 0 // [y+0, x+0-4] [y+1, x+0-4] => [y+0-4, x+0-4] [y+1-4, x+0-4] accel_lift_op4s_fwd_main_dl_stride_pair_core_s( tmp_y0_x0, tmp_y1_x0, out_y0_x0, out_y1_x0, alpha, beta, gamma, delta, zeta, buff_v0 ); // vertical 1 // [y+0, x+1-4] [y+1, x+1-4] => [y+0-4, x+1-4] [y+1-4, x+1-4] accel_lift_op4s_fwd_main_dl_stride_pair_core_s( tmp_y0_x1, tmp_y1_x1, out_y0_x1, out_y1_x1, alpha, beta, gamma, delta, zeta, buff_v1 ); } static void accel_lift_op4s_fwd_main_dl_stride_pair_epilog0_s( float *ptr0, float *ptr1, float *out0, float *out1, float alpha, float beta, float gamma, float delta, float zeta, float *l // [4] ) { UNUSED(ptr0); UNUSED(ptr1); UNUSED(alpha); UNUSED(beta); UNUSED(gamma); UNUSED(delta); UNUSED(zeta); *out0 = l[0]; *out1 = l[1]; } static void accel_lift_op4s_fwd_main_dl_stride_pair_epilog1_s( float *ptr0, float *ptr1, float *out0, float *out1, float alpha, float beta, float gamma, float delta, float zeta, float *l // [4] ) { UNUSED(ptr0); UNUSED(ptr1); UNUSED(alpha); UNUSED(beta); UNUSED(gamma); UNUSED(delta); UNUSED(zeta); *out0 = l[2]; *out1 = l[3]; } /** * vertical vectorisation (double-loop approach), forward transform */ static void accel_lift_op4s_fwd_main_dl_stride_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { assert( steps >= 0 ); assert( scaling > 0 ); float l[4]; accel_lift_op4s_fwd_main_dl_stride_pair_prolog0_s( addr1_s(arr, 0, stride), addr1_s(arr, 1, stride), NULL, NULL, alpha, beta, gamma, delta, zeta, l ); accel_lift_op4s_fwd_main_dl_stride_pair_prolog1_s( addr1_s(arr, 2, stride), addr1_s(arr, 3, stride), NULL, NULL, alpha, beta, gamma, delta, zeta, l ); // init float *addr = addr1_s(arr, 4, stride); // loop by pairs from left to right for(int s = 0; s < steps; s++) { accel_lift_op4s_fwd_main_dl_stride_pair_core_s( addr1_s(addr, 0, stride), addr1_s(addr, 1, stride), addr1_s(addr, 0-4, stride), addr1_s(addr, 1-4, stride), alpha, beta, gamma, delta, zeta, l ); // pointers addr = addr1_s(addr, 2, stride); } accel_lift_op4s_fwd_main_dl_stride_pair_epilog0_s( NULL, NULL, addr1_s(addr, 0-4, stride), addr1_s(addr, 1-4, stride), alpha, beta, gamma, delta, zeta, l ); accel_lift_op4s_fwd_main_dl_stride_pair_epilog1_s( NULL, NULL, addr1_s(addr, 2-4, stride), addr1_s(addr, 3-4, stride), alpha, beta, gamma, delta, zeta, l ); } /** * vertical vectorisation (double-loop approach), inverse transform */ static void accel_lift_op4s_inv_main_dl_stride_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { assert( steps >= 0 ); assert( scaling < 0 ); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // aux. 
variables float in[2]; float out[2]; float r[4]; float c[4]; float l[4]; // values that have to be passed from iteration to iteration // slide in left border l[0] = *addr1_s(arr, 0, stride); l[1] = *addr1_s(arr, 1, stride); l[2] = *addr1_s(arr, 2, stride); l[3] = *addr1_s(arr, 3, stride); // init float *addr = addr1_s(arr, 4, stride); // loop by pairs from left to right for(int s = 0; s < steps; s++) { // inputs in[0] = *addr1_s(addr, 0, stride); in[1] = *addr1_s(addr, 1, stride); // scales in[0] = in[0] * v[0]; in[1] = in[1] * v[1]; // shuffles out[0] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = in[0]; // operation z[] = c[] + w[] * ( l[] + r[] ) // by sequential computation from top/right to bottom/left r[3] = in[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); out[1] = c[0]+w[0]*(l[0]+r[0]); // outputs *addr1_s(addr, 0-4, stride) = out[0]; *addr1_s(addr, 1-4, stride) = out[1]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; // pointers addr = addr1_s(addr, 2, stride); } // slide out right border *addr1_s(addr, 0-4, stride) = l[0]; *addr1_s(addr, 1-4, stride) = l[1]; *addr1_s(addr, 2-4, stride) = l[2]; *addr1_s(addr, 3-4, stride) = l[3]; } static void accel_lift_op4s_main_dl4line_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { assert( steps >= 0 ); const int PAIR = 2; const int TAPS = 4; // 4-fold SIMD //const int LIFT_STEPS = 4; // 4 lifting steps for CDF 9/7 if( scaling < 0 ) { for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = calc_temp_offset2_s(arr, w, 0); // constants const float w[4/*LIFT_STEPS*/] = { delta, gamma, beta, alpha }; const float v[2/*PAIR*/] = { 1/zeta, zeta }; // aux. variables float x[2]; float y[2]; float r[4]; float c[4]; float l[4]; // values that have to be passed from iteration to iteration // slide in left border l[0] = arr_local[0]; l[1] = arr_local[1]; l[2] = arr_local[2]; l[3] = arr_local[3]; // init float *addr = ASSUME_ALIGNED(arr_local + 4, 16); float c4[TAPS], l4[TAPS], r4[TAPS]; const float w4[4/*LIFT_STEPS*/][4/*TAPS*/] = { { w[0], w[0], w[0], w[0] }, { w[1], w[1], w[1], w[1] }, { w[2], w[2], w[2], w[2] }, { w[3], w[3], w[3], w[3] } }; const int S = steps; const int S4 = S / TAPS; const int R4 = S - S4*TAPS; // loop by group of four pairs from left to right for(int s = 0; s < S4; s++) { // inputs for(int i = 0; i < TAPS; i++) { l4[i] = addr[2*i+0]; r4[i] = addr[2*i+1]; } // scales for(int i = 0; i < TAPS; i++) { l4[i] *= v[0]; r4[i] *= v[1]; } #if 0 for(int t = LIFT_STEPS-1; t >= 0; t--) { // c[] c4[0] = l4[0]; c4[1] = l4[1]; c4[2] = l4[2]; c4[3] = l4[3]; // l[] l4[0] = l[t]; l4[1] = r4[0]; l4[2] = r4[1]; l4[3] = r4[2]; l[t] = r4[3]; // r[] r4[0] = c4[0]+w4[t][0]*(l4[0]+r4[0]); r4[1] = c4[1]+w4[t][1]*(l4[1]+r4[1]); r4[2] = c4[2]+w4[t][2]*(l4[2]+r4[2]); r4[3] = c4[3]+w4[t][3]*(l4[3]+r4[3]); } #else int t; t = 3; { // c[] c4[0] = l4[0]; c4[1] = l4[1]; c4[2] = l4[2]; c4[3] = l4[3]; // l[] l4[0] = l[t]; l4[1] = r4[0]; l4[2] = r4[1]; l4[3] = r4[2]; l[t] = r4[3]; // r[] r4[0] = c4[0]+w4[t][0]*(l4[0]+r4[0]); r4[1] = c4[1]+w4[t][1]*(l4[1]+r4[1]); r4[2] = c4[2]+w4[t][2]*(l4[2]+r4[2]); r4[3] = c4[3]+w4[t][3]*(l4[3]+r4[3]); } t = 2; { // c[] c4[0] = l4[0]; c4[1] = l4[1]; c4[2] = l4[2]; c4[3] = l4[3]; // l[] l4[0] = l[t]; l4[1] = r4[0]; l4[2] = r4[1]; l4[3] = r4[2]; l[t] = r4[3]; // r[] r4[0] = c4[0]+w4[t][0]*(l4[0]+r4[0]); r4[1] = c4[1]+w4[t][1]*(l4[1]+r4[1]); r4[2] = c4[2]+w4[t][2]*(l4[2]+r4[2]); r4[3] = 
c4[3]+w4[t][3]*(l4[3]+r4[3]); } t = 1; { // c[] c4[0] = l4[0]; c4[1] = l4[1]; c4[2] = l4[2]; c4[3] = l4[3]; // l[] l4[0] = l[t]; l4[1] = r4[0]; l4[2] = r4[1]; l4[3] = r4[2]; l[t] = r4[3]; // r[] r4[0] = c4[0]+w4[t][0]*(l4[0]+r4[0]); r4[1] = c4[1]+w4[t][1]*(l4[1]+r4[1]); r4[2] = c4[2]+w4[t][2]*(l4[2]+r4[2]); r4[3] = c4[3]+w4[t][3]*(l4[3]+r4[3]); } t = 0; { // c[] c4[0] = l4[0]; c4[1] = l4[1]; c4[2] = l4[2]; c4[3] = l4[3]; // l[] l4[0] = l[t]; l4[1] = r4[0]; l4[2] = r4[1]; l4[3] = r4[2]; l[t] = r4[3]; // r[] r4[0] = c4[0]+w4[t][0]*(l4[0]+r4[0]); r4[1] = c4[1]+w4[t][1]*(l4[1]+r4[1]); r4[2] = c4[2]+w4[t][2]*(l4[2]+r4[2]); r4[3] = c4[3]+w4[t][3]*(l4[3]+r4[3]); } #endif // outputs for(int i = 0; i < TAPS; i++) { addr[2*i+0-4] = l4[i]; addr[2*i+1-4] = r4[i]; } // pointers addr += PAIR * TAPS; } // loop by pairs from left to right for(int s = 0; s < R4; s++) { // inputs x[0] = addr[0]; x[1] = addr[1]; // scales x[0] *= v[0]; x[1] *= v[1]; // shuffles y[0] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = x[0]; // operation z[] = c[] + w[] * ( l[] + r[] ) // by sequential computation from top/right to bottom/left r[3] = x[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); y[1] = c[0]+w[0]*(l[0]+r[0]); // outputs addr[0-4] = y[0]; addr[1-4] = y[1]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; // pointers addr += PAIR; } // slide out right border addr[0-4] = l[0]; addr[1-4] = l[1]; addr[2-4] = l[2]; addr[3-4] = l[3]; } } else if ( scaling > 0 ) { for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = calc_temp_offset2_s(arr, w, 0); // constants const float w[4/*LIFT_STEPS*/] = { delta, gamma, beta, alpha }; const float v[2/*PAIR*/] = { 1/zeta, zeta }; // aux. variables float x[2]; float y[2]; float r[4]; float c[4]; float l[4]; // values that have to be passed from iteration to iteration // slide in left border l[0] = arr_local[0]; l[1] = arr_local[1]; l[2] = arr_local[2]; l[3] = arr_local[3]; // init float *addr = ASSUME_ALIGNED(arr_local + 4, 16); float c4[TAPS], l4[TAPS], r4[TAPS]; const float w4[4/*LIFT_STEPS*/][4/*TAPS*/] = { { w[0], w[0], w[0], w[0] }, { w[1], w[1], w[1], w[1] }, { w[2], w[2], w[2], w[2] }, { w[3], w[3], w[3], w[3] } }; const int S = steps; const int S4 = S / TAPS; const int R4 = S - S4*TAPS; // loop by group of four pairs from left to right for(int s = 0; s < S4; s++) { // inputs for(int i = 0; i < TAPS; i++) { l4[i] = addr[2*i+0]; r4[i] = addr[2*i+1]; } #if 0 for(int t = LIFT_STEPS-1; t >= 0; t--) { // c[] c4[0] = l4[0]; c4[1] = l4[1]; c4[2] = l4[2]; c4[3] = l4[3]; // l[] l4[0] = l[t]; l4[1] = r4[0]; l4[2] = r4[1]; l4[3] = r4[2]; l[t] = r4[3]; // r[] r4[0] = c4[0]+w4[t][0]*(l4[0]+r4[0]); r4[1] = c4[1]+w4[t][1]*(l4[1]+r4[1]); r4[2] = c4[2]+w4[t][2]*(l4[2]+r4[2]); r4[3] = c4[3]+w4[t][3]*(l4[3]+r4[3]); } #else int t; t = 3; { // c[] c4[0] = l4[0]; c4[1] = l4[1]; c4[2] = l4[2]; c4[3] = l4[3]; // l[] l4[0] = l[t]; l4[1] = r4[0]; l4[2] = r4[1]; l4[3] = r4[2]; l[t] = r4[3]; // r[] r4[0] = c4[0]+w4[t][0]*(l4[0]+r4[0]); r4[1] = c4[1]+w4[t][1]*(l4[1]+r4[1]); r4[2] = c4[2]+w4[t][2]*(l4[2]+r4[2]); r4[3] = c4[3]+w4[t][3]*(l4[3]+r4[3]); } t = 2; { // c[] c4[0] = l4[0]; c4[1] = l4[1]; c4[2] = l4[2]; c4[3] = l4[3]; // l[] l4[0] = l[t]; l4[1] = r4[0]; l4[2] = r4[1]; l4[3] = r4[2]; l[t] = r4[3]; // r[] r4[0] = c4[0]+w4[t][0]*(l4[0]+r4[0]); r4[1] = c4[1]+w4[t][1]*(l4[1]+r4[1]); r4[2] = c4[2]+w4[t][2]*(l4[2]+r4[2]); r4[3] = c4[3]+w4[t][3]*(l4[3]+r4[3]); } t = 1; { // c[] c4[0] = l4[0]; c4[1] = l4[1]; c4[2] = 
l4[2]; c4[3] = l4[3]; // l[] l4[0] = l[t]; l4[1] = r4[0]; l4[2] = r4[1]; l4[3] = r4[2]; l[t] = r4[3]; // r[] r4[0] = c4[0]+w4[t][0]*(l4[0]+r4[0]); r4[1] = c4[1]+w4[t][1]*(l4[1]+r4[1]); r4[2] = c4[2]+w4[t][2]*(l4[2]+r4[2]); r4[3] = c4[3]+w4[t][3]*(l4[3]+r4[3]); } t = 0; { // c[] c4[0] = l4[0]; c4[1] = l4[1]; c4[2] = l4[2]; c4[3] = l4[3]; // l[] l4[0] = l[t]; l4[1] = r4[0]; l4[2] = r4[1]; l4[3] = r4[2]; l[t] = r4[3]; // r[] r4[0] = c4[0]+w4[t][0]*(l4[0]+r4[0]); r4[1] = c4[1]+w4[t][1]*(l4[1]+r4[1]); r4[2] = c4[2]+w4[t][2]*(l4[2]+r4[2]); r4[3] = c4[3]+w4[t][3]*(l4[3]+r4[3]); } #endif // scales for(int i = 0; i < TAPS; i++) { l4[i] *= v[0]; r4[i] *= v[1]; } // outputs for(int i = 0; i < TAPS; i++) { addr[2*i+0-4] = l4[i]; addr[2*i+1-4] = r4[i]; } // pointers addr += PAIR * TAPS; } // loop by pairs from left to right for(int s = 0; s < R4; s++) { // inputs x[0] = addr[0]; x[1] = addr[1]; // shuffles y[0] = l[0]; c[0] = l[1]; c[1] = l[2]; c[2] = l[3]; c[3] = x[0]; // operation z[] = c[] + w[] * ( l[] + r[] ) // by sequential computation from top/right to bottom/left r[3] = x[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); y[1] = c[0]+w[0]*(l[0]+r[0]); // scales y[0] *= v[0]; y[1] *= v[1]; // outputs addr[0-4] = y[0]; addr[1-4] = y[1]; // update l[] l[0] = r[0]; l[1] = r[1]; l[2] = r[2]; l[3] = r[3]; // pointers addr += PAIR; } // slide out right border addr[0-4] = l[0]; addr[1-4] = l[1]; addr[2-4] = l[2]; addr[3-4] = l[3]; } } else { // fallback, not implemented accel_lift_op4s_main_s(arr, steps, alpha, beta, gamma, delta, zeta, scaling); } } #ifdef __SSE__ static void accel_lift_op4s_main_dl4line_sse_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { assert( steps >= 0 ); const int PAIR = 2; // even and odd coefficient form a pair const int TAPS = 4; // 4-fold SIMD //const int LIFT_STEPS = 4; // 4 lifting steps for CDF 9/7 const int S = steps; const int S4 = S / TAPS; const int R4 = S - S4*TAPS; if( scaling < 0 ) { for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = ASSUME_ALIGNED(calc_temp_offset2_s(arr, w, 0), 16); // intermediate results; force into registers __m128 l0, l1, l2, l3; // slide in left border { __m128 tmp0 = _mm_load_ps(arr_local); l0 = (__m128){ tmp0[0], tmp0[0], tmp0[0], tmp0[0] }; // movss+shufps l1 = (__m128){ tmp0[1], tmp0[1], tmp0[1], tmp0[1] }; // movss+shufps l2 = (__m128){ tmp0[2], tmp0[2], tmp0[2], tmp0[2] }; // movss+shufps l3 = (__m128){ tmp0[3], tmp0[3], tmp0[3], tmp0[3] }; // movss+shufps } // start from addr. float *addr = ASSUME_ALIGNED(arr_local + 4, 16); // aux. 
variables __m128 c4, l4, r4; __m128 w0 = { delta, delta, delta, delta }; // movss+shufps __m128 w1 = { gamma, gamma, gamma, gamma }; // movss+shufps __m128 w2 = { beta, beta, beta, beta }; // movss+shufps __m128 w3 = { alpha, alpha, alpha, alpha }; // movss+shufps __m128 v0 = { 1/zeta, 1/zeta, 1/zeta, 1/zeta }; // movss+shufps __m128 v1 = { zeta, zeta, zeta, zeta }; // movss+shufps // loop by group of four pairs from left to right for(int s = 0; s < S4; s++) { // inputs { r4 = _mm_load_ps(addr+0); // movaps __m128 tmp0 = _mm_load_ps(addr+4); // movaps l4 = r4; // movaps r4 = _mm_shuffle_ps(r4, tmp0, 0xdd/*221*/); // shufps l4 = _mm_shuffle_ps(l4, tmp0, 0x88/*136*/); // shufps } // scales { l4 *= v0; // mulps r4 *= v1; // mulps } // loop 3 #if 0 { // c c4 = l4; // movaps, kill l4 // l l4 = r4; // movaps l4 = _mm_shuffle_ps(l4, l4, 0x93/*147*/); // shufps l4[0] = l3[0]; // movss l3 = r4; // movaps l3 = _mm_shuffle_ps(l3, r4, 0xff/*255*/); // shufps // r r4 += l4; // addps r4 *= w3; // mulps r4 += c4; // addps } #else __asm__ __volatile__( "movaps %[l4], %[c4] \n\t" // l4 => c4 "movaps %[r4], %[l4] \n\t" // r4 => l4 "shufps $147, %[l4], %[l4] \n\t" // l4 => l4 "movss %[l3], %[l4] \n\t" // l3 => l4 "movaps %[r4], %[l3] \n\t" // l4 => l3 "shufps $255, %[r4], %[l3] \n\t" // r4 => l4 "addps %[l4], %[r4] \n\t" // l4 => r4 "mulps %[w3], %[r4] \n\t" // w3 => r4 "addps %[c4], %[r4] \n\t" // c4 => r4 : [l4]"+x"(l4), [r4]"+x"(r4), [l3]"+x"(l3), [c4]"=x"(c4) : [w3]"x"(w3) : ); #endif // loop 2 #if 0 { // c c4 = l4; // movaps // l l4 = r4; // movaps l4 = _mm_shuffle_ps(l4, l4, 0x93); // shufps l4[0] = l2[0]; // movss l2 = r4; // movaps l2 = _mm_shuffle_ps(l2, r4, 0xff); // shufps // r r4 += l4; // addps r4 *= w2; // mulps r4 += c4; // addps } #else __asm__ __volatile__( "movaps %[l4], %[c4] \n\t" // l4 => c4 "movaps %[r4], %[l4] \n\t" // r4 => l4 "shufps $147, %[l4], %[l4] \n\t" // l4 => l4 "movss %[l2], %[l4] \n\t" // l3 => l4 "movaps %[r4], %[l2] \n\t" // l4 => l3 "shufps $255, %[r4], %[l2] \n\t" // r4 => l4 "addps %[l4], %[r4] \n\t" // l4 => r4 "mulps %[w2], %[r4] \n\t" // w3 => r4 "addps %[c4], %[r4] \n\t" // c4 => r4 : [l4]"+x"(l4), [r4]"+x"(r4), [l2]"+x"(l2), [c4]"=x"(c4) : [w2]"x"(w2) : ); #endif // loop 1 #if 0 { // c c4 = l4; // movaps // l l4 = r4; // movaps l4 = _mm_shuffle_ps(l4, l4, 0x93); // shufps l4[0] = l1[0]; // movss l1 = r4; // movaps l1 = _mm_shuffle_ps(l1, r4, 0xff); // shufps // r r4 += l4; // addps r4 *= w1; // mulps r4 += c4; // addps } #else __asm__ __volatile__( "movaps %[l4], %[c4] \n\t" // l4 => c4 "movaps %[r4], %[l4] \n\t" // r4 => l4 "shufps $147, %[l4], %[l4] \n\t" // l4 => l4 "movss %[l1], %[l4] \n\t" // l3 => l4 "movaps %[r4], %[l1] \n\t" // l4 => l3 "shufps $255, %[r4], %[l1] \n\t" // r4 => l4 "addps %[l4], %[r4] \n\t" // l4 => r4 "mulps %[w1], %[r4] \n\t" // w3 => r4 "addps %[c4], %[r4] \n\t" // c4 => r4 : [l4]"+x"(l4), [r4]"+x"(r4), [l1]"+x"(l1), [c4]"=x"(c4) : [w1]"x"(w1) : ); #endif // loop 0 #if 0 { // c c4 = l4; // movaps // l l4 = r4; // movaps l4 = _mm_shuffle_ps(l4, l4, 0x93); // shufps l4[0] = l0[0]; // movss l0 = r4; // movaps l0 = _mm_shuffle_ps(l0, r4, 0xff); // shufps // r r4 += l4; // addps r4 *= w0; // mulps r4 += c4; // addps } #else __asm__ __volatile__( "movaps %[l4], %[c4] \n\t" // l4 => c4 "movaps %[r4], %[l4] \n\t" // r4 => l4 "shufps $147, %[l4], %[l4] \n\t" // l4 => l4 "movss %[l0], %[l4] \n\t" // l3 => l4 "movaps %[r4], %[l0] \n\t" // l4 => l3 "shufps $255, %[r4], %[l0] \n\t" // r4 => l4 "addps %[l4], %[r4] \n\t" // l4 => r4 "mulps %[w0], %[r4] 
\n\t" // w3 => r4 "addps %[c4], %[r4] \n\t" // c4 => r4 : [l4]"+x"(l4), [r4]"+x"(r4), [l0]"+x"(l0), [c4]"=x"(c4) : [w0]"x"(w0) : ); #endif // outputs { // [ A B C D ] [ E F G H ] => [ A E B F ] [ C G D H ] __m128 tmp0 = l4; // movaps tmp0 = _mm_unpacklo_ps(tmp0, r4); // unpcklps _mm_store_ps(addr-4, tmp0); // movaps l4 = _mm_unpackhi_ps(l4, r4); // unpckhps _mm_store_ps(addr-0, l4); // movaps } // pointers addr += PAIR * TAPS; } // constants const __m128 w = { delta, gamma, beta, alpha }; const __m128 v = { 1/zeta, zeta, 1/zeta, zeta }; // variables __m128 x, y, r, c, l; // intermediate results l = (__m128){ l0[0], l1[0], l2[0], l3[0] }; // loop by pairs from left to right for(int s = 0; s < R4; s++) { // inputs x[0] = addr[0]; x[1] = addr[1]; // scales x[0] *= v[0]; x[1] *= v[1]; // shuffles c = l; c = _mm_shuffle_ps(c,c,0x39); y[0] = l[0]; c[3] = x[0]; // operation z[] = c[] + w[] * ( l[] + r[] ) // by sequential computation from top/right to bottom/left r[3] = x[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); y[1] = c[0]+w[0]*(l[0]+r[0]); // outputs addr[0-4] = y[0]; addr[1-4] = y[1]; // update l[] l = r; // pointers addr += PAIR; } // slide out right border _mm_storeu_ps(addr-4, l); } } else if ( scaling > 0 ) { for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = ASSUME_ALIGNED(calc_temp_offset2_s(arr, w, 0), 16); // intermediate results, force into registers __m128 l0, l1, l2, l3; // slide in left border { __m128 tmp0 = _mm_load_ps(arr_local); l0 = (__m128){ tmp0[0], tmp0[0], tmp0[0], tmp0[0] }; // movss+shufps l1 = (__m128){ tmp0[1], tmp0[1], tmp0[1], tmp0[1] }; // movss+shufps l2 = (__m128){ tmp0[2], tmp0[2], tmp0[2], tmp0[2] }; // movss+shufps l3 = (__m128){ tmp0[3], tmp0[3], tmp0[3], tmp0[3] }; // movss+shufps } // start from addr. float *addr = ASSUME_ALIGNED(arr_local + 4, 16); // aux. 
variables __m128 c4, l4, r4; __m128 w0 = { delta, delta, delta, delta }; // movss+shufps __m128 w1 = { gamma, gamma, gamma, gamma }; // movss+shufps __m128 w2 = { beta, beta, beta, beta }; // movss+shufps __m128 w3 = { alpha, alpha, alpha, alpha }; // movss+shufps __m128 v0 = { 1/zeta, 1/zeta, 1/zeta, 1/zeta }; // movss+shufps __m128 v1 = { zeta, zeta, zeta, zeta }; // movss+shufps // loop by group of four pairs from left to right for(int s = 0; s < S4; s++) { // inputs { r4 = _mm_load_ps(addr+0); // movaps __m128 tmp0 = _mm_load_ps(addr+4); // movaps l4 = r4; // movaps r4 = _mm_shuffle_ps(r4, tmp0, 0xdd/*221*/); // shufps l4 = _mm_shuffle_ps(l4, tmp0, 0x88/*136*/); // shufps } // loop 3 #if 0 { // c c4 = l4; // movaps, kill l4 // l l4 = r4; // movaps l4 = _mm_shuffle_ps(l4, l4, 0x93/*147*/); // shufps l4[0] = l3[0]; // movss l3 = r4; // movaps l3 = _mm_shuffle_ps(l3, r4, 0xff/*255*/); // shufps // r r4 += l4; // addps r4 *= w3; // mulps r4 += c4; // addps } #else __asm__ __volatile__( "movaps %[l4], %[c4] \n\t" // l4 => c4 "movaps %[r4], %[l4] \n\t" // r4 => l4 "shufps $147, %[l4], %[l4] \n\t" // l4 => l4 "movss %[l3], %[l4] \n\t" // l3 => l4 "movaps %[r4], %[l3] \n\t" // l4 => l3 "shufps $255, %[r4], %[l3] \n\t" // r4 => l4 "addps %[l4], %[r4] \n\t" // l4 => r4 "mulps %[w3], %[r4] \n\t" // w3 => r4 "addps %[c4], %[r4] \n\t" // c4 => r4 : [l4]"+x"(l4), [r4]"+x"(r4), [l3]"+x"(l3), [c4]"=x"(c4) : [w3]"x"(w3) : ); #endif // loop 2 #if 0 { // c c4 = l4; // movaps // l l4 = r4; // movaps l4 = _mm_shuffle_ps(l4, l4, 0x93); // shufps l4[0] = l2[0]; // movss l2 = r4; // movaps l2 = _mm_shuffle_ps(l2, r4, 0xff); // shufps // r r4 += l4; // addps r4 *= w2; // mulps r4 += c4; // addps } #else __asm__ __volatile__( "movaps %[l4], %[c4] \n\t" // l4 => c4 "movaps %[r4], %[l4] \n\t" // r4 => l4 "shufps $147, %[l4], %[l4] \n\t" // l4 => l4 "movss %[l2], %[l4] \n\t" // l3 => l4 "movaps %[r4], %[l2] \n\t" // l4 => l3 "shufps $255, %[r4], %[l2] \n\t" // r4 => l4 "addps %[l4], %[r4] \n\t" // l4 => r4 "mulps %[w2], %[r4] \n\t" // w3 => r4 "addps %[c4], %[r4] \n\t" // c4 => r4 : [l4]"+x"(l4), [r4]"+x"(r4), [l2]"+x"(l2), [c4]"=x"(c4) : [w2]"x"(w2) : ); #endif // loop 1 #if 0 { // c c4 = l4; // movaps // l l4 = r4; // movaps l4 = _mm_shuffle_ps(l4, l4, 0x93); // shufps l4[0] = l1[0]; // movss l1 = r4; // movaps l1 = _mm_shuffle_ps(l1, r4, 0xff); // shufps // r r4 += l4; // addps r4 *= w1; // mulps r4 += c4; // addps } #else __asm__ __volatile__( "movaps %[l4], %[c4] \n\t" // l4 => c4 "movaps %[r4], %[l4] \n\t" // r4 => l4 "shufps $147, %[l4], %[l4] \n\t" // l4 => l4 "movss %[l1], %[l4] \n\t" // l3 => l4 "movaps %[r4], %[l1] \n\t" // l4 => l3 "shufps $255, %[r4], %[l1] \n\t" // r4 => l4 "addps %[l4], %[r4] \n\t" // l4 => r4 "mulps %[w1], %[r4] \n\t" // w3 => r4 "addps %[c4], %[r4] \n\t" // c4 => r4 : [l4]"+x"(l4), [r4]"+x"(r4), [l1]"+x"(l1), [c4]"=x"(c4) : [w1]"x"(w1) : ); #endif // loop 0 #if 0 { // c c4 = l4; // movaps // l l4 = r4; // movaps l4 = _mm_shuffle_ps(l4, l4, 0x93); // shufps l4[0] = l0[0]; // movss l0 = r4; // movaps l0 = _mm_shuffle_ps(l0, r4, 0xff); // shufps // r r4 += l4; // addps r4 *= w0; // mulps r4 += c4; // addps } #else __asm__ __volatile__( "movaps %[l4], %[c4] \n\t" // l4 => c4 "movaps %[r4], %[l4] \n\t" // r4 => l4 "shufps $147, %[l4], %[l4] \n\t" // l4 => l4 "movss %[l0], %[l4] \n\t" // l3 => l4 "movaps %[r4], %[l0] \n\t" // l4 => l3 "shufps $255, %[r4], %[l0] \n\t" // r4 => l4 "addps %[l4], %[r4] \n\t" // l4 => r4 "mulps %[w0], %[r4] \n\t" // w3 => r4 "addps %[c4], %[r4] \n\t" // c4 
=> r4 : [l4]"+x"(l4), [r4]"+x"(r4), [l0]"+x"(l0), [c4]"=x"(c4) : [w0]"x"(w0) : ); #endif // scales { l4 *= v0; // mulps r4 *= v1; // mulps } // outputs { // [ A B C D ] [ E F G H ] => [ A E B F ] [ C G D H ] __m128 tmp0 = l4; // movaps tmp0 = _mm_unpacklo_ps(tmp0, r4); // unpcklps _mm_store_ps(addr-4, tmp0); // movaps l4 = _mm_unpackhi_ps(l4, r4); // unpckhps _mm_store_ps(addr-0, l4); // movaps } // pointers addr += PAIR * TAPS; } // constants const __m128 w = { delta, gamma, beta, alpha }; const __m128 v = { 1/zeta, zeta, 1/zeta, zeta }; // variables __m128 x, y, r, c, l; // intermediate results l = (__m128){ l0[0], l1[0], l2[0], l3[0] }; // loop by pairs from left to right for(int s = 0; s < R4; s++) { // inputs x[0] = addr[0]; x[1] = addr[1]; // shuffles c = l; c = _mm_shuffle_ps(c,c,0x39); y[0] = l[0]; c[3] = x[0]; // operation z[] = c[] + w[] * ( l[] + r[] ) // by sequential computation from top/right to bottom/left r[3] = x[1]; r[2] = c[3]+w[3]*(l[3]+r[3]); r[1] = c[2]+w[2]*(l[2]+r[2]); r[0] = c[1]+w[1]*(l[1]+r[1]); y[1] = c[0]+w[0]*(l[0]+r[0]); // scales y[0] *= v[0]; y[1] *= v[1]; // outputs addr[0-4] = y[0]; addr[1-4] = y[1]; // update l[] l = r; // pointers addr += PAIR; } // slide out right border _mm_storeu_ps(addr-4, l); } } else { // fallback, not implemented accel_lift_op4s_main_s(arr, steps, alpha, beta, gamma, delta, zeta, scaling); } } #endif /** * double-loop algorithm for 4 rows in parallel */ static void accel_lift_op4s_main_dl4_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { assert( steps >= 0 ); if( scaling < 0 ) { assert( 4 == dwt_util_get_num_workers() ); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // aux. variables float in[4][2]; float out[4][2]; float r[4][4]; float c[4][4]; float l[4][4]; // pointers float *addr[4]; for(int worker = 0; worker < dwt_util_get_num_workers(); worker++) { // init addr[worker] = calc_temp_offset2_s(arr, worker, 0); // import l[worker][0] = addr[worker][0]; l[worker][1] = addr[worker][1]; l[worker][2] = addr[worker][2]; l[worker][3] = addr[worker][3]; addr[worker] += 4; } // loop for(int s = 0; s < steps; s++) { for(int worker = 0; worker < dwt_util_get_num_workers(); worker++) { // inputs in[worker][0] = addr[worker][0]; in[worker][1] = addr[worker][1]; // scales in[worker][0] = in[worker][0] * v[0]; in[worker][1] = in[worker][1] * v[1]; // shuffles out[worker][0] = l[worker][0]; c[worker][0] = l[worker][1]; c[worker][1] = l[worker][2]; c[worker][2] = l[worker][3]; c[worker][3] = in[worker][0]; // operation r[worker][3] = in[worker][1]; r[worker][2] = c[worker][3] + w[3]*(l[worker][3] + r[worker][3]); r[worker][1] = c[worker][2] + w[2]*(l[worker][2] + r[worker][2]); r[worker][0] = c[worker][1] + w[1]*(l[worker][1] + r[worker][1]); out[worker][1] = c[worker][0] + w[0]*(l[worker][0] + r[worker][0]); // outputs addr[worker][0-4] = out[worker][0]; addr[worker][1-4] = out[worker][1]; // update l[worker][0] = r[worker][0]; l[worker][1] = r[worker][1]; l[worker][2] = r[worker][2]; l[worker][3] = r[worker][3]; // pointers addr[worker] += 2; } } for(int worker = 0; worker < dwt_util_get_num_workers(); worker++) { // export addr[worker][0-4] = l[worker][0]; addr[worker][1-4] = l[worker][1]; addr[worker][2-4] = l[worker][2]; addr[worker][3-4] = l[worker][3]; } } else if ( scaling > 0 ) { assert( 4 == dwt_util_get_num_workers() ); // constants const float w[4] = { delta, gamma, beta, alpha }; const float v[2] = { 1/zeta, zeta }; // 
aux. variables float in[4][2]; float out[4][2]; float r[4][4]; float c[4][4]; float l[4][4]; // pointers float *addr[4]; for(int worker = 0; worker < dwt_util_get_num_workers(); worker++) { // init addr[worker] = calc_temp_offset2_s(arr, worker, 0); // import l[worker][0] = addr[worker][0]; l[worker][1] = addr[worker][1]; l[worker][2] = addr[worker][2]; l[worker][3] = addr[worker][3]; addr[worker] += 4; } // loop for(int s = 0; s < steps; s++) { for(int worker = 0; worker < dwt_util_get_num_workers(); worker++) { // inputs in[worker][0] = addr[worker][0]; in[worker][1] = addr[worker][1]; // shuffles out[worker][0] = l[worker][0]; c[worker][0] = l[worker][1]; c[worker][1] = l[worker][2]; c[worker][2] = l[worker][3]; c[worker][3] = in[worker][0]; // operation r[worker][3] = in[worker][1]; r[worker][2] = c[worker][3] + w[3]*(l[worker][3] + r[worker][3]); r[worker][1] = c[worker][2] + w[2]*(l[worker][2] + r[worker][2]); r[worker][0] = c[worker][1] + w[1]*(l[worker][1] + r[worker][1]); out[worker][1] = c[worker][0] + w[0]*(l[worker][0] + r[worker][0]); // scales out[worker][0] = out[worker][0] * v[0]; out[worker][1] = out[worker][1] * v[1]; // outputs addr[worker][0-4] = out[worker][0]; addr[worker][1-4] = out[worker][1]; // update l[worker][0] = r[worker][0]; l[worker][1] = r[worker][1]; l[worker][2] = r[worker][2]; l[worker][3] = r[worker][3]; // pointers addr[worker] += 2; } } for(int worker = 0; worker < dwt_util_get_num_workers(); worker++) { // export addr[worker][0-4] = l[worker][0]; addr[worker][1-4] = l[worker][1]; addr[worker][2-4] = l[worker][2]; addr[worker][3-4] = l[worker][3]; } } else { // fallback, not implemented accel_lift_op4s_main_s(arr, steps, alpha, beta, gamma, delta, zeta, scaling); } } #ifdef __SSE__ #define op4s_dl4_preload_s_sse(addr, idx, temp, s) \ do { \ (*(addr))[(idx)] = (temp)[(idx)][(s)]; \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_preload_s_sse4(addr, temp, s) \ do { \ op4s_dl4_preload_s_sse((addr), 0, (temp), (s)); \ op4s_dl4_preload_s_sse((addr), 1, (temp), (s)); \ op4s_dl4_preload_s_sse((addr), 2, (temp), (s)); \ op4s_dl4_preload_s_sse((addr), 3, (temp), (s)); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_import_s_sse(l, addr) \ do { \ (l) = *(addr); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_import_s_sse4(l, addr) \ do { \ op4s_dl4_import_s_sse((l)[0], (addr) + 0); \ op4s_dl4_import_s_sse((l)[1], (addr) + 1); \ op4s_dl4_import_s_sse((l)[2], (addr) + 2); \ op4s_dl4_import_s_sse((l)[3], (addr) + 3); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_input_s_sse(in, addr) \ do { \ (in) = *(addr); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_input_s_sse4(in, addr) \ do { \ op4s_dl4_input_s_sse((in)[0], (addr) + 0); \ op4s_dl4_input_s_sse((in)[1], (addr) + 1); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_scale_s_sse(in, v) \ do { \ (in) *= (v); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_descale_s_sse(out, v) \ do { \ (out) *= (v); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_scale_s_sse4(in, v) \ do { \ op4s_dl4_scale_s_sse((in)[0], (v)[0]); \ op4s_dl4_scale_s_sse((in)[1], (v)[1]); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_descale_s_sse4(out, v) \ do { \ op4s_dl4_descale_s_sse((out)[0], (v)[0]); \ op4s_dl4_descale_s_sse((out)[1], (v)[1]); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_mov_s_sse(z, l) \ do { \ (z) = (l); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_op3_s_sse(z, c, w, r) \ do { \ (z) += (r); \ (z) *= (w); \ (z) += (c); \ } while(0) #endif #ifdef __SSE__ #define 
op4s_dl4_op_s_sse4(in, out, w, c, l, r) \ do { \ op4s_dl4_mov_s_sse((out)[0], (l)[0]); \ op4s_dl4_mov_s_sse((c)[0], (l)[1]); \ op4s_dl4_mov_s_sse((c)[1], (l)[2]); \ op4s_dl4_mov_s_sse((c)[2], (l)[3]); \ op4s_dl4_mov_s_sse((c)[3], (in)[0]); \ op4s_dl4_mov_s_sse((r)[3], (in)[1]); \ op4s_dl4_mov_s_sse((r)[2], (l)[3]); op4s_dl4_op3_s_sse((r)[2], (c)[3], (w)[3], (r)[3]); \ op4s_dl4_mov_s_sse((r)[1], (l)[2]); op4s_dl4_op3_s_sse((r)[1], (c)[2], (w)[2], (r)[2]); \ op4s_dl4_mov_s_sse((r)[0], (l)[1]); op4s_dl4_op3_s_sse((r)[0], (c)[1], (w)[1], (r)[1]); \ op4s_dl4_mov_s_sse((out)[1], (l)[0]); op4s_dl4_op3_s_sse((out)[1], (c)[0], (w)[0], (r)[0]); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_output_s_sse(addr, out) \ do { \ *(addr) = (out); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_output_s_sse4(addr, out) \ do { \ op4s_dl4_output_s_sse((addr) + 0, (out)[0]); \ op4s_dl4_output_s_sse((addr) + 1, (out)[1]); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_update_s_sse(l, r) \ do { \ (l) = (r); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_update_s_sse4(l, r) \ do { \ op4s_dl4_update_s_sse((l)[0], (r)[0]); \ op4s_dl4_update_s_sse((l)[1], (r)[1]); \ op4s_dl4_update_s_sse((l)[2], (r)[2]); \ op4s_dl4_update_s_sse((l)[3], (r)[3]); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_export_s_sse(addr, l) \ do { \ *(addr) = (l); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_export_s_sse4(addr, l) \ do { \ op4s_dl4_export_s_sse((addr) + 0, (l)[0]); \ op4s_dl4_export_s_sse((addr) + 1, (l)[1]); \ op4s_dl4_export_s_sse((addr) + 2, (l)[2]); \ op4s_dl4_export_s_sse((addr) + 3, (l)[3]); \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_postsave_s_sse(temp, s, addr, idx) \ do { \ (temp)[(idx)][(s)] = (*(addr))[(idx)]; \ } while(0) #endif #ifdef __SSE__ #define op4s_dl4_postsave_s_sse4(temp, s, addr) \ do { \ op4s_dl4_postsave_s_sse((temp), (s), (addr), 0); \ op4s_dl4_postsave_s_sse((temp), (s), (addr), 1); \ op4s_dl4_postsave_s_sse((temp), (s), (addr), 2); \ op4s_dl4_postsave_s_sse((temp), (s), (addr), 3); \ } while(0) #endif #ifdef __SSE__ /** * double-loop algorithm for 4 rows in parallel using SSE */ static void accel_lift_op4s_main_dl4_sse_s( float *arr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { assert( steps >= 0 ); if( scaling < 0 ) { assert( 4 == dwt_util_get_num_workers() ); // constants const __m128 w[4] = { { delta, delta, delta, delta }, { gamma, gamma, gamma, gamma }, { beta, beta, beta, beta }, { alpha, alpha, alpha, alpha } }; const __m128 v[2] = { { 1/zeta, 1/zeta, 1/zeta, 1/zeta }, { zeta, zeta, zeta, zeta } }; // aux. 
variables __m128 in[2]; __m128 out[2]; __m128 r[4]; __m128 c[4]; __m128 l[4]; // pointers float *arr_local[4]; // init for(int worker = 0; worker < 4; worker++) { arr_local[worker] = calc_temp_offset2_s(arr, worker, 0); } // buffer const int buff_size = 4 + 2*steps; __m128 buff[buff_size]; // FIXME(x86): huge array on the stack __m128 *buff_ptr = buff; // load buffer assert( is_aligned_16(arr_local[0]) && is_aligned_16(arr_local[1]) && is_aligned_16(arr_local[2]) && is_aligned_16(arr_local[3]) ); const int t4 = buff_size >> 2; const int t3 = buff_size & ~3; for(int t = 0; t < t4; t++) { // FIXME(x86): access to image data directly instead of storing it into temp[] __m128 s0 = _mm_load_ps(&arr_local[0][4*t]); __m128 s1 = _mm_load_ps(&arr_local[1][4*t]); __m128 s2 = _mm_load_ps(&arr_local[2][4*t]); __m128 s3 = _mm_load_ps(&arr_local[3][4*t]); _MM_TRANSPOSE4_PS(s0, s1, s2, s3); buff[4*t+0] = s0; buff[4*t+1] = s1; buff[4*t+2] = s2; buff[4*t+3] = s3; } for(int t = t3; t < buff_size; t++) { // FIXME(x86): access to image data directly instead of storing it into temp[] buff[t][0] = arr_local[0][t]; buff[t][1] = arr_local[1][t]; buff[t][2] = arr_local[2][t]; buff[t][3] = arr_local[3][t]; } // import op4s_dl4_import_s_sse4(l, buff_ptr); // pointers buff_ptr += 4; // loop for(int s = 0; s < steps; s++) { // inputs op4s_dl4_input_s_sse4(in, buff_ptr); // scales op4s_dl4_scale_s_sse4(in, v); // shuffles + operation op4s_dl4_op_s_sse4(in, out, w, c, l, r); // outputs op4s_dl4_output_s_sse4(buff_ptr-4, out); // update op4s_dl4_update_s_sse4(l, r); // pointers buff_ptr += 2; } // export op4s_dl4_export_s_sse4(buff_ptr-4, l); // store buffer for(int t = 0; t < t4; t++) { __m128 s0 = buff[4*t+0]; __m128 s1 = buff[4*t+1]; __m128 s2 = buff[4*t+2]; __m128 s3 = buff[4*t+3]; _MM_TRANSPOSE4_PS(s0, s1, s2, s3); _mm_store_ps(&arr_local[0][4*t], s0); _mm_store_ps(&arr_local[1][4*t], s1); _mm_store_ps(&arr_local[2][4*t], s2); _mm_store_ps(&arr_local[3][4*t], s3); } for(int t = t3; t < buff_size; t++) { arr_local[0][t] = buff[t][0]; arr_local[1][t] = buff[t][1]; arr_local[2][t] = buff[t][2]; arr_local[3][t] = buff[t][3]; } } else if ( scaling > 0 ) { assert( 4 == dwt_util_get_num_workers() ); // constants const __m128 w[4] = { { delta, delta, delta, delta }, { gamma, gamma, gamma, gamma }, { beta, beta, beta, beta }, { alpha, alpha, alpha, alpha } }; const __m128 v[2] = { { 1/zeta, 1/zeta, 1/zeta, 1/zeta }, { zeta, zeta, zeta, zeta } }; // aux. 
variables __m128 in[2]; __m128 out[2]; __m128 r[4]; __m128 c[4]; __m128 l[4]; // pointers float *arr_local[4]; // init for(int worker = 0; worker < 4; worker++) { arr_local[worker] = calc_temp_offset2_s(arr, worker, 0); } // buffer const int buff_size = 4 + 2*steps; __m128 buff[buff_size]; // FIXME(x86): huge array on the stack __m128 *buff_ptr = buff; // load buffer assert( is_aligned_16(arr_local[0]) && is_aligned_16(arr_local[1]) && is_aligned_16(arr_local[2]) && is_aligned_16(arr_local[3]) ); const int t4 = buff_size >> 2; const int t3 = buff_size & ~3; for(int t = 0; t < t4; t++) { // FIXME(x86): access to image data directly instead of storing it into temp[] __m128 s0 = _mm_load_ps(&arr_local[0][4*t]); __m128 s1 = _mm_load_ps(&arr_local[1][4*t]); __m128 s2 = _mm_load_ps(&arr_local[2][4*t]); __m128 s3 = _mm_load_ps(&arr_local[3][4*t]); _MM_TRANSPOSE4_PS(s0, s1, s2, s3); buff[4*t+0] = s0; buff[4*t+1] = s1; buff[4*t+2] = s2; buff[4*t+3] = s3; } for(int t = t3; t < buff_size; t++) { // FIXME(x86): access to image data directly instead of storing it into temp[] buff[t][0] = arr_local[0][t]; buff[t][1] = arr_local[1][t]; buff[t][2] = arr_local[2][t]; buff[t][3] = arr_local[3][t]; } // import op4s_dl4_import_s_sse4(l, buff_ptr); // pointers buff_ptr += 4; // loop for(int s = 0; s < steps; s++) { // inputs op4s_dl4_input_s_sse4(in, buff_ptr); // shuffles + operation op4s_dl4_op_s_sse4(in, out, w, c, l, r); // descales op4s_dl4_descale_s_sse4(out, v); // outputs op4s_dl4_output_s_sse4(buff_ptr-4, out); // update op4s_dl4_update_s_sse4(l, r); // pointers buff_ptr += 2; } // export op4s_dl4_export_s_sse4(buff_ptr-4, l); // store buffer for(int t = 0; t < t4; t++) { __m128 s0 = buff[4*t+0]; __m128 s1 = buff[4*t+1]; __m128 s2 = buff[4*t+2]; __m128 s3 = buff[4*t+3]; _MM_TRANSPOSE4_PS(s0, s1, s2, s3); _mm_store_ps(&arr_local[0][4*t], s0); _mm_store_ps(&arr_local[1][4*t], s1); _mm_store_ps(&arr_local[2][4*t], s2); _mm_store_ps(&arr_local[3][4*t], s3); } for(int t = t3; t < buff_size; t++) { arr_local[0][t] = buff[t][0]; arr_local[1][t] = buff[t][1]; arr_local[2][t] = buff[t][2]; arr_local[3][t] = buff[t][3]; } } else { // fallback, not implemented accel_lift_op4s_main_s(arr, steps, alpha, beta, gamma, delta, zeta, scaling); } } #endif int dwt_util_is_aligned_16( const void *ptr) { return is_aligned_16(ptr); } int dwt_util_is_aligned_8( const void *ptr) { return is_aligned_8(ptr); } int dwt_util_is_aligned_4( const void *ptr) { return is_aligned_4(ptr); } /** * @brief Accelerated PicoBlaze operation. * * Two pairs (predict and update) of lifting steps and coefficients scaling * merged together. This function is accelerated on ASVP/EdkDSP. 
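 *
 * For orientation, every lifting step used in this file has the same shape:
 * every other sample is updated from its two immediate neighbours,
 *
 *   x[n] += w * ( x[n-1] + x[n+1] ),
 *
 * with w running through alpha, beta, gamma and delta, and the transform
 * finished by scaling the even/odd samples with 1/zeta and zeta (see the
 * w[] and v[] constant arrays used by the CPU variants above). The prolog,
 * core and epilog routines are different schedules of these same updates;
 * the PicoBlaze firmware is assumed to evaluate the same recurrence on the
 * EdkDSP accelerator.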
*/ static void accel_lift_op4s_main_pb_s( float *addr, int steps, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { FUNC_BEGIN; assert( steps >= 0 ); #ifdef __asvp__ UNUSED(scaling); UNUSED(alpha); UNUSED(beta); UNUSED(gamma); UNUSED(delta); UNUSED(zeta); assert( steps <= (BANK_SIZE - 4) / 2 ); const int size = 2*steps + 4; assert( is_even(size) ); for(int w = 0; w < get_active_workers(); w++) { // FIXME(ASVP): channel w according to worker ID; but each worker has independent DMA channels, thus this is not necessary const uint8_t ch = w; float *addr_local = calc_temp_offset2_s(addr, w, 0); assert( is_aligned_8(addr_local) ); WAL_CHECK( wal_dma_configure(worker[w], ch, addr_local, 0, WAL_BCE_JSY_DMEM_A, WAL_BANK_POS(0), size) ); WAL_CHECK( wal_dma_start(worker[w], ch, WAL_DMA_REQ_RD) ); } for(int w = 0; w < get_active_workers(); w++) { // HACK(ASVP): wait for completing memory transfers on all 8 channels; but each worker has independent DMA channels while( wal_dma_isbusy(worker[w], /*WAL_DMA_MASK(ch)*/ 0x0f) ) ; } const uint32_t steps_32 = (uint32_t)steps; // start BCE computations for(int w = 0; w < get_active_workers(); w++) { WAL_CHECK( wal_mb2cmem(worker[w], WAL_CMEM_MB2PB, 0x01, &steps_32, 1) ); WAL_CHECK( wal_mb2pb(worker[w], 1) ); } // wait for finishing every BCE computation for(int w = 0; w < get_active_workers(); w++) { WAL_CHECK( wal_pb2mb(worker[w], NULL) ); } assert( is_even(size) ); for(int w = 0; w < get_active_workers(); w++) { const uint8_t ch = w; float *addr_local = calc_temp_offset2_s(addr, w, 0); assert( is_aligned_8(addr_local) ); WAL_CHECK( wal_dma_configure(worker[w], ch, addr_local, 0, WAL_BCE_JSY_DMEM_C, WAL_BANK_POS(0), size) ); WAL_CHECK( wal_dma_start(worker[w], ch, WAL_DMA_REQ_WR) ); } for(int w = 0; w < get_active_workers(); w++) { while( wal_dma_isbusy(worker[w], /*WAL_DMA_MASK(ch)*/ 0x0f) ) ; } for(int w = 0; w < get_active_workers(); w++) { float *addr_local = calc_temp_offset2_s(addr, w, 0); flush_cache_s(addr_local-1, size); // HACK(ASVP): why -1? 
} #else /* microblaze */ // fallback accel_lift_op4s_main_s(addr, steps, alpha, beta, gamma, delta, zeta, scaling); #endif /* microblaze */ FUNC_END; } static void accel_lift_op4s_prolog_s( float *arr, int off, int N, float alpha, float beta, float gamma, float delta, float zeta, int scaling ) { assert( N-off >= 4 ); #ifdef NDEBUG UNUSED(N); #endif for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = calc_temp_offset2_s(arr, w, off); if(off) { // inv-scaling if( scaling < 0 ) { // TODO } // alpha arr_local[1] += alpha*(arr_local[0]+arr_local[2]); arr_local[3] += alpha*(arr_local[2]+arr_local[4]); // beta arr_local[0] += 2*beta*(arr_local[1]); arr_local[2] += beta*(arr_local[1]+arr_local[3]); // gamma arr_local[1] += gamma*(arr_local[0]+arr_local[2]); // delta arr_local[0] += 2*delta*(arr_local[1]); // scaling if( scaling > 0) { arr_local[0] *= zeta; } } else { // inv-scaling if( scaling < 0 ) { arr_local[0] *= 1/zeta; arr_local[1] *= zeta; arr_local[2] *= 1/zeta; arr_local[3] *= zeta; } // alpha arr_local[0] += 2*alpha*(arr_local[1]); arr_local[2] += alpha*(arr_local[1]+arr_local[3]); // beta arr_local[1] += beta*(arr_local[0]+arr_local[2]); // gamma arr_local[0] += 2*gamma*(arr_local[1]); // delta // none // scaling // none } } } static void accel_lift_op4s_prolog_stride_s( float *arr, int off, int N, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { assert( N-off >= 4 ); #ifdef NDEBUG UNUSED(N); #endif assert( 1 == dwt_util_get_num_workers() ); { if( off ) { // inv-scaling if( scaling < 0 ) { // TODO } // alpha *addr1_s(arr, 1, stride) += alpha*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); *addr1_s(arr, 3, stride) += alpha*(*addr1_s(arr, 2, stride) + *addr1_s(arr, 4, stride)); // beta *addr1_s(arr, 0, stride) += 2*beta*(*addr1_s(arr, 1, stride)); *addr1_s(arr, 2, stride) += beta*(*addr1_s(arr, 1, stride) + *addr1_s(arr, 3, stride)); // gamma *addr1_s(arr, 1, stride) += gamma*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); // delta *addr1_s(arr, 0, stride) += 2*delta*(*addr1_s(arr, 1, stride)); // scaling if( scaling > 0) { *addr1_s(arr, 0, stride) *= zeta; } } else { // inv-scaling if( scaling < 0 ) { *addr1_s(arr, 0, stride) *= 1/zeta; *addr1_s(arr, 1, stride) *= zeta; *addr1_s(arr, 2, stride) *= 1/zeta; *addr1_s(arr, 3, stride) *= zeta; } // alpha *addr1_s(arr, 0, stride) += 2*alpha*(*addr1_s(arr, 1, stride)); *addr1_s(arr, 2, stride) += alpha*(*addr1_s(arr, 1, stride) + *addr1_s(arr, 3, stride)); // beta *addr1_s(arr, 1, stride) += beta*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); // gamma *addr1_s(arr, 0, stride) += 2*gamma*(*addr1_s(arr, 1, stride)); // delta // none // scaling // none } } } // hole static void accel_lift_op4s_prolog_stride_hole_s( float *arr, int off, int N, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { assert( N-off >= 4 ); #ifdef NDEBUG UNUSED(N); #endif assert( 1 == dwt_util_get_num_workers() ); { if( off ) { // inv-scaling if( scaling < 0 ) { // TODO } // alpha *addr1_s(arr, 1, stride) += alpha*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); *addr1_s(arr, 3, stride) += alpha*(*addr1_s(arr, 2, stride) + *addr1_s(arr, 4, stride)); // beta *addr1_s(arr, 0, stride) += beta *(*addr1_s(arr, 1, stride) + 0.f); *addr1_s(arr, 2, stride) += beta *(*addr1_s(arr, 1, stride) + *addr1_s(arr, 3, stride)); // gamma *addr1_s(arr, 1, stride) += gamma*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); // delta *addr1_s(arr, 0, stride) += 
delta*(*addr1_s(arr, 1, stride) + 0.f); // scaling if( scaling > 0) { *addr1_s(arr, 0, stride) *= zeta; } } else { // inv-scaling if( scaling < 0 ) { *addr1_s(arr, 0, stride) *= 1/zeta; *addr1_s(arr, 1, stride) *= zeta; *addr1_s(arr, 2, stride) *= 1/zeta; *addr1_s(arr, 3, stride) *= zeta; } // alpha *addr1_s(arr, 0, stride) += alpha*(*addr1_s(arr, 1, stride) + 0.f); *addr1_s(arr, 2, stride) += alpha*(*addr1_s(arr, 1, stride) + *addr1_s(arr, 3, stride)); // beta *addr1_s(arr, 1, stride) += beta *(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); // gamma *addr1_s(arr, 0, stride) += gamma*(*addr1_s(arr, 1, stride) + 0.f); // delta // none // scaling // none } } } // zero static void accel_lift_op4s_prolog_stride_zero_s( float *arr, int off, int N, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { assert( N-off >= 4 ); #ifdef NDEBUG UNUSED(N); #endif assert( 1 == dwt_util_get_num_workers() ); { if( off ) { // initially zeros float neg1 = 0.f; float neg2 = 0.f; // inv-scaling if( scaling < 0 ) { // TODO } // alpha neg1 += alpha*(0.f/*[-2]*/ + *addr1_s(arr, 0, stride)); *addr1_s(arr, 1, stride) += alpha*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); *addr1_s(arr, 3, stride) += alpha*(*addr1_s(arr, 2, stride) + *addr1_s(arr, 4, stride)); // beta neg2 += beta *(0.f/*[-3]*/ + neg1/*[-1]*/); *addr1_s(arr, 0, stride) += beta *(neg1/*[-1]*/ + *addr1_s(arr, 1, stride)); *addr1_s(arr, 2, stride) += beta *(*addr1_s(arr, 1, stride) + *addr1_s(arr, 3, stride)); // gamma neg1 += gamma*(neg2/*[-2]*/ + *addr1_s(arr, 0, stride)); *addr1_s(arr, 1, stride) += gamma*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); // delta *addr1_s(arr, 0, stride) += delta*(neg1/*[-1]*/ + *addr1_s(arr, 1, stride)); // scaling if( scaling > 0) { *addr1_s(arr, 0, stride) *= zeta; } } else { // initially zeros float neg1 = 0.f; // inv-scaling if( scaling < 0 ) { *addr1_s(arr, 0, stride) *= 1/zeta; *addr1_s(arr, 1, stride) *= zeta; *addr1_s(arr, 2, stride) *= 1/zeta; *addr1_s(arr, 3, stride) *= zeta; } // alpha *addr1_s(arr, 0, stride) += alpha*(0.f/*[-1]*/ + *addr1_s(arr, 1, stride)); *addr1_s(arr, 2, stride) += alpha*(*addr1_s(arr, 1, stride) + *addr1_s(arr, 3, stride)); // beta neg1 += beta *(0.f/*[-2]*/ + *addr1_s(arr, 0, stride)); *addr1_s(arr, 1, stride) += beta *(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); // gamma *addr1_s(arr, 0, stride) += gamma*(*addr1_s(arr, 1, stride) + neg1/*[-1]*/); // delta // none // scaling // none } } } static void accel_lift_op4s_epilog_s( float *arr, int off, int N, float alpha, float beta, float gamma, float delta, float zeta, int scaling) { assert( N-off >= 4 ); for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = calc_temp_offset2_s(arr, w, off); if( is_even(N-off) ) { // inv-scaling if( scaling < 0 ) { // TODO } // alpha // none // beta arr_local[N-1] += 2*beta*(arr_local[N-2]); // gamma arr_local[N-2] += gamma*(arr_local[N-1]+arr_local[N-3]); // delta arr_local[N-1] += 2*delta*(arr_local[N-2]); arr_local[N-3] += delta*(arr_local[N-4]+arr_local[N-2]); // scaling if( scaling > 0 ) { // FIXME: this is dependend on "off" arr_local[N-4] *= 1/zeta; arr_local[N-3] *= zeta; arr_local[N-2] *= 1/zeta; arr_local[N-1] *= zeta; } } else /* is_odd(N-off) */ { // inv-scaling if( scaling < 0 ) { arr_local[N-1] *= 1/zeta; } // alpha arr_local[N-1] += 2*alpha*(arr_local[N-2]); // beta arr_local[N-2] += beta*(arr_local[N-1]+arr_local[N-3]); // gamma arr_local[N-1] += 2*gamma*(arr_local[N-2]); arr_local[N-3] += 
gamma*(arr_local[N-2]+arr_local[N-4]); // delta arr_local[N-2] += delta*(arr_local[N-1]+arr_local[N-3]); arr_local[N-4] += delta*(arr_local[N-5]+arr_local[N-3]); // scaling if( scaling > 0 ) { // FIXME: this is dependend on "off" arr_local[N-5] *= 1/zeta; arr_local[N-4] *= zeta; arr_local[N-3] *= 1/zeta; arr_local[N-2] *= zeta; arr_local[N-1] *= 1/zeta; } } } } static void accel_lift_op4s_epilog_stride_s( float *arr, int off, int N, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { assert( N-off >= 4 ); assert( 1 == dwt_util_get_num_workers() ); { if( is_even(N - off) ) { // inv-scaling if( scaling < 0 ) { // TODO } // alpha // none // beta *addr1_s(arr, N-1, stride) += 2*beta*(*addr1_s(arr, N-2, stride)); // gamma *addr1_s(arr, N-2, stride) += gamma*(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride)); // delta *addr1_s(arr, N-1, stride) += 2*delta*(*addr1_s(arr, N-2, stride)); *addr1_s(arr, N-3, stride) += delta*(*addr1_s(arr, N-4, stride) + *addr1_s(arr, N-2, stride)); // scaling if( scaling > 0 ) { // FIXME: this is dependend on "off" *addr1_s(arr, N-4, stride) *= 1/zeta; *addr1_s(arr, N-3, stride) *= zeta; *addr1_s(arr, N-2, stride) *= 1/zeta; *addr1_s(arr, N-1, stride) *= zeta; } } else /* is_odd(N-off) */ { // inv-scaling if( scaling < 0 ) { *addr1_s(arr, N-1, stride) *= 1/zeta; } // alpha *addr1_s(arr, N-1, stride) += 2*alpha*(*addr1_s(arr, N-2, stride)); // beta *addr1_s(arr, N-2, stride) += beta*(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride)); // gamma *addr1_s(arr, N-1, stride) += 2*gamma*(*addr1_s(arr, N-2, stride)); *addr1_s(arr, N-3, stride) += gamma*(*addr1_s(arr, N-2, stride) + *addr1_s(arr, N-4, stride)); // delta *addr1_s(arr, N-2, stride) += delta*(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride)); *addr1_s(arr, N-4, stride) += delta*(*addr1_s(arr, N-5, stride) + *addr1_s(arr, N-3, stride)); // scaling if( scaling > 0 ) { // FIXME: this is dependend on "off" *addr1_s(arr, N-5, stride) *= 1/zeta; *addr1_s(arr, N-4, stride) *= zeta; *addr1_s(arr, N-3, stride) *= 1/zeta; *addr1_s(arr, N-2, stride) *= zeta; *addr1_s(arr, N-1, stride) *= 1/zeta; } } } } // hole static void accel_lift_op4s_epilog_stride_hole_s( float *arr, int off, int N, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { assert( N-off >= 4 ); assert( 1 == dwt_util_get_num_workers() ); { if( is_even(N - off) ) { // inv-scaling if( scaling < 0 ) { // TODO } // alpha // none // beta *addr1_s(arr, N-1, stride) += beta *(*addr1_s(arr, N-2, stride) + 0.f); // gamma *addr1_s(arr, N-2, stride) += gamma*(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride)); // delta *addr1_s(arr, N-1, stride) += delta*(*addr1_s(arr, N-2, stride) + 0.f); *addr1_s(arr, N-3, stride) += delta*(*addr1_s(arr, N-4, stride) + *addr1_s(arr, N-2, stride)); // scaling if( scaling > 0 ) { // FIXME: this is dependend on "off" *addr1_s(arr, N-4, stride) *= 1/zeta; *addr1_s(arr, N-3, stride) *= zeta; *addr1_s(arr, N-2, stride) *= 1/zeta; *addr1_s(arr, N-1, stride) *= zeta; } } else /* is_odd(N-off) */ { // inv-scaling if( scaling < 0 ) { *addr1_s(arr, N-1, stride) *= 1/zeta; } // alpha *addr1_s(arr, N-1, stride) += alpha*(*addr1_s(arr, N-2, stride) + 0.f); // beta *addr1_s(arr, N-2, stride) += beta *(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride)); // gamma *addr1_s(arr, N-1, stride) += gamma*(*addr1_s(arr, N-2, stride) + 0.f); *addr1_s(arr, N-3, stride) += gamma*(*addr1_s(arr, N-2, stride) + *addr1_s(arr, N-4, stride)); // delta 
		*addr1_s(arr, N-2, stride) += delta*(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride));
		*addr1_s(arr, N-4, stride) += delta*(*addr1_s(arr, N-5, stride) + *addr1_s(arr, N-3, stride));

		// scaling
		if( scaling > 0 )
		{
			// FIXME: this is dependent on "off"
			*addr1_s(arr, N-5, stride) *= 1/zeta;
			*addr1_s(arr, N-4, stride) *= zeta;
			*addr1_s(arr, N-3, stride) *= 1/zeta;
			*addr1_s(arr, N-2, stride) *= zeta;
			*addr1_s(arr, N-1, stride) *= 1/zeta;
		}
	}
	}
}

// zero
static void accel_lift_op4s_epilog_stride_zero_s(
	float *arr,
	int off,
	int N,
	float alpha,
	float beta,
	float gamma,
	float delta,
	float zeta,
	int scaling,
	int stride
)
{
	assert( N-off >= 4 );

	assert( 1 == dwt_util_get_num_workers() );
	{
		if( is_even(N - off) )
		{
			// initially zeros
			float n0 = 0.f;
			float n1 = 0.f;

			// inv-scaling
			if( scaling < 0 )
			{
				// TODO
			}

			// alpha
			n0 += alpha*(*addr1_s(arr, N-1, stride) + 0.f/*[N+1]*/);

			// beta
			n1 += beta *(n0/*[N-0]*/ + 0.f/*[N+2]*/);
			*addr1_s(arr, N-1, stride) += beta *(*addr1_s(arr, N-2, stride) + n0/*[N-0]*/);

			// gamma
			n0 += gamma*(*addr1_s(arr, N-1, stride) + n1/*[N+1]*/);
			*addr1_s(arr, N-2, stride) += gamma*(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride));

			// delta
			*addr1_s(arr, N-1, stride) += delta*(*addr1_s(arr, N-2, stride) + n0/*[N-0]*/);
			*addr1_s(arr, N-3, stride) += delta*(*addr1_s(arr, N-4, stride) + *addr1_s(arr, N-2, stride));

			// scaling
			if( scaling > 0 )
			{
				// FIXME: this is dependent on "off"
				*addr1_s(arr, N-4, stride) *= 1/zeta;
				*addr1_s(arr, N-3, stride) *= zeta;
				*addr1_s(arr, N-2, stride) *= 1/zeta;
				*addr1_s(arr, N-1, stride) *= zeta;
			}
		}
		else /* is_odd(N-off) */
		{
			// initially zero
			float n0 = 0.f;

			// inv-scaling
			if( scaling < 0 )
			{
				*addr1_s(arr, N-1, stride) *= 1/zeta;
			}

			// alpha
			*addr1_s(arr, N-1, stride) += alpha*(*addr1_s(arr, N-2, stride) + 0.f/*[N+0]*/);

			// beta
			n0 += beta *(*addr1_s(arr, N-1, stride) + 0.f/*[N+1]*/);
			*addr1_s(arr, N-2, stride) += beta *(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride));

			// gamma
			*addr1_s(arr, N-1, stride) += gamma*(*addr1_s(arr, N-2, stride) + n0/*[N+0]*/);
			*addr1_s(arr, N-3, stride) += gamma*(*addr1_s(arr, N-2, stride) + *addr1_s(arr, N-4, stride));

			// delta
			*addr1_s(arr, N-2, stride) += delta*(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride));
			*addr1_s(arr, N-4, stride) += delta*(*addr1_s(arr, N-5, stride) + *addr1_s(arr, N-3, stride));

			// scaling
			if( scaling > 0 )
			{
				// FIXME: this is dependent on "off"
				*addr1_s(arr, N-5, stride) *= 1/zeta;
				*addr1_s(arr, N-4, stride) *= zeta;
				*addr1_s(arr, N-3, stride) *= 1/zeta;
				*addr1_s(arr, N-2, stride) *= zeta;
				*addr1_s(arr, N-1, stride) *= 1/zeta;
			}
		}
	}
}

/**
 * @brief Prolog and epilog for N-off < 4.
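 *
 * The main lifting kernels assert N-off >= 4, so shorter signals are handled
 * here by writing out the alpha/beta/gamma/delta steps for each small N
 * separately, with mirrored boundary samples (hence the 2*coef*(...) terms).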
*/ static void accel_lift_op4s_short_s( float *arr, int off, int N, float alpha, float beta, float gamma, float delta, float zeta, int scaling ) { assert( N-off < 4 ); for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *arr_local = calc_temp_offset2_s(arr, w, off); if(off) { if( N == 2 ) { // inv-scaling if( scaling < 0 ) { // TODO } // alpha arr_local[1] += 2*alpha*(arr_local[0]); // beta arr_local[0] += 2*beta*(arr_local[1]); // gamma arr_local[1] += 2*gamma*(arr_local[0]); // delta arr_local[0] += 2*delta*(arr_local[1]); // scaling if( scaling > 0 ) { arr_local[0] *= zeta; arr_local[1] *= 1/zeta; } } else if( N == 3 ) { // inv-scaling if( scaling < 0 ) { // TODO } // alpha arr_local[1] += alpha*(arr_local[0]+arr_local[2]); // beta arr_local[0] += 2*beta*(arr_local[1]); arr_local[2] += 2*beta*(arr_local[1]); // gamma arr_local[1] += gamma*(arr_local[0]+arr_local[2]); // delta arr_local[0] += 2*delta*(arr_local[1]); arr_local[2] += 2*delta*(arr_local[1]); // scaling if( scaling > 0 ) { arr_local[0] *= zeta; arr_local[1] *= 1/zeta; arr_local[2] *= zeta; } } else /* N == 4 */ { // inv-scaling if( scaling < 0 ) { // TODO } // alpha arr_local[1] += alpha*(arr_local[0]+arr_local[2]); arr_local[3] += 2*alpha*(arr_local[2]); // beta arr_local[0] += 2*beta*(arr_local[1]); arr_local[2] += beta*(arr_local[1]+arr_local[3]); // gamma arr_local[1] += gamma*(arr_local[0]+arr_local[2]); arr_local[3] += 2*gamma*(arr_local[2]); // delta arr_local[0] += 2*delta*(arr_local[1]); arr_local[2] += delta*(arr_local[1]+arr_local[3]); // scaling if( scaling > 0 ) { arr_local[0] *= zeta; arr_local[1] *= 1/zeta; arr_local[2] *= zeta; arr_local[3] *= 1/zeta; } } } else /* !off */ { if( N == 2 ) { // inv-scaling if( scaling < 0 ) { arr_local[0] *= 1/zeta; arr_local[1] *= zeta; } // alpha arr_local[0] += 2*alpha*(arr_local[1]); // beta arr_local[1] += 2*beta*(arr_local[0]); // gamma arr_local[0] += 2*gamma*(arr_local[1]); // delta arr_local[1] += 2*delta*(arr_local[0]); // scaling if( scaling > 0 ) { // TODO } } else /* N == 3 */ { // inv-scaling if( scaling < 0 ) { arr_local[0] *= 1/zeta; arr_local[1] *= zeta; arr_local[2] *= 1/zeta; } // alpha arr_local[0] += 2*alpha*(arr_local[1]); arr_local[2] += 2*alpha*(arr_local[1]); // beta arr_local[1] += beta*(arr_local[0]+arr_local[2]); // gamma arr_local[0] += 2*gamma*(arr_local[1]); arr_local[2] += 2*gamma*(arr_local[1]); // delta arr_local[1] += delta*(arr_local[0]+arr_local[2]); // scaling if( scaling > 0 ) { // TODO } } } } } static void accel_lift_op4s_short_stride_s( float *arr, int off, int N, float alpha, float beta, float gamma, float delta, float zeta, int scaling, int stride ) { assert( N-off < 4 ); assert( 1 == dwt_util_get_num_workers() ); { if( off ) { if( N == 2 ) { // inv-scaling if( scaling < 0 ) { // TODO } // alpha *addr1_s(arr, 1, stride) += 2*alpha*(*addr1_s(arr, 0, stride)); // beta *addr1_s(arr, 0, stride) += 2*beta*(*addr1_s(arr, 1, stride)); // gamma *addr1_s(arr, 1, stride) += 2*gamma*(*addr1_s(arr, 0, stride)); // delta *addr1_s(arr, 0, stride) += 2*delta*(*addr1_s(arr, 1, stride)); // scaling if( scaling > 0 ) { *addr1_s(arr, 0, stride) *= zeta; *addr1_s(arr, 1, stride) *= 1/zeta; } } else if( N == 3 ) { // inv-scaling if( scaling < 0 ) { // TODO } // alpha *addr1_s(arr, 1, stride) += alpha*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); // beta *addr1_s(arr, 0, stride) += 2*beta*(*addr1_s(arr, 1, stride)); *addr1_s(arr, 2, stride) += 2*beta*(*addr1_s(arr, 1, stride)); // gamma *addr1_s(arr, 1, stride) += gamma*(*addr1_s(arr, 0, 
stride) + *addr1_s(arr, 2, stride)); // delta *addr1_s(arr, 0, stride) += 2*delta*(*addr1_s(arr, 1, stride)); *addr1_s(arr, 2, stride) += 2*delta*(*addr1_s(arr, 1, stride)); // scaling if( scaling > 0 ) { *addr1_s(arr, 0, stride) *= zeta; *addr1_s(arr, 1, stride) *= 1/zeta; *addr1_s(arr, 2, stride) *= zeta; } } else /* N == 4 */ { // inv-scaling if( scaling < 0 ) { // TODO } // alpha *addr1_s(arr, 1, stride) += alpha*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); *addr1_s(arr, 3, stride) += 2*alpha*(*addr1_s(arr, 2, stride)); // beta *addr1_s(arr, 0, stride) += 2*beta*(*addr1_s(arr, 1, stride)); *addr1_s(arr, 2, stride) += beta*(*addr1_s(arr, 1, stride) + *addr1_s(arr, 3, stride)); // gamma *addr1_s(arr, 1, stride) += gamma*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); *addr1_s(arr, 3, stride) += 2*gamma*(*addr1_s(arr, 2, stride)); // delta *addr1_s(arr, 0, stride) += 2*delta*(*addr1_s(arr, 1, stride)); *addr1_s(arr, 2, stride) += delta*(*addr1_s(arr, 1, stride) + *addr1_s(arr, 3, stride)); // scaling if( scaling > 0 ) { *addr1_s(arr, 0, stride) *= zeta; *addr1_s(arr, 1, stride) *= 1/zeta; *addr1_s(arr, 2, stride) *= zeta; *addr1_s(arr, 3, stride) *= 1/zeta; } } } else /* !off */ { if( N == 2 ) { // inv-scaling if( scaling < 0 ) { *addr1_s(arr, 0, stride) *= 1/zeta; *addr1_s(arr, 1, stride) *= zeta; } // alpha *addr1_s(arr, 0, stride) += 2*alpha*(*addr1_s(arr, 1, stride)); // beta *addr1_s(arr, 1, stride) += 2*beta*(*addr1_s(arr, 0, stride)); // gamma *addr1_s(arr, 0, stride) += 2*gamma*(*addr1_s(arr, 1, stride)); // delta *addr1_s(arr, 1, stride) += 2*delta*(*addr1_s(arr, 0, stride)); // scaling if( scaling > 0 ) { // TODO } } else /* N == 3 */ { // inv-scaling if( scaling < 0 ) { *addr1_s(arr, 0, stride) *= 1/zeta; *addr1_s(arr, 1, stride) *= zeta; *addr1_s(arr, 2, stride) *= 1/zeta; } // alpha *addr1_s(arr, 0, stride) += 2*alpha*(*addr1_s(arr, 1, stride)); *addr1_s(arr, 2, stride) += 2*alpha*(*addr1_s(arr, 1, stride)); // beta *addr1_s(arr, 1, stride) += beta*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); // gamma *addr1_s(arr, 0, stride) += 2*gamma*(*addr1_s(arr, 1, stride)); *addr1_s(arr, 2, stride) += 2*gamma*(*addr1_s(arr, 1, stride)); // delta *addr1_s(arr, 1, stride) += delta*(*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride)); // scaling if( scaling > 0 ) { // TODO } } } } } static void accel_lift_op4s_s( float *restrict arr, int off, int len, float alpha, float beta, float gamma, float delta, float zeta, int scaling ) { FUNC_BEGIN; assert( len >= 2 ); assert( 0 == off || 1 == off ); if( len-off < 4 ) { accel_lift_op4s_short_s(arr, off, len, alpha, beta, gamma, delta, zeta, scaling); } else { accel_lift_op4s_prolog_s(arr, off, len, alpha, beta, gamma, delta, zeta, scaling); // FIXME: with GCC use (un)likely, i.e. 
__builtin_expect if(1 == get_accel_type()) { const int max_inner_len = to_even(BANK_SIZE) - 4; const int inner_len = to_even(len-off) - 4; const int blocks = inner_len / max_inner_len; // full length blocks for(int b = 0; b < blocks; b++) { const int left = off + b * max_inner_len; const int steps = max_inner_len/2; accel_lift_op4s_main_pb_s(&arr[left], steps, alpha, beta, gamma, delta, zeta, scaling); } // last block if( blocks*max_inner_len < inner_len ) { const int left = off + blocks * max_inner_len; const int steps = (off + inner_len - left)/2; // TODO(ASVP): here should be a test if last block should be accelerated on PicoBlaze or rather computed on MicroBlaze if( steps > 25 ) accel_lift_op4s_main_pb_s(&arr[left], steps, alpha, beta, gamma, delta, zeta, scaling); else accel_lift_op4s_main_s(&arr[left], steps, alpha, beta, gamma, delta, zeta, scaling); } } else if(0 == get_accel_type()) { accel_lift_op4s_main_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); } else if(2 == get_accel_type()) { // empty } else if(3 == get_accel_type()) { accel_lift_op4s_main_pb_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); } else if(4 == get_accel_type()) { accel_lift_op4s_main_dl_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); } else if(5 == get_accel_type()) { const int steps = (to_even(len-off)-4)/2; if( steps < 3 ) accel_lift_op4s_main_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); else accel_lift_op4s_main_sdl_ref_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); } else if(6 == get_accel_type()) { const int steps = (to_even(len-off)-4)/2; if( steps < 3 ) accel_lift_op4s_main_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); else accel_lift_op4s_main_sdl2_ref_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); } else if(7 == get_accel_type()) { const int steps = (to_even(len-off)-4)/2; if( steps < 3 ) accel_lift_op4s_main_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); else accel_lift_op4s_main_sdl6_ref_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); } else if(8 == get_accel_type()) { const int steps = (to_even(len-off)-4)/2; if( steps < 3 ) accel_lift_op4s_main_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); else #ifdef __SSE__ accel_lift_op4s_main_sdl2_sse_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); #else accel_lift_op4s_main_sdl2_ref_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); #endif } else if(9 == get_accel_type()) { const int steps = (to_even(len-off)-4)/2; if( steps < 3 ) accel_lift_op4s_main_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); else { #ifdef __SSE__ accel_lift_op4s_main_sdl6_sse_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); #else accel_lift_op4s_main_sdl6_ref_s(arr+off, steps, alpha, beta, gamma, delta, zeta, scaling); #endif } } else if(10 == get_accel_type()) { // FIXME: this needs to be threated inside of caller if( 4 != dwt_util_get_num_workers() ) accel_lift_op4s_main_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); else accel_lift_op4s_main_dl4_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); } else if(11 == get_accel_type()) { // FIXME: this needs to be threated inside of caller if( 4 != dwt_util_get_num_workers() ) accel_lift_op4s_main_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); else #ifdef __SSE__ accel_lift_op4s_main_dl4_sse_s(arr+off, 
(to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); #else accel_lift_op4s_main_dl4_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); #endif } else if(12 == get_accel_type()) { // FIXME: this needs to be threated inside of caller if( 4 != dwt_util_get_num_workers() ) { accel_lift_op4s_main_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); } else { #ifdef __SSE__ accel_lift_op4s_main_ml4_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); #else accel_lift_op4s_main_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); #endif } } else if(13 == get_accel_type()) { accel_lift_op4s_main_nosse_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); } else if(14 == get_accel_type()) { accel_lift_op4s_main_dl_nosse_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); } else if(15 == get_accel_type()) { accel_lift_op4s_main_dl4line_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); } else if(16 == get_accel_type()) { #ifdef __SSE__ accel_lift_op4s_main_dl4line_sse_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); #else accel_lift_op4s_main_dl4line_s(arr+off, (to_even(len-off)-4)/2, alpha, beta, gamma, delta, zeta, scaling); #endif } else { dwt_util_log(LOG_ERR, "Unsupported value of acceleration.\n"); dwt_util_abort(); } accel_lift_op4s_epilog_s(arr, off, len, alpha, beta, gamma, delta, zeta, scaling); } FUNC_END; } void dwt_cdf97_f_ex_stride_s( const float *src, float *dst_l, float *dst_h, float *tmp, int N, int stride) { assert( N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride ); const int offset = 1; // fix for small N if(N < 2) { if(1 == N) dst_l[0] = src[0] * dwt_cdf97_s1_s; return; } // copy src into tmp for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *tmp_local = calc_temp_offset2_s(tmp, w, offset); const float *src_local = calc_data_offset_const_s(src, w); #ifndef DISABLE_MEMCPY dwt_util_memcpy_stride_s(tmp_local, sizeof(float), src_local, stride, N); #endif #ifdef ENABLE_LAZY_MEMCPY // FIXME: copy only if column is processed if( sizeof(float) != stride ) dwt_util_memcpy_stride_s(tmp_local, sizeof(float), src_local, stride, N); #endif } accel_lift_op4s_s(tmp, offset, N, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1); // copy tmp into dst for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *tmp_local = calc_temp_offset2_s(tmp, w, offset); float *dst_l_local = calc_data_offset_s(dst_l, w); float *dst_h_local = calc_data_offset_s(dst_h, w); #ifndef DISABLE_MEMCPY dwt_util_memcpy_stride_s(dst_l_local, stride, tmp_local+0, 2*sizeof(float), ceil_div2(N)); dwt_util_memcpy_stride_s(dst_h_local, stride, tmp_local+1, 2*sizeof(float), floor_div2(N)); #endif #ifdef ENABLE_LAZY_MEMCPY // FIXME: copy only if column is processed; keep L and H subbands interleaved const float *src_local = calc_data_offset_const_s(src, w); if( sizeof(float) != stride ) dwt_util_memcpy_stride_s(src_local, stride, tmp_local, sizeof(float), N); #endif } } static void dwt_cdf97_f_ex_stride_inplace_part_exceptions_s( float *ptr, int N, int stride ) { const int offset = 1; if( N < 2 ) { // respect stride if( 1 == N ) ptr[0] *= dwt_cdf97_s1_s; return; } else { accel_lift_op4s_short_stride_s(ptr, offset, N, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride); } } static void 
dwt_cdf97_f_ex_stride_inplace_part_prolog_s( float *ptr, int N, int stride ) { const int offset = 1; accel_lift_op4s_prolog_stride_s(ptr, offset, N, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride); } static void dwt_cdf97_f_ex_stride_inplace_part_core_s( float *ptr, int N, int stride ) { const int offset = 1; #if 0 accel_lift_op4s_fwd_main_stride_s(addr1_s(ptr, offset, stride), (to_even(N-offset)-4)/2, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride); #endif #if 1 accel_lift_op4s_fwd_main_dl_stride_s(addr1_s(ptr, offset, stride), (to_even(N-offset)-4)/2, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride); #endif #if 0 accel_lift_op4s_fwd_main_sdl_stride_ref_s(addr1_s(ptr, offset, stride), (to_even(N-offset)-4)/2, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride); #endif } static void dwt_cdf97_f_ex_stride_inplace_part_core_sdl_s( float *ptr, int N, int stride ) { const int offset = 1; accel_lift_op4s_fwd_main_sdl_stride_ref_s(addr1_s(ptr, offset, stride), (to_even(N-offset)-4)/2, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride); } #ifdef __SSE__ static void dwt_cdf97_f_ex_stride_inplace_part_core_sdl_sse_s( float *ptr, int N, int stride ) { const int offset = 1; accel_lift_op4s_fwd_main_sdl_stride_sse_s(addr1_s(ptr, offset, stride), (to_even(N-offset)-4)/2, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride); } #endif static void dwt_cdf97_f_ex_stride_inplace_part_epilog_s( float *ptr, int N, int stride ) { const int offset = 1; accel_lift_op4s_epilog_stride_s(ptr, offset, N, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride); } /* * CDF 9/7 (4,4) * http://www.ece.uvic.ca/~frodo/publications/phdthesis.pdf * * 9/7-F, p. 88, 89 * * [24] M. Antonini, M. Barlaud, P. Mathieu, and I. Daubechies. Image coding using wavelet transform. IEEE Trans. on Image Processing, 1(2):205–220, April 1992. * * [40] A. R. Calderbank, I. Daubechies,W. Sweldens, and B.-L. Yeo. Wavelet transforms that map integers to integers. Applied and Computational Harmonic Analysis, 5(3):332–369, July 1998. 
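 *
 * Informally, one forward pass of the 9/7 lifting scheme over x[0..N-1]
 * (even samples -> lowpass s, odd samples -> highpass d) consists of
 *
 *   d[i] = x[2i+1] + alpha * (x[2i]  + x[2i+2])     predict 1
 *   s[i] = x[2i]   + beta  * (d[i-1] + d[i])        update 1
 *   d[i] += gamma * (s[i]   + s[i+1])               predict 2
 *   s[i] += delta * (d[i-1] + d[i])                 update 2
 *   s[i] *= zeta,  d[i] *= 1/zeta                   scaling
 *
 * using the alpha..zeta parameter naming of the accel_lift_op4s_* kernels
 * above; the plain prolog/epilog variants mirror a missing neighbour at the
 * boundary (the 2*coef*(...) terms), while the _hole_ and _zero_ variants
 * substitute 0.f for it. See the references above for the exact coefficient
 * values.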
*/ void dwt_cdf97_f_ex_stride_i( const int *src, int *dst_l, int *dst_h, int *tmp, int N, int stride) { assert( N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) return; // copy src into tmp dwt_util_memcpy_stride_i(tmp, sizeof(int), src, stride, N); // predict 1 + update 1 for(int i=1; i<N-2+(N&1); i+=2) tmp[i] -= ( +203*(tmp[i-1]+tmp[i+1]) - (1<<6) ) >> 7; if(is_odd(N)) tmp[N-1] += ( -217*(tmp[N-2]+tmp[N-2]) + (1<<11) ) >> 12; else tmp[N-1] -= ( +203*(tmp[N-2]+tmp[N-2]) - (1<<6) ) >> 7; tmp[0] += ( -217*(tmp[1]+tmp[1]) + (1<<11) ) >> 12; for(int i=2; i<N-(N&1); i+=2) tmp[i] += ( -217*(tmp[i-1]+tmp[i+1]) + (1<<11) ) >> 12; // predict 2 + update 2 for(int i=1; i<N-2+(N&1); i+=2) tmp[i] -= ( -113*(tmp[i-1]+tmp[i+1]) - (1<<6) ) >> 7; if(is_odd(N)) tmp[N-1] += ( 1817*(tmp[N-2]+tmp[N-2]) + (1<<11) ) >> 12; else tmp[N-1] -= ( -113*(tmp[N-2]+tmp[N-2]) - (1<<6) ) >> 7; tmp[0] += ( 1817*(tmp[1]+tmp[1]) + (1<<11) ) >> 12; for(int i=2; i<N-(N&1); i+=2) tmp[i] += ( 1817*(tmp[i-1]+tmp[i+1]) + (1<<11) ) >> 12; // copy tmp into dst dwt_util_memcpy_stride_i(dst_l, stride, tmp+0, 2*sizeof(int), ceil_div2(N)); dwt_util_memcpy_stride_i(dst_h, stride, tmp+1, 2*sizeof(int), floor_div2(N)); } // http://www.ece.uvic.ca/~frodo/publications/phdthesis.pdf void dwt_cdf53_f_ex_stride_i( const int *src, int *dst_l, int *dst_h, int *tmp, int N, int stride) { assert( N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) return; // copy src into tmp dwt_util_memcpy_stride_i(tmp, sizeof(int), src, stride, N); // predict 1 + update 1 for(int i=1; i<N-2+(N&1); i+=2) tmp[i] -= (tmp[i-1] + tmp[i+1]) >> 1; if(is_odd(N)) tmp[N-1] += (tmp[N-2] + 1) >> 1; else tmp[N-1] -= tmp[N-2]; tmp[0] += (tmp[1] + 1) >> 1; for(int i=2; i<N-(N&1); i+=2) tmp[i] += ( (tmp[i-1] + tmp[i+1]) + 2 ) >> 2; // copy tmp into dst dwt_util_memcpy_stride_i(dst_l, stride, tmp+0, 2*sizeof(int), ceil_div2(N)); dwt_util_memcpy_stride_i(dst_h, stride, tmp+1, 2*sizeof(int), floor_div2(N)); } void dwt_cdf53_f_ex_stride_s( const float *src, float *dst_l, float *dst_h, float *tmp, int N, int stride) { assert( N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst_l[0] = src[0] * dwt_cdf53_s1_s; return; } // copy src into tmp dwt_util_memcpy_stride_s(tmp, sizeof(float), src, stride, N); // predict 1 + update 1 for(int i=1; i<N-2+(N&1); i+=2) tmp[i] -= dwt_cdf53_p1_s * (tmp[i-1] + tmp[i+1]); if(is_odd(N)) tmp[N-1] += 2 * dwt_cdf53_u1_s * tmp[N-2]; else tmp[N-1] -= 2 * dwt_cdf53_p1_s * tmp[N-2]; tmp[0] += 2 * dwt_cdf53_u1_s * tmp[1]; for(int i=2; i<N-(N&1); i+=2) tmp[i] += dwt_cdf53_u1_s * (tmp[i-1] + tmp[i+1]); // scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s1_s; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s2_s; // copy tmp into dst dwt_util_memcpy_stride_s(dst_l, stride, tmp+0, 2*sizeof(float), ceil_div2(N)); dwt_util_memcpy_stride_s(dst_h, stride, tmp+1, 2*sizeof(float), floor_div2(N)); } void dwt_cdf53_f_ex_stride_inplace_s( float *tmp, int N, int stride ) { assert( N >= 0 && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) *addr1_s(tmp,0,stride) *= dwt_cdf53_s1_s; return; } // predict 1 + update 1 for(int i=1; i<N-2+(N&1); i+=2) *addr1_s(tmp,i,stride) -= dwt_cdf53_p1_s * (*addr1_s(tmp,i-1,stride) + *addr1_s(tmp,i+1,stride)); if(is_odd(N)) *addr1_s(tmp,N-1,stride) += 2 * dwt_cdf53_u1_s * 
*addr1_s(tmp,N-2,stride); else *addr1_s(tmp,N-1,stride) -= 2 * dwt_cdf53_p1_s * *addr1_s(tmp,N-2,stride); *addr1_s(tmp,0,stride) += 2 * dwt_cdf53_u1_s * *addr1_s(tmp,1,stride); for(int i=2; i<N-(N&1); i+=2) *addr1_s(tmp,i,stride) += dwt_cdf53_u1_s * (*addr1_s(tmp,i-1,stride) + *addr1_s(tmp,i+1,stride)); // scale for(int i=0; i<N; i+=2) *addr1_s(tmp,i,stride) *= dwt_cdf53_s1_s; for(int i=1; i<N; i+=2) *addr1_s(tmp,i,stride) *= dwt_cdf53_s2_s; } static float dwt_eaw_w(float n, float m, float alpha) { const float eps = 1.0e-5f; return 1.f / (powf(fabsf(n-m), alpha) + eps); } static void dwt_calc_eaw_w(float *w, float *arr, int N, float alpha) { for(int i = 0; i < N-1; i++) { w[i] = dwt_eaw_w(arr[i], arr[i+1], alpha); } w[N-1] = 0.f; // not necessary } static void dwt_calc_eaw_w_stride_s( float *w, float *arr, int N, int stride, float alpha ) { for(int i = 0; i < N-1; i++) { w[i] = dwt_eaw_w( *addr1_s(arr,i,stride), *addr1_s(arr,i+1,stride), alpha); } w[N-1] = 0.f; // not necessary } // http://www.cs.huji.ac.il/~raananf/projects/eaw/ // TODO: move calculation of weights outside of this function void dwt_eaw53_f_ex_stride_s( const float *src, float *dst_l, float *dst_h, float *tmp, int N, int stride, float *w, // float w[N] float alpha ) { assert( N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst_l[0] = src[0] * dwt_cdf53_s1_s; return; } // copy src into tmp dwt_util_memcpy_stride_s(tmp, sizeof(float), src, stride, N); // FIXME: move outside dwt_calc_eaw_w(w, tmp, N, alpha); // predict 1 + update 1 for(int i=1; i<N-2+(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] -= (wL * tmp[i-1] + wR * tmp[i+1]) / (wL+wR); } if( is_odd(N) ) { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] += (wL * tmp[N-2] + wR * tmp[N-2]) / ( 2.f * (wL+wR) ); } else { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] -= (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR); } { float wL = w[0]; float wR = w[0]; tmp[0] += (wL * tmp[1] + wR * tmp[1]) / ( 2.f * (wL+wR) ); } for(int i=2; i<N-(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] += (wL * tmp[i-1] + wR * tmp[i+1]) / ( 2.f * (wL+wR) ); } // scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s1_s; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s2_s; // copy tmp into dst dwt_util_memcpy_stride_s(dst_l, stride, tmp+0, 2*sizeof(float), ceil_div2(N)); dwt_util_memcpy_stride_s(dst_h, stride, tmp+1, 2*sizeof(float), floor_div2(N)); } void dwt_eaw53_f_ex_stride_inplace_s( float *tmp, int N, int stride, float *w, // float w[N] float alpha ) { assert( N >= 0 && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) *addr1_s(tmp, 0, stride) *= dwt_cdf53_s1_s; return; } // calc weights dwt_calc_eaw_w_stride_s(w, tmp, N, stride, alpha); // predict 1 + update 1 for(int i=1; i<N-2+(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; *addr1_s(tmp, i, stride) -= (wL * *addr1_s(tmp, i-1, stride) + wR * *addr1_s(tmp, i+1, stride)) / (wL+wR); } if( is_odd(N) ) { float wL = w[N-2]; float wR = w[N-2]; *addr1_s(tmp, N-1, stride) += (wL * *addr1_s(tmp, N-2, stride) + wR * *addr1_s(tmp, N-2, stride)) / ( 2.f * (wL+wR) ); } else { float wL = w[N-2]; float wR = w[N-2]; *addr1_s(tmp, N-1, stride) -= (wL * *addr1_s(tmp, N-2, stride) + wR * *addr1_s(tmp, N-2, stride)) / (wL+wR); } { float wL = w[0]; float wR = w[0]; *addr1_s(tmp, 0, stride) += (wL * *addr1_s(tmp, 1, stride) + wR * *addr1_s(tmp, 1, stride)) / ( 2.f * (wL+wR) ); } for(int i=2; i<N-(N&1); i+=2) { float wL = w[i-1]; 
float wR = w[i+0]; *addr1_s(tmp, i, stride) += (wL * *addr1_s(tmp, i-1, stride) + wR * *addr1_s(tmp, i+1, stride)) / ( 2.f * (wL+wR) ); } // scale for(int i=0; i<N; i+=2) *addr1_s(tmp, i, stride) *= dwt_cdf53_s1_s; for(int i=1; i<N; i+=2) *addr1_s(tmp, i, stride) *= dwt_cdf53_s2_s; } // TODO: interpolating version of CDF 5/3 // FIXME: fix scaling void dwt_interp53_f_ex_stride_s( const float *src, float *dst_l, float *dst_h, float *tmp, int N, int stride) { assert( N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst_l[0] = src[0] * dwt_cdf53_s1_s; return; } // copy src into tmp dwt_util_memcpy_stride_s(tmp, sizeof(float), src, stride, N); // predict 1 + update 1 for(int i=1; i<N-2+(N&1); i+=2) tmp[i] -= dwt_cdf53_p1_s * (tmp[i-1] + tmp[i+1]); if(is_odd(N)) ; else tmp[N-1] -= 2 * dwt_cdf53_p1_s * tmp[N-2]; // scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s1_s; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s2_s; // copy tmp into dst dwt_util_memcpy_stride_s(dst_l, stride, tmp+0, 2*sizeof(float), ceil_div2(N)); dwt_util_memcpy_stride_s(dst_h, stride, tmp+1, 2*sizeof(float), floor_div2(N)); } // TODO: implement for N < 4 void dwt_interp2_f_ex_stride_s( const float *src, float *dst_l, float *dst_h, float *tmp, int N, int stride) { assert( N >= 0 && NULL != src && NULL != dst_l && NULL != dst_h && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst_l[0] = src[0] * dwt_cdf53_s1_s; return; } if(N < 4) { dwt_util_log(LOG_WARN, "not implemented\n"); // FIXME return; } // copy src into tmp dwt_util_memcpy_stride_s(tmp, sizeof(float), src, stride, N); // predict 1 + update 1 for(int i=1+2; i<N-2+(N&1)-2; i+=2) tmp[i] -= 0.1f*tmp[i-3] + 0.4f*tmp[i-1] + 0.4f*tmp[i+1] + 0.1f*tmp[i+3]; if( is_even(N) ) tmp[N-1] -= 2*0.1f*tmp[N-5] + 2*0.4f*tmp[N-2]; // scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s1_s; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s2_s; // copy tmp into dst dwt_util_memcpy_stride_s(dst_l, stride, tmp+0, 2*sizeof(float), ceil_div2(N)); dwt_util_memcpy_stride_s(dst_h, stride, tmp+1, 2*sizeof(float), floor_div2(N)); } void dwt_cdf97_i_ex_d( const double *src_l, const double *src_h, double *dst, double *tmp, int N) { dwt_cdf97_i_ex_stride_d( src_l, src_h, dst, tmp, N, sizeof(double) ); } void dwt_cdf53_i_ex_d( const double *src_l, const double *src_h, double *dst, double *tmp, int N) { dwt_cdf53_i_ex_stride_d( src_l, src_h, dst, tmp, N, sizeof(double) ); } void dwt_cdf97_i_ex_s( const float *src_l, const float *src_h, float *dst, float *tmp, int N) { dwt_cdf97_i_ex_stride_s( src_l, src_h, dst, tmp, N, sizeof(float) ); } void dwt_cdf53_i_ex_i( const int *src_l, const int *src_h, int *dst, int *tmp, int N) { dwt_cdf53_i_ex_stride_i( src_l, src_h, dst, tmp, N, sizeof(int) ); } void dwt_cdf53_i_ex_s( const float *src_l, const float *src_h, float *dst, float *tmp, int N) { dwt_cdf53_i_ex_stride_s( src_l, src_h, dst, tmp, N, sizeof(float) ); } void dwt_cdf97_i_ex_stride_d( const double *src_l, const double *src_h, double *dst, double *tmp, int N, int stride) { assert( N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst[0] = src_l[0] * dwt_cdf97_s2_d; return; } // copy src into tmp dwt_util_memcpy_stride_d(tmp+0, 2*sizeof(double), src_l, stride, ceil_div2(N)); dwt_util_memcpy_stride_d(tmp+1, 2*sizeof(double), src_h, stride, floor_div2(N)); // inverse scale for(int i=0; i<N; 
i+=2) tmp[i] = tmp[i] * dwt_cdf97_s2_d; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf97_s1_d; // backward update 2 + backward predict 2 for(int i=2; i<N-(N&1); i+=2) tmp[i] -= dwt_cdf97_u2_d * (tmp[i-1] + tmp[i+1]); tmp[0] -= 2 * dwt_cdf97_u2_d * tmp[1]; if(is_odd(N)) tmp[N-1] -= 2 * dwt_cdf97_u2_d * tmp[N-2]; else tmp[N-1] += 2 * dwt_cdf97_p2_d * tmp[N-2]; for(int i=1; i<N-2+(N&1); i+=2) tmp[i] += dwt_cdf97_p2_d * (tmp[i-1] + tmp[i+1]); // backward update 1 + backward predict 1 for(int i=2; i<N-(N&1); i+=2) tmp[i] -= dwt_cdf97_u1_d * (tmp[i-1] + tmp[i+1]); tmp[0] -= 2 * dwt_cdf97_u1_d * tmp[1]; if(is_odd(N)) tmp[N-1] -= 2 * dwt_cdf97_u1_d * tmp[N-2]; else tmp[N-1] += 2 * dwt_cdf97_p1_d * tmp[N-2]; for(int i=1; i<N-2+(N&1); i+=2) tmp[i] += dwt_cdf97_p1_d * (tmp[i-1] + tmp[i+1]); // copy tmp into dst dwt_util_memcpy_stride_d(dst, stride, tmp, sizeof(double), N); } void dwt_cdf53_i_ex_stride_d( const double *src_l, const double *src_h, double *dst, double *tmp, int N, int stride) { assert( N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst[0] = src_l[0] * dwt_cdf53_s2_d; return; } // copy src into tmp dwt_util_memcpy_stride_d(tmp+0, 2*sizeof(double), src_l, stride, ceil_div2(N)); dwt_util_memcpy_stride_d(tmp+1, 2*sizeof(double), src_h, stride, floor_div2(N)); // inverse scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s2_d; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s1_d; // backward update 1 + backward predict 1 for(int i=2; i<N-(N&1); i+=2) tmp[i] -= dwt_cdf53_u1_d * (tmp[i-1] + tmp[i+1]); tmp[0] -= 2 * dwt_cdf53_u1_d * tmp[1]; if(is_odd(N)) tmp[N-1] -= 2 * dwt_cdf53_u1_d * tmp[N-2]; else tmp[N-1] += 2 * dwt_cdf53_p1_d * tmp[N-2]; for(int i=1; i<N-2+(N&1); i+=2) tmp[i] += dwt_cdf53_p1_d * (tmp[i-1] + tmp[i+1]); // copy tmp into dst dwt_util_memcpy_stride_d(dst, stride, tmp, sizeof(double), N); } void dwt_cdf97_i_ex_stride_s( const float *src_l, const float *src_h, float *dst, float *tmp, int N, int stride) { assert( N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride ); const int offset = 0; // fix for small N if(N < 2) { if(1 == N) dst[0] = src_l[0] * dwt_cdf97_s2_s; // FIXME: 1/zeta return; } // copy src into tmp for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *tmp_local = calc_temp_offset2_s(tmp, w, 0); const float *src_l_local = calc_data_offset_const_s(src_l, w); const float *src_h_local = calc_data_offset_const_s(src_h, w); dwt_util_memcpy_stride_s(tmp_local+0, 2*sizeof(float), src_l_local, stride, ceil_div2(N)); dwt_util_memcpy_stride_s(tmp_local+1, 2*sizeof(float), src_h_local, stride, floor_div2(N)); } accel_lift_op4s_s(tmp, offset, N, -dwt_cdf97_u2_s, dwt_cdf97_p2_s, -dwt_cdf97_u1_s, dwt_cdf97_p1_s, dwt_cdf97_s1_s, -1); // copy tmp into dst for(int w = 0; w < dwt_util_get_num_workers(); w++) { float *tmp_local = calc_temp_offset2_s(tmp, w, 0); float *dst_local = calc_data_offset_s(dst, w); dwt_util_memcpy_stride_s(dst_local, stride, tmp_local, sizeof(float), N); } } static void dwt_cdf97_i_ex_stride_inplace_part_exceptions_s( float *ptr, int N, int stride ) { const int offset = 0; if( N < 2 ) { // respect stride if( 1 == N ) ptr[0] *= dwt_cdf97_s2_s; return; } else { accel_lift_op4s_short_stride_s(ptr, offset, N,-dwt_cdf97_u2_s, dwt_cdf97_p2_s, -dwt_cdf97_u1_s, dwt_cdf97_p1_s, dwt_cdf97_s1_s, -1, stride); } } static void dwt_cdf97_i_ex_stride_inplace_part_prolog_s( float *ptr, int N, int stride ) { const int offset = 0; 
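	// The inverse parts reuse the same generic lifting prolog/core/epilog,
	// only with the coefficients passed in reverse order and with opposite
	// signs (-u2, p2, -u1, p1) and scaling = -1, which undoes the forward
	// steps; offset is 0 because the L and H samples are already interleaved
	// in place.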
accel_lift_op4s_prolog_stride_s(ptr, offset, N, -dwt_cdf97_u2_s, dwt_cdf97_p2_s, -dwt_cdf97_u1_s, dwt_cdf97_p1_s, dwt_cdf97_s1_s, -1, stride); } // hole static void dwt_cdf97_i_ex_stride_inplace_part_prolog_hole_s( float *ptr, int N, int stride ) { const int offset = 0; accel_lift_op4s_prolog_stride_hole_s(ptr, offset, N, -dwt_cdf97_u2_s, dwt_cdf97_p2_s, -dwt_cdf97_u1_s, dwt_cdf97_p1_s, dwt_cdf97_s1_s, -1, stride); } // zero static void dwt_cdf97_i_ex_stride_inplace_part_prolog_zero_s( float *ptr, int N, int stride ) { const int offset = 0; accel_lift_op4s_prolog_stride_zero_s(ptr, offset, N, -dwt_cdf97_u2_s, dwt_cdf97_p2_s, -dwt_cdf97_u1_s, dwt_cdf97_p1_s, dwt_cdf97_s1_s, -1, stride); } static void dwt_cdf97_i_ex_stride_inplace_part_core_s( float *ptr, int N, int stride ) { const int offset = 0; #if 0 accel_lift_op4s_inv_main_stride_s(addr1_s(ptr, offset, stride), (to_even(N-offset)-4)/2, -dwt_cdf97_u2_s, dwt_cdf97_p2_s, -dwt_cdf97_u1_s, dwt_cdf97_p1_s, dwt_cdf97_s1_s, -1, stride); #else accel_lift_op4s_inv_main_dl_stride_s(addr1_s(ptr, offset, stride), (to_even(N-offset)-4)/2, -dwt_cdf97_u2_s, dwt_cdf97_p2_s, -dwt_cdf97_u1_s, dwt_cdf97_p1_s, dwt_cdf97_s1_s, -1, stride); #endif } static void dwt_cdf53_i_ex_stride_inplace_part_core_s( float *ptr, int N, int stride ) { const int offset = 0; accel_lift_op2s_inv_main_stride_s(addr1_s(ptr, offset, stride), (to_even(N-offset)-2)/2, -dwt_cdf53_u1_s, dwt_cdf53_p1_s, dwt_cdf53_s1_s, -1, stride); } static void dwt_cdf97_i_ex_stride_inplace_part_epilog_s( float *ptr, int N, int stride ) { const int offset = 0; accel_lift_op4s_epilog_stride_s(ptr, offset, N, -dwt_cdf97_u2_s, dwt_cdf97_p2_s, -dwt_cdf97_u1_s, dwt_cdf97_p1_s, dwt_cdf97_s1_s, -1, stride); } // hole static void dwt_cdf97_i_ex_stride_inplace_part_epilog_hole_s( float *ptr, int N, int stride ) { const int offset = 0; accel_lift_op4s_epilog_stride_hole_s(ptr, offset, N, -dwt_cdf97_u2_s, dwt_cdf97_p2_s, -dwt_cdf97_u1_s, dwt_cdf97_p1_s, dwt_cdf97_s1_s, -1, stride); } // zero static void dwt_cdf97_i_ex_stride_inplace_part_epilog_zero_s( float *ptr, int N, int stride ) { const int offset = 0; accel_lift_op4s_epilog_stride_zero_s(ptr, offset, N, -dwt_cdf97_u2_s, dwt_cdf97_p2_s, -dwt_cdf97_u1_s, dwt_cdf97_p1_s, dwt_cdf97_s1_s, -1, stride); } void dwt_cdf97_i_ex_stride_i( const int *src_l, const int *src_h, int *dst, int *tmp, int N, int stride) { assert( N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) return; // copy src into tmp dwt_util_memcpy_stride_i(tmp+0, 2*sizeof(int), src_l, stride, ceil_div2(N)); dwt_util_memcpy_stride_i(tmp+1, 2*sizeof(int), src_h, stride, floor_div2(N)); // backward update 2 + backward predict 2 for(int i=2; i<N-(N&1); i+=2) tmp[i] -= ( 1817*(tmp[i-1]+tmp[i+1]) + (1<<11) ) >> 12; tmp[0] -= ( 1817*(tmp[1]+tmp[1]) + (1<<11) ) >> 12; if(is_odd(N)) tmp[N-1] -= ( 1817*(tmp[N-2]+tmp[N-2]) + (1<<11) ) >> 12; else tmp[N-1] += ( -113*(tmp[N-2]+tmp[N-2]) - (1<<6) ) >> 7; for(int i=1; i<N-2+(N&1); i+=2) tmp[i] += ( -113*(tmp[i-1]+tmp[i+1]) - (1<<6) ) >> 7; // backward update 1 + backward predict 1 for(int i=2; i<N-(N&1); i+=2) tmp[i] -= ( -217*(tmp[i-1]+tmp[i+1]) + (1<<11) ) >> 12; tmp[0] -= ( -217*(tmp[1]+tmp[1]) + (1<<11) ) >> 12; if(is_odd(N)) tmp[N-1] -= ( -217*(tmp[N-2]+tmp[N-2]) + (1<<11) ) >> 12; else tmp[N-1] += ( +203*(tmp[N-2]+tmp[N-2]) - (1<<6) ) >> 7; for(int i=1; i<N-2+(N&1); i+=2) tmp[i] += ( +203*(tmp[i-1]+tmp[i+1]) - (1<<6) ) >> 7; // copy tmp into dst dwt_util_memcpy_stride_i(dst, stride, tmp, 
sizeof(int), N); } void dwt_cdf53_i_ex_stride_i( const int *src_l, const int *src_h, int *dst, int *tmp, int N, int stride) { assert( N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) return; // copy src into tmp dwt_util_memcpy_stride_i(tmp+0, 2*sizeof(int), src_l, stride, ceil_div2(N)); dwt_util_memcpy_stride_i(tmp+1, 2*sizeof(int), src_h, stride, floor_div2(N)); // backward update 1 + backward predict 1 for(int i=2; i<N-(N&1); i+=2) tmp[i] -= ( (tmp[i-1] + tmp[i+1]) + 2 ) >> 2; tmp[0] -= (tmp[1] + 1) >> 1; if(is_odd(N)) tmp[N-1] -= (tmp[N-2] + 1) >> 1; else tmp[N-1] += tmp[N-2]; for(int i=1; i<N-2+(N&1); i+=2) tmp[i] += ( tmp[i-1] + tmp[i+1] ) >> 1; // copy tmp into dst dwt_util_memcpy_stride_i(dst, stride, tmp, sizeof(int), N); } void dwt_cdf53_i_ex_stride_s( const float *src_l, const float *src_h, float *dst, float *tmp, int N, int stride) { assert( N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst[0] = src_l[0] * dwt_cdf53_s2_s; return; } // copy src into tmp dwt_util_memcpy_stride_s(tmp+0, 2*sizeof(float), src_l, stride, ceil_div2(N)); dwt_util_memcpy_stride_s(tmp+1, 2*sizeof(float), src_h, stride, floor_div2(N)); // inverse scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s2_s; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s1_s; // backward update 1 + backward predict 1 for(int i=2; i<N-(N&1); i+=2) tmp[i] -= dwt_cdf53_u1_s * (tmp[i-1] + tmp[i+1]); tmp[0] -= 2 * dwt_cdf53_u1_s * tmp[1]; if(is_odd(N)) tmp[N-1] -= 2 * dwt_cdf53_u1_s * tmp[N-2]; else tmp[N-1] += 2 * dwt_cdf53_p1_s * tmp[N-2]; for(int i=1; i<N-2+(N&1); i+=2) tmp[i] += dwt_cdf53_p1_s * (tmp[i-1] + tmp[i+1]); // copy tmp into dst dwt_util_memcpy_stride_s(dst, stride, tmp, sizeof(float), N); } void dwt_cdf53_i_ex_stride_inplace_s( float *tmp, int N, int stride ) { assert( N >= 0 && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) *addr1_s(tmp, 0, stride) *= dwt_cdf53_s2_s; return; } // inverse scale for(int i=0; i<N; i+=2) *addr1_s(tmp, i, stride) *= dwt_cdf53_s2_s; for(int i=1; i<N; i+=2) *addr1_s(tmp, i, stride) *= dwt_cdf53_s1_s; // backward update 1 + backward predict 1 for(int i=2; i<N-(N&1); i+=2) *addr1_s(tmp, i, stride) -= dwt_cdf53_u1_s * (*addr1_s(tmp, i-1, stride) + *addr1_s(tmp, i+1, stride)); *addr1_s(tmp, 0, stride) -= 2 * dwt_cdf53_u1_s * *addr1_s(tmp, 1, stride); if( is_odd(N) ) *addr1_s(tmp, N-1, stride) -= 2 * dwt_cdf53_u1_s * *addr1_s(tmp, N-2, stride); else *addr1_s(tmp, N-1, stride) += 2 * dwt_cdf53_p1_s * *addr1_s(tmp, N-2, stride); for(int i=1; i<N-2+(N&1); i+=2) *addr1_s(tmp, i, stride) += dwt_cdf53_p1_s * (*addr1_s(tmp, i-1, stride) + *addr1_s(tmp, i+1, stride)); } void dwt_eaw53_i_ex_stride_s( const float *src_l, const float *src_h, float *dst, float *tmp, int N, int stride, float *w // float w[N] ) { assert( N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst[0] = src_l[0] * dwt_cdf53_s2_s; return; } // copy src into tmp dwt_util_memcpy_stride_s(tmp+0, 2*sizeof(float), src_l, stride, ceil_div2(N)); dwt_util_memcpy_stride_s(tmp+1, 2*sizeof(float), src_h, stride, floor_div2(N)); // inverse scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s2_s; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s1_s; // backward update 1 + backward predict 1 for(int i=2; i<N-(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] -= 
( wL*tmp[i-1] + wR*tmp[i+1] ) / ( 2.f*(wL+wR) ); } { float wL = w[0]; float wR = w[0]; tmp[0] -= (wL * tmp[1] + wR * tmp[1]) / ( 2.f * (wL+wR) ); } if( is_odd(N) ) { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] -= (wL * tmp[N-2] + wR * tmp[N-2]) / ( 2.f * (wL+wR) ); } else { float wL = w[N-2]; float wR = w[N-2]; tmp[N-1] += (wL * tmp[N-2] + wR * tmp[N-2]) / (wL+wR); } for(int i=1; i<N-2+(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; tmp[i] += ( wL*tmp[i-1] + wR*tmp[i+1] ) / (wL+wR); } // copy tmp into dst dwt_util_memcpy_stride_s(dst, stride, tmp, sizeof(float), N); } void dwt_eaw53_i_ex_stride_inplace_s( float *tmp, int N, int stride, float *w // float w[N] ) { assert( N >= 0 && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) *addr1_s(tmp, 0, stride) *= dwt_cdf53_s2_s; return; } // inverse scale for(int i=0; i<N; i+=2) *addr1_s(tmp, i, stride) *= dwt_cdf53_s2_s; for(int i=1; i<N; i+=2) *addr1_s(tmp, i, stride) *= dwt_cdf53_s1_s; // backward update 1 + backward predict 1 for(int i=2; i<N-(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; *addr1_s(tmp, i, stride) -= ( wL * *addr1_s(tmp, i-1, stride) + wR * *addr1_s(tmp, i+1, stride) ) / ( 2.f*(wL+wR) ); } { float wL = w[0]; float wR = w[0]; *addr1_s(tmp, 0, stride) -= (wL * *addr1_s(tmp, 1, stride) + wR * *addr1_s(tmp, 1, stride)) / ( 2.f * (wL+wR) ); } if( is_odd(N) ) { float wL = w[N-2]; float wR = w[N-2]; *addr1_s(tmp, N-1, stride) -= (wL * *addr1_s(tmp, N-2, stride) + wR * *addr1_s(tmp, N-2, stride)) / ( 2.f * (wL+wR) ); } else { float wL = w[N-2]; float wR = w[N-2]; *addr1_s(tmp, N-1, stride) += (wL * *addr1_s(tmp, N-2, stride) + wR * *addr1_s(tmp, N-2, stride)) / (wL+wR); } for(int i=1; i<N-2+(N&1); i+=2) { float wL = w[i-1]; float wR = w[i+0]; *addr1_s(tmp, i, stride) += ( wL * *addr1_s(tmp, i-1, stride) + wR * *addr1_s(tmp, i+1, stride) ) / (wL+wR); } } void dwt_interp53_i_ex_stride_s( const float *src_l, const float *src_h, float *dst, float *tmp, int N, int stride) { assert( N >= 0 && NULL != src_l && NULL != src_h && NULL != dst && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) { if(1 == N) dst[0] = src_l[0] * dwt_cdf53_s2_s; return; } // copy src into tmp dwt_util_memcpy_stride_s(tmp+0, 2*sizeof(float), src_l, stride, ceil_div2(N)); dwt_util_memcpy_stride_s(tmp+1, 2*sizeof(float), src_h, stride, floor_div2(N)); // inverse scale for(int i=0; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s2_s; for(int i=1; i<N; i+=2) tmp[i] = tmp[i] * dwt_cdf53_s1_s; // backward update 1 + backward predict 1 if(is_odd(N)) ; else tmp[N-1] += 2 * dwt_cdf53_p1_s * tmp[N-2]; for(int i=1; i<N-2+(N&1); i+=2) tmp[i] += dwt_cdf53_p1_s * (tmp[i-1] + tmp[i+1]); // copy tmp into dst dwt_util_memcpy_stride_s(dst, stride, tmp, sizeof(float), N); } void dwt_zero_padding_f_d( double *dst_l, double *dst_h, int N, int N_dst_L, int N_dst_H) { dwt_zero_padding_f_stride_d( dst_l, dst_h, N, N_dst_L, N_dst_H, sizeof(double) ); } void dwt_zero_padding_f_s( float *dst_l, float *dst_h, int N, int N_dst_L, int N_dst_H) { dwt_zero_padding_f_stride_s( dst_l, dst_h, N, N_dst_L, N_dst_H, sizeof(float) ); } void dwt_zero_padding_f_stride_d( double *dst_l, double *dst_h, int N, int N_dst_L, int N_dst_H, int stride) { assert( N >= 0 && N_dst_L >= 0 && N_dst_H >= 0 && 0 == ((N_dst_L-N_dst_H)&~1) && NULL != dst_l && NULL != dst_h && 0 != stride ); // FIXME: 0 == ((N_dst_L-N_dst_H)&~1) if(N_dst_L || N_dst_H) { const double zero = 0; dwt_util_memcpy_stride_d(addr1_d(dst_l, ceil_div2(N), stride), stride, &zero, 0, N_dst_L - ceil_div2(N)); 
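		// the tail of the high-pass subband is padded the same way below
		// (a source stride of 0 presumably replicates the single zero value)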
		dwt_util_memcpy_stride_d(addr1_d(dst_h, floor_div2(N), stride), stride, &zero, 0, N_dst_H - floor_div2(N));
	}
}

void dwt_zero_padding_f_stride_i(
	int *dst_l,
	int *dst_h,
	int N,
	int N_dst_L,
	int N_dst_H,
	int stride)
{
	assert( N >= 0 && N_dst_L >= 0 && N_dst_H >= 0 && 0 == ((N_dst_L-N_dst_H)&~1) && NULL != dst_l && NULL != dst_h && 0 != stride );
	// FIXME: 0 == ((N_dst_L-N_dst_H)&~1)

	if(N_dst_L || N_dst_H)
	{
		const int zero = 0;

		dwt_util_memcpy_stride_i(addr1_i(dst_l, ceil_div2(N), stride), stride, &zero, 0, N_dst_L - ceil_div2(N));
		dwt_util_memcpy_stride_i(addr1_i(dst_h, floor_div2(N), stride), stride, &zero, 0, N_dst_H - floor_div2(N));
	}
}

void dwt_zero_padding_f_stride_s(
	float *dst_l,
	float *dst_h,
	int N,
	int N_dst_L,
	int N_dst_H,
	int stride)
{
	assert( N >= 0 && N_dst_L >= 0 && N_dst_H >= 0 && 0 == ((N_dst_L-N_dst_H)&~1) && NULL != dst_l && NULL != dst_h && 0 != stride );
	// FIXME: 0 == ((N_dst_L-N_dst_H)&~1)

	if(N_dst_L || N_dst_H)
	{
		const float zero = 0;

		dwt_util_memcpy_stride_s(addr1_s(dst_l, ceil_div2(N), stride), stride, &zero, 0, N_dst_L - ceil_div2(N));
		dwt_util_memcpy_stride_s(addr1_s(dst_h, floor_div2(N), stride), stride, &zero, 0, N_dst_H - floor_div2(N));
	}
}

void dwt_zero_padding_i_d(
	double *dst_l,
	int N,
	int N_dst)
{
	dwt_zero_padding_i_stride_d(
		dst_l,
		N,
		N_dst,
		sizeof(double)
	);
}

void dwt_zero_padding_i_s(
	float *dst_l,
	int N,
	int N_dst)
{
	dwt_zero_padding_i_stride_s(
		dst_l,
		N,
		N_dst,
		sizeof(float)
	);
}

void dwt_zero_padding_i_stride_d(
	double *dst_l,
	int N,
	int N_dst,
	int stride)
{
	assert( N >= 0 && N_dst >= 0 && NULL != dst_l && 0 != stride );

	const double zero = 0;

	dwt_util_memcpy_stride_d(
		addr1_d(dst_l, N, stride),
		stride,
		&zero,
		0,
		N_dst - N);
}

void dwt_zero_padding_i_stride_i(
	int *dst_l,
	int N,
	int N_dst,
	int stride)
{
	assert( N >= 0 && N_dst >= 0 && NULL != dst_l && 0 != stride );

	const int zero = 0;

	dwt_util_memcpy_stride_i(
		addr1_i(dst_l, N, stride),
		stride,
		&zero,
		0,
		N_dst - N);
}

void dwt_zero_padding_i_stride_s(
	float *dst_l,
	int N,
	int N_dst,
	int stride)
{
	assert( N >= 0 && N_dst >= 0 && NULL != dst_l && 0 != stride );

	const float zero = 0;

	dwt_util_memcpy_stride_s(
		addr1_s(dst_l, N, stride),
		stride,
		&zero,
		0,
		N_dst - N);
}

void dwt_util_switch_op(
	enum dwt_op op)
{
	FUNC_BEGIN;

#ifdef __asvp__
	if( op == dwt_util_global_active_op )
		return;

	//WAL_CHECK( wal_mb2pb(worker, 0) );

	//WAL_CHECK( wal_bce_jk_sync_operation(worker) );

	for(int w = 0; w < get_total_workers(); w++)
	{
		WAL_CHECK( wal_reset_worker(worker[w]) );
	}

	switch(op)
	{
		case DWT_OP_LIFT4SA:
		{
			for(int w = 0; w < get_total_workers(); w++)
			{
				WAL_CHECK( wal_start_operation(worker[w], WAL_PBID_P0) );
			}

			float alpha = -dwt_cdf97_p1_s,
				beta = dwt_cdf97_u1_s,
				gamma = -dwt_cdf97_p2_s,
				delta = dwt_cdf97_u2_s,
				zeta = dwt_cdf97_s1_s;

			const int size = 12;

			// FIXME(ASVP): for these coefficients, use memory bank "D"
			const float coeffs[12] = {
				delta, 0.0f,
				gamma, 0.0f,
				beta, 0.0f,
				alpha, 0.0f,
				zeta, 0.0f,
				1/zeta, 0.0f };

			float *addr = dwt_util_allocate_vec_s(size);
			if(!addr)
			{
				dwt_util_log(LOG_ERR, "Failed to allocate vector of %i floats.\n", size);
				dwt_util_abort();
			}

			if( dwt_util_copy_vec_s(coeffs, addr, size) )
				dwt_util_abort();

			assert( is_even(size) );
			assert( is_aligned_8(addr) );

			for(int w = 0; w < get_total_workers(); w++)
			{
				WAL_CHECK( wal_dma_configure(worker[w], 0, addr, 0, WAL_BCE_JSY_DMEM_B, 0, size) );
				WAL_CHECK( wal_dma_start(worker[w], 0, WAL_DMA_REQ_RD) );
				while( wal_dma_isbusy(worker[w], 0x01) )
					;
			}

			free(addr);
		}
		break;
		case DWT_OP_LIFT4SB:
		{
			for(int w = 0; w < get_total_workers(); w++)
			{
				WAL_CHECK(
wal_start_operation(worker[w], WAL_PBID_P1) ); } float alpha = -dwt_cdf97_u2_s, beta = dwt_cdf97_p2_s, gamma = -dwt_cdf97_u1_s, delta = dwt_cdf97_p1_s, zeta = dwt_cdf97_s1_s; const int size = 12; // FIXME(ASVP): for these coeeficients, use memory bank "D" const float coeffs[12] = { delta, 0.0f, gamma, 0.0f, beta, 0.0f, alpha, 0.0f, zeta, 0.0f, 1/zeta, 0.0f }; float *addr = dwt_util_allocate_vec_s(size); if(!addr) { dwt_util_log(LOG_ERR, "Failed to allocate vector of %i floats.\n", size); dwt_util_abort(); } if( dwt_util_copy_vec_s(coeffs, addr, size) ) dwt_util_abort(); assert( is_even(size) ); assert( is_aligned_8(addr) ); for(int w = 0; w < get_total_workers(); w++) { WAL_CHECK( wal_dma_configure(worker[w], 0, addr, 0, WAL_BCE_JSY_DMEM_B, 0, size) ); WAL_CHECK( wal_dma_start(worker[w], 0, WAL_DMA_REQ_RD) ); while( wal_dma_isbusy(worker[w], 0x01) ) ; } free(addr); } break; default: { dwt_util_log(LOG_ERR, "Unknown operation.\n"); dwt_util_abort(); } } dwt_util_global_active_op = op; #else UNUSED(op); #endif FUNC_END; } /** allocated memory aligned on current platform for type of size of elem_size bytes */ static void *alloc_aligned( int elements, size_t elem_size ) { assert( is_pow2(elem_size) ); // alignment for type of given size const size_t align = alignment(elem_size); const size_t size = elements * elem_size; void *addr = (void *)0; addr = (void *)memalign(align, size); assert( is_aligned(addr, align) ); return addr; } void *dwt_util_alloc( int elems, size_t elem_size ) { return malloc(elems*elem_size); } static void **alloc_temp( int threads, int elements, size_t elem_size ) { void **temp; temp = (void **)malloc( sizeof(void*) * threads ); if( !temp ) dwt_util_error("malloc fails!\n"); for(int t = 0; t < threads; t++) { temp[t] = alloc_aligned(elements, elem_size); if( !temp[t] ) dwt_util_error("Unable to allocate temp[] buffer!\n"); } return temp; } static float **alloc_temp_s( int threads, int elements ) { return (float **)alloc_temp(threads, elements, sizeof(float)); } static double **alloc_temp_d( int threads, int elements ) { return (double **)alloc_temp(threads, elements, sizeof(double)); } static int **alloc_temp_i( int threads, int elements ) { return (int **)alloc_temp(threads, elements, sizeof(int)); } static void free_temp( int threads, void **temp ) { for(int t = 0; t < threads; t++) free(temp[t]); free(temp); } static void free_temp_s( int threads, float **temp ) { free_temp(threads, (void **)temp); } static void free_temp_d( int threads, double **temp ) { free_temp(threads, (void **)temp); } static void free_temp_i( int threads, int **temp ) { free_temp(threads, (void **)temp); } void dwt_cdf97_2f_d( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); // FIXME(microblaze): align on 8 bytes boundary (GCC's __attribure__ is ignored) double temp[size_o_big_max]; if(NULL == temp) abort(); int j = 0; const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = 
ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_cdf97_f_ex_stride_d( addr2_d(ptr,y,0,stride_x,stride_y), addr2_d(ptr,y,0,stride_x,stride_y), addr2_d(ptr,y,size_o_dst_x,stride_x,stride_y), temp, size_i_src_x, stride_y); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_cdf97_f_ex_stride_d( addr2_d(ptr,0,x,stride_x,stride_y), addr2_d(ptr,0,x,stride_x,stride_y), addr2_d(ptr,size_o_dst_y,x,stride_x,stride_y), temp, size_i_src_y, stride_x); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_d( addr2_d(ptr,y,0,stride_x,stride_y), addr2_d(ptr,y,size_o_dst_x,stride_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_d( addr2_d(ptr,0,x,stride_x,stride_y), addr2_d(ptr,size_o_dst_y,x,stride_x,stride_y), size_i_src_y, size_o_dst_y, size_o_src_y-size_o_dst_y, stride_x); } j++; } } void dwt_cdf53_2f_d( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); // FIXME(microblaze): align on 8 bytes boundary (GCC's __attribure__ is ignored) double temp[size_o_big_max]; if(NULL == temp) abort(); int j = 0; const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_cdf53_f_ex_stride_d( addr2_d(ptr,y,0,stride_x,stride_y), addr2_d(ptr,y,0,stride_x,stride_y), addr2_d(ptr,y,size_o_dst_x,stride_x,stride_y), temp, size_i_src_x, stride_y); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_cdf53_f_ex_stride_d( addr2_d(ptr,0,x,stride_x,stride_y), addr2_d(ptr,0,x,stride_x,stride_y), addr2_d(ptr,size_o_dst_y,x,stride_x,stride_y), temp, size_i_src_y, stride_x); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_d( addr2_d(ptr,y,0,stride_x,stride_y), addr2_d(ptr,y,size_o_dst_x,stride_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_d( addr2_d(ptr,0,x,stride_x,stride_y), addr2_d(ptr,size_o_dst_y,x,stride_x,stride_y), 
size_i_src_y, size_o_dst_y, size_o_src_y-size_o_dst_y, stride_x); } j++; } } void dwt_cdf97_2f_s2( const void *src, void *dst, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { FUNC_BEGIN; const int threads = dwt_util_get_num_threads(); const int workers = dwt_util_get_num_workers(); #ifdef microblaze dwt_util_switch_op(DWT_OP_LIFT4SA); #endif const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); const int offset = 1; float **temp = alloc_temp_s(threads, calc_and_set_temp_size_s(size_o_big_max, offset) ); int j = 0; const int j_limit = ceil_log2( decompose_one ? size_o_big_max : size_o_big_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); const int lines_x = size_o_src_x; const int lines_y = size_o_src_y; const int workers_segment_y = floor_div(lines_y, workers); const int workers_segment_x = floor_div(lines_x, workers); #ifdef _OPENMP const int threads_segment_y = ceil_div(workers_segment_y, threads); const int threads_segment_x = ceil_div(workers_segment_x, threads); #endif const int workers_lines_y = workers_segment_y * workers; const int workers_lines_x = workers_segment_x * workers; #ifndef DISABLE_Y if( lines_x > 1 ) { set_data_step_s( stride_x ); #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < workers_lines_y; y += workers) { dwt_cdf97_f_ex_stride_s( addr2_const_s(src,y,0,stride_x,stride_y), addr2_s(dst,y,0,stride_x,stride_y), addr2_s(dst,y,size_o_dst_x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_src_x, stride_y); } dwt_util_set_num_workers(1); for(int y = workers_lines_y; y < lines_y; y++) { dwt_cdf97_f_ex_stride_s( addr2_const_s(src,y,0,stride_x,stride_y), addr2_s(dst,y,0,stride_x,stride_y), addr2_s(dst,y,size_o_dst_x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_src_x, stride_y); } dwt_util_set_num_workers(workers); // in the next iteration, the dst takes the role of src // otherwise, the src will be unaffected in the second iteration src = dst; } #endif #ifndef DISABLE_X if( lines_y > 1 ) { set_data_step_s( stride_y ); #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < workers_lines_x; x += workers) { dwt_cdf97_f_ex_stride_s( addr2_const_s(src,0,x,stride_x,stride_y), addr2_s(dst,0,x,stride_x,stride_y), addr2_s(dst,size_o_dst_y,x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_src_y, stride_x); } dwt_util_set_num_workers(1); for(int x = workers_lines_x; x < lines_x; x++) { dwt_cdf97_f_ex_stride_s( addr2_const_s(src,0,x,stride_x,stride_y), addr2_s(dst,0,x,stride_x,stride_y), addr2_s(dst,size_o_dst_y,x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_src_y, stride_x); } dwt_util_set_num_workers(workers); // in the next iteration, the dst takes the role of src // otherwise, the src will be unaffected in the second iteration src = dst; } #endif if(zero_padding) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_s( 
addr2_s(dst,y,0,stride_x,stride_y), addr2_s(dst,y,size_o_dst_x,stride_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_s( addr2_s(dst,0,x,stride_x,stride_y), addr2_s(dst,size_o_dst_y,x,stride_x,stride_y), size_i_src_y, size_o_dst_y, size_o_src_y-size_o_dst_y, stride_x); } j++; } free_temp_s(threads, temp); FUNC_END; } void dwt_cdf97_2f_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { FUNC_BEGIN; const int threads = dwt_util_get_num_threads(); const int workers = dwt_util_get_num_workers(); #ifdef microblaze dwt_util_switch_op(DWT_OP_LIFT4SA); #endif const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); const int offset = 1; float **temp = alloc_temp_s(threads, calc_and_set_temp_size_s(size_o_big_max, offset) ); int j = 0; const int j_limit = ceil_log2( decompose_one ? size_o_big_max : size_o_big_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); const int lines_x = size_o_src_x; const int lines_y = size_o_src_y; const int workers_segment_y = floor_div(lines_y, workers); const int workers_segment_x = floor_div(lines_x, workers); #ifdef _OPENMP const int threads_segment_y = ceil_div(workers_segment_y, threads); const int threads_segment_x = ceil_div(workers_segment_x, threads); #endif const int workers_lines_y = workers_segment_y * workers; const int workers_lines_x = workers_segment_x * workers; #ifndef DISABLE_Y if( lines_x > 1 ) { set_data_step_s( stride_x ); #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < workers_lines_y; y += workers) { dwt_cdf97_f_ex_stride_s( addr2_const_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_src_x, stride_y); } dwt_util_set_num_workers(1); for(int y = workers_lines_y; y < lines_y; y++) { dwt_cdf97_f_ex_stride_s( addr2_const_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_src_x, stride_y); } dwt_util_set_num_workers(workers); } #endif #ifndef DISABLE_X if( lines_y > 1 ) { set_data_step_s( stride_y ); #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < workers_lines_x; x += workers) { dwt_cdf97_f_ex_stride_s( addr2_const_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_src_y, stride_x); } dwt_util_set_num_workers(1); for(int x = workers_lines_x; x < lines_x; x++) { dwt_cdf97_f_ex_stride_s( addr2_const_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_src_y, stride_x); } dwt_util_set_num_workers(workers); } #endif if(zero_padding) { #pragma omp parallel for 
schedule(static, threads_segment_y) for(int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), size_i_src_y, size_o_dst_y, size_o_src_y-size_o_dst_y, stride_x); } j++; } free_temp_s(threads, temp); FUNC_END; } void dwt_cdf97_2f_inplace_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); int j = 0; const int j_limit = ceil_log2( decompose_one ? size_o_big_max : size_o_big_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; const int offset = 1; for(;;) { if( *j_max_ptr == j ) break; // const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); // const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); // const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); // const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); const int stride_y_j = stride_y * (1 << (j)); const int stride_x_j = stride_x * (1 << (j)); const int size_x = size_i_src_x; const int size_y = size_i_src_y; const int pairs_x = (to_even(size_x-offset)-4)/2; // const int pairs_y = (to_even(size_y-offset)-4)/2; const int max_y = to_even(size_y-offset)+offset; if( size_x > 1 && size_x < 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y < 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } if( size_x > 1 && size_x >= 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } if( size_x > 1 && size_x >= 5 && size_y > 1 && size_y >= 5 ) { // this should be stored in CPU cache float l_buff[4 * size_x]; // for y=0 to offset step 1: horizontal only, no vertical for(int y = 0; y < offset; y += 1) { float *ptr1_x = addr2_s(ptr, y+0, offset, stride_x_j, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_s( ptr1_x, pairs_x, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride_y_j); } // for y=offset to offset+4 step 2: horizontal, vertical_prolog0 for(int y = offset; y < offset+2; y += 2) { float *ptr1_x = addr2_s(ptr, y+0, offset, stride_x_j, stride_y_j); float *ptr2_x = addr2_s(ptr, y+1, offset, stride_x_j, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_s( ptr1_x, pairs_x, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_s( ptr2_x, pairs_x, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, 
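	// lifting constants of the forward CDF 9/7 transform: alpha = -p1 (1st predict),
	// beta = u1 (1st update), gamma = -p2 (2nd predict), delta = u2 (2nd update),
	// zeta = s1 (scaling); the trailing +1 presumably selects forward scaling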
dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride_y_j); for(int x = 0; x < size_x; x++) { float *l4 = &l_buff[4*x]; // input addr for 1st coeff in the pair float *ptr0_y = addr2_s(ptr, y+0, x, stride_x_j, stride_y_j); // input addr for 2nd coeff in the pair float *ptr1_y = addr2_s(ptr, y+1, x, stride_x_j, stride_y_j); // output addr for 1st coeff in the pair float *out0_y = addr2_s(ptr, y+0-4, x, stride_x_j, stride_y_j); // output addr for 2nd coeff in the pair float *out1_y = addr2_s(ptr, y+1-4, x, stride_x_j, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_pair_prolog0_s( ptr0_y, // in ptr1_y, // in out0_y, // out out1_y, // out -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, l4 ); } } // for y=offset to offset+4 step 2: horizontal, vertical_prolog1 for(int y = offset+2; y < offset+4; y += 2) { float *ptr1_x = addr2_s(ptr, y+0, offset, stride_x_j, stride_y_j); float *ptr2_x = addr2_s(ptr, y+1, offset, stride_x_j, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_s( ptr1_x, pairs_x, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_s( ptr2_x, pairs_x, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride_y_j); for(int x = 0; x < size_x; x++) { float *l4 = &l_buff[4*x]; // input addr for 1st coeff in the pair float *ptr0_y = addr2_s(ptr, y+0, x, stride_x_j, stride_y_j); // input addr for 2nd coeff in the pair float *ptr1_y = addr2_s(ptr, y+1, x, stride_x_j, stride_y_j); // output addr for 1st coeff in the pair float *out0_y = addr2_s(ptr, y+0-4, x, stride_x_j, stride_y_j); // output addr for 2nd coeff in the pair float *out1_y = addr2_s(ptr, y+1-4, x, stride_x_j, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_pair_prolog1_s( ptr0_y, // in ptr1_y, // in out0_y, // out out1_y, // out -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, l4 ); } } // for y=offset+4 to max_y step 2: horizontal, vertical_core for(int y = offset+4; y < max_y; y += 2) { float *ptr1_x = addr2_s(ptr, y+0, offset, stride_x_j, stride_y_j); float *ptr2_x = addr2_s(ptr, y+1, offset, stride_x_j, stride_y_j); float *out1_x = addr2_s(ptr, y+0-4, offset, stride_x_j, stride_y_j); float *out2_x = addr2_s(ptr, y+1-4, offset, stride_x_j, stride_y_j); float *l4 = l_buff; // offset column for(int x = 0; x < offset; x++) { // input addr for 1st coeff in the pair float *ptr0_y = addr2_s(ptr, y+0, x, stride_x_j, stride_y_j); // input addr for 2nd coeff in the pair float *ptr1_y = addr2_s(ptr, y+1, x, stride_x_j, stride_y_j); // output addr for 1st coeff in the pair float *out0_y = addr2_s(ptr, y+0-4, x, stride_x_j, stride_y_j); // output addr for 2nd coeff in the pair float *out1_y = addr2_s(ptr, y+1-4, x, stride_x_j, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_pair_core_s( ptr0_y, // in ptr1_y, // in out0_y, // out out1_y, // out -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, l4 ); } l4 += 4; float alpha = -dwt_cdf97_p1_s; float beta = dwt_cdf97_u1_s; float gamma = -dwt_cdf97_p2_s; float delta = dwt_cdf97_u2_s; float zeta = dwt_cdf97_s1_s; float l1[4]; float l2[4]; accel_lift_op4s_fwd_main_dl_stride_pair_prolog0_s( addr1_s(ptr1_x, 0, stride_y_j), addr1_s(ptr1_x, 1, stride_y_j), NULL, NULL, alpha, beta, gamma, delta, zeta, l1 ); accel_lift_op4s_fwd_main_dl_stride_pair_prolog0_s( addr1_s(ptr2_x, 0, stride_y_j), addr1_s(ptr2_x, 1, stride_y_j), NULL, NULL, alpha, beta, gamma, delta, zeta, l2 ); ptr1_x = addr1_s(ptr1_x, 2, 
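	// the two prolog0 passes above get NULL output pointers: they only prime the
	// 4-element lifting buffers l1/l2, no coefficients are emitted yet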
stride_y_j); ptr2_x = addr1_s(ptr2_x, 2, stride_y_j); out1_x = addr1_s(out1_x, 2, stride_y_j); out2_x = addr1_s(out2_x, 2, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_pair_prolog1_s( addr1_s(ptr1_x, 0, stride_y_j), addr1_s(ptr1_x, 1, stride_y_j), NULL, NULL, alpha, beta, gamma, delta, zeta, l1 ); accel_lift_op4s_fwd_main_dl_stride_pair_prolog1_s( addr1_s(ptr2_x, 0, stride_y_j), addr1_s(ptr2_x, 1, stride_y_j), NULL, NULL, alpha, beta, gamma, delta, zeta, l2 ); ptr1_x = addr1_s(ptr1_x, 2, stride_y_j); ptr2_x = addr1_s(ptr2_x, 2, stride_y_j); out1_x = addr1_s(out1_x, 2, stride_y_j); out2_x = addr1_s(out2_x, 2, stride_y_j); // loop by pairs from left to right for(int s = 0; s < pairs_x; s++) { float *ptr_y0_x0 = addr1_s(ptr1_x, 0, stride_y_j); float *ptr_y0_x1 = addr1_s(ptr1_x, 1, stride_y_j); float *ptr_y1_x0 = addr1_s(ptr2_x, 0, stride_y_j); float *ptr_y1_x1 = addr1_s(ptr2_x, 1, stride_y_j); float *out_y0_x0 = addr1_s(out1_x, 0-4, stride_y_j); float *out_y0_x1 = addr1_s(out1_x, 1-4, stride_y_j); float *out_y1_x0 = addr1_s(out2_x, 0-4, stride_y_j); float *out_y1_x1 = addr1_s(out2_x, 1-4, stride_y_j); #if 1 accel_lift_op4s_fwd_main_dl_stride_pair_core_2x2_s( ptr_y0_x0, // in ptr_y0_x1, // in ptr_y1_x0, // in ptr_y1_x1, // in out_y0_x0, // out out_y0_x1, // out out_y1_x0, // out out_y1_x1, // out alpha, // w beta, // w gamma, // w delta, // w zeta, // v l1, // [4] l2, // [4] l4+0, // [4] l4+4 // [4] ); #else // BUG: this cannot work with contemporary prologs/epilogs cdf97_fwd_core_dl_sc_sse_2x2_s( ptr_y0_x0, // in ptr_y0_x1, // in ptr_y1_x0, // in ptr_y1_x1, // in out_y0_x0, // out out_y0_x1, // out out_y1_x0, // out out_y1_x1, // out l1, // [4] l2, // [4] l4+0, // [4] l4+4 // [4] ); #endif l4 += 8; // update pointers ptr1_x = addr1_s(ptr1_x, 2, stride_y_j); ptr2_x = addr1_s(ptr2_x, 2, stride_y_j); out1_x = addr1_s(out1_x, 2, stride_y_j); out2_x = addr1_s(out2_x, 2, stride_y_j); } accel_lift_op4s_fwd_main_dl_stride_pair_epilog0_s( NULL, NULL, addr1_s(ptr1_x, 0-4, stride_y_j), addr1_s(ptr1_x, 1-4, stride_y_j), alpha, beta, gamma, delta, zeta, l1 ); accel_lift_op4s_fwd_main_dl_stride_pair_epilog0_s( NULL, NULL, addr1_s(ptr2_x, 0-4, stride_y_j), addr1_s(ptr2_x, 1-4, stride_y_j), alpha, beta, gamma, delta, zeta, l2 ); ptr1_x = addr1_s(ptr1_x, 2, stride_y_j); ptr2_x = addr1_s(ptr2_x, 2, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_pair_epilog1_s( NULL, NULL, addr1_s(ptr1_x, 0-4, stride_y_j), addr1_s(ptr1_x, 1-4, stride_y_j), alpha, beta, gamma, delta, zeta, l1 ); accel_lift_op4s_fwd_main_dl_stride_pair_epilog1_s( NULL, NULL, addr1_s(ptr2_x, 0-4, stride_y_j), addr1_s(ptr2_x, 1-4, stride_y_j), alpha, beta, gamma, delta, zeta, l2 ); // perhaps, this loop can be interleaved with epilog for(int x = 2*pairs_x+offset; x < size_x; x++) { float *l4 = &l_buff[4*x]; // input addr for 1st coeff in the pair float *ptr0_y = addr2_s(ptr, y+0, x, stride_x_j, stride_y_j); // input addr for 2nd coeff in the pair float *ptr1_y = addr2_s(ptr, y+1, x, stride_x_j, stride_y_j); // output addr for 1st coeff in the pair float *out0_y = addr2_s(ptr, y+0-4, x, stride_x_j, stride_y_j); // output addr for 2nd coeff in the pair float *out1_y = addr2_s(ptr, y+1-4, x, stride_x_j, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_pair_core_s( ptr0_y, // in ptr1_y, // in out0_y, // out out1_y, // out -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, l4 ); } } for(int y = max_y; y < size_y; y++) { float *ptr1_x = addr2_s(ptr, y+0, offset, stride_x_j, stride_y_j); 
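		// rows past the last even pair (if any): only the horizontal 1-D lift is applied here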
accel_lift_op4s_fwd_main_dl_stride_s( ptr1_x, pairs_x, -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, +1, stride_y_j); } // for y=max_y to max_y+4 step 2: no horizontal, vertical_epilog0 only for(int y = max_y; y < max_y+2; y += 2) { for(int x = 0; x < size_x; x++) { float *l4 = &l_buff[4*x]; // input addr for 1st coeff in the pair float *ptr0_y = addr2_s(ptr, y+0, x, stride_x_j, stride_y_j); // input addr for 2nd coeff in the pair float *ptr1_y = addr2_s(ptr, y+1, x, stride_x_j, stride_y_j); // output addr for 1st coeff in the pair float *out0_y = addr2_s(ptr, y+0-4, x, stride_x_j, stride_y_j); // output addr for 2nd coeff in the pair float *out1_y = addr2_s(ptr, y+1-4, x, stride_x_j, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_pair_epilog0_s( ptr0_y, // in ptr1_y, // in out0_y, // out out1_y, // out -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, l4 ); } } // for y=max_y to max_y+4 step 2: no horizontal, vertical_epilog1 only for(int y = max_y+2; y < max_y+4; y += 2) { for(int x = 0; x < size_x; x++) { float *l4 = &l_buff[4*x]; // input addr for 1st coeff in the pair float *ptr0_y = addr2_s(ptr, y+0, x, stride_x_j, stride_y_j); // input addr for 2nd coeff in the pair float *ptr1_y = addr2_s(ptr, y+1, x, stride_x_j, stride_y_j); // output addr for 1st coeff in the pair float *out0_y = addr2_s(ptr, y+0-4, x, stride_x_j, stride_y_j); // output addr for 2nd coeff in the pair float *out1_y = addr2_s(ptr, y+1-4, x, stride_x_j, stride_y_j); accel_lift_op4s_fwd_main_dl_stride_pair_epilog1_s( ptr0_y, // in ptr1_y, // in out0_y, // out out1_y, // out -dwt_cdf97_p1_s, dwt_cdf97_u1_s, -dwt_cdf97_p2_s, dwt_cdf97_u2_s, dwt_cdf97_s1_s, l4 ); } } } else { if( size_x > 1 && size_x >= 5 ) { for(int y = 0; y < size_i_src_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_core_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_core_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } } if( size_x > 1 && size_x >= 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_epilog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_epilog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } // TODO // if(zero_padding) // { // #pragma omp parallel for schedule(static, threads_segment_y) // for(int y = 0; y < size_o_src_y; y++) // dwt_zero_padding_f_stride_s( // addr2_s(ptr,y,0,stride_x,stride_y), // addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), // size_i_src_x, // size_o_dst_x, // size_o_src_x-size_o_dst_x, // stride_y); // #pragma omp parallel for schedule(static, threads_segment_x) // for(int x = 0; x < size_o_src_x; x++) // dwt_zero_padding_f_stride_s( // addr2_s(ptr,0,x,stride_x,stride_y), // addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), // size_i_src_y, // size_o_dst_y, // size_o_src_y-size_o_dst_y, // stride_x); // } j++; } FUNC_END; } // TODO: test it // two-loops, not a single loop void dwt_cdf97_2f_inplace_sep_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); #ifdef _OPENMP const int threads = dwt_util_get_num_threads(); #endif const int 
size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); int j = 0; const int j_limit = ceil_log2( decompose_one ? size_o_big_max : size_o_big_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; // const int offset = 1; for(;;) { if( *j_max_ptr == j ) break; // const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); // const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); // const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); // const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); const int stride_y_j = stride_y * (1 << (j)); const int stride_x_j = stride_x * (1 << (j)); const int size_x = size_i_src_x; const int size_y = size_i_src_y; #ifdef _OPENMP const int threads_segment_y = ceil_div(size_y, threads); // FIXME: should be size_o_src_y? const int threads_segment_x = ceil_div(size_x, threads); #endif // const int pairs_x = (to_even(size_x-offset)-4)/2; // const int pairs_y = (to_even(size_y-offset)-4)/2; // const int max_y = to_even(size_y-offset)+offset; if( size_x > 1 && size_x < 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y < 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } if( size_x > 1 && size_x >= 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } if( 1 ) { if( size_x > 1 && size_x >= 5 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_i_src_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_core_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_core_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } } if( size_x > 1 && size_x >= 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_epilog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_epilog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } j++; } FUNC_END; } // two-loops, not a single loop void dwt_cdf97_2f_inplace_sep_sdl_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); #ifdef _OPENMP const int threads = dwt_util_get_num_threads(); #endif const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); int j = 0; const int j_limit = ceil_log2( decompose_one ? 
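	// decompose_one presumably means: keep decomposing until the longer side reaches
	// a single sample; otherwise stop once the shorter side has been reduced to one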
size_o_big_max : size_o_big_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; // const int offset = 1; for(;;) { if( *j_max_ptr == j ) break; // const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); // const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); // const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); // const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); const int stride_y_j = stride_y * (1 << (j)); const int stride_x_j = stride_x * (1 << (j)); const int size_x = size_i_src_x; const int size_y = size_i_src_y; #ifdef _OPENMP const int threads_segment_y = ceil_div(size_y, threads); // FIXME: should be size_o_src_y? const int threads_segment_x = ceil_div(size_x, threads); #endif // const int pairs_x = (to_even(size_x-offset)-4)/2; // const int pairs_y = (to_even(size_y-offset)-4)/2; // const int max_y = to_even(size_y-offset)+offset; if( size_x > 1 && size_x < 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y < 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } if( size_x > 1 && size_x >= 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } if( 1 ) { if( size_x > 1 && size_x >= 5 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_i_src_y; y++) { #ifdef __SSE__ dwt_cdf97_f_ex_stride_inplace_part_core_sdl_sse_s( #else dwt_cdf97_f_ex_stride_inplace_part_core_sdl_s( #endif addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x; x++) { #ifdef __SSE__ dwt_cdf97_f_ex_stride_inplace_part_core_sdl_sse_s( #else dwt_cdf97_f_ex_stride_inplace_part_core_sdl_s( #endif addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } } if( size_x > 1 && size_x >= 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_epilog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_epilog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } j++; } FUNC_END; } #ifdef __SSE__ static void op4_fwd_sdl_2x1A_s( float *ptrL0, float *ptrL1, float *ptrR0, float *ptrR1, float *out, // output as __m128 in format [ L0 L1 R0 R1 ] const float *w, const float *v, float *lL, float *cL, float *rL, float *lR, float *cR, float *rR ) { __m128 buff; __m128 zL, zR; buff[0] = *ptrL0; buff[1] = *ptrL1; buff[2] = *ptrR0; buff[3] = *ptrR1; op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)cL, *(__m128 *)rL); op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)cR, *(__m128 *)rR); op4s_sdl2_op_s_sse(zL, *(__m128 *)cL, *(__m128 *)w, *(__m128 *)lL, *(__m128 *)rL); op4s_sdl2_op_s_sse(zR, *(__m128 *)cR, *(__m128 *)w, *(__m128 *)lR, *(__m128 *)rR); op4s_sdl2_output_low_s_sse(buff, *(__m128 
*)lL, zL); op4s_sdl2_output_high_s_sse(buff, *(__m128 *)lR, zR); op4s_sdl2_scale_s_sse(buff, *(__m128 *)v); *(__m128 *)out = buff; op4s_sdl2_update_s_sse(*(__m128 *)cL, *(__m128 *)lL, *(__m128 *)rL, zL); op4s_sdl2_update_s_sse(*(__m128 *)cR, *(__m128 *)lR, *(__m128 *)rR, zR); } #endif #ifdef __SSE__ static void op4_fwd_sdl_2x1B_s( float *ptr, // input as __m128 in format [ L0 L1 R0 R1 ] float *outL0, float *outL1, float *outR0, float *outR1, const float *w, const float *v, float *lL, float *cL, float *rL, float *lR, float *cR, float *rR ) { __m128 buff; __m128 zL, zR; buff = *(__m128 *)ptr; op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)cL, *(__m128 *)rL); op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)cR, *(__m128 *)rR); op4s_sdl2_op_s_sse(zL, *(__m128 *)cL, *(__m128 *)w, *(__m128 *)lL, *(__m128 *)rL); op4s_sdl2_op_s_sse(zR, *(__m128 *)cR, *(__m128 *)w, *(__m128 *)lR, *(__m128 *)rR); op4s_sdl2_output_low_s_sse(buff, *(__m128 *)lL, zL); op4s_sdl2_output_high_s_sse(buff, *(__m128 *)lR, zR); op4s_sdl2_scale_s_sse(buff, *(__m128 *)v); *outL0 = buff[0]; *outL1 = buff[1]; *outR0 = buff[2]; *outR1 = buff[3]; op4s_sdl2_update_s_sse(*(__m128 *)cL, *(__m128 *)lL, *(__m128 *)rL, zL); op4s_sdl2_update_s_sse(*(__m128 *)cR, *(__m128 *)lR, *(__m128 *)rR, zR); } #endif #ifdef __SSE__ static void cdf97_fwd_core2_sdl_2x2_sc_sse_s( float *ptrL0, float *ptrL1, float *ptrR0, float *ptrR1, float *outL0, float *outL1, float *outR0, float *outR1, float *lAL, float *cAL, float *rAL, float *lAR, float *cAR, float *rAR, float *lBL, float *cBL, float *rBL, float *lBR, float *cBR, float *rBR ) { UNUSED(cAL); UNUSED(rAL); UNUSED(cAR); UNUSED(rAR); UNUSED(cBL); UNUSED(rBL); UNUSED(cBR); UNUSED(rBR); const __m128 w = { dwt_cdf97_u2_s, -dwt_cdf97_p2_s, dwt_cdf97_u1_s, -dwt_cdf97_p1_s }; const __m128 v_vert = { 1/(dwt_cdf97_s1_s*dwt_cdf97_s1_s), 1.f, 1.f, (dwt_cdf97_s1_s*dwt_cdf97_s1_s) }; __m128 buff; __m128 z; buff[0] = *ptrL0; buff[1] = *ptrL1; buff[2] = *ptrR0; buff[3] = *ptrR1; // A/L+R op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)(lAL+4), *(__m128 *)(lAL+8)); op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)(lAR+4), *(__m128 *)(lAR+8)); // A/L op4s_sdl2_op_s_sse(z, *(__m128 *)(lAL+4), w, *(__m128 *)(lAL+0), *(__m128 *)(lAL+8)); op4s_sdl2_output_low_s_sse(buff, *(__m128 *)(lAL+0), z); op4s_sdl2_update_s_sse(*(__m128 *)(lAL+4), *(__m128 *)(lAL+0), *(__m128 *)(lAL+8), z); // A/R op4s_sdl2_op_s_sse(z, *(__m128 *)(lAR+4), w, *(__m128 *)(lAR+0), *(__m128 *)(lAR+8)); op4s_sdl2_output_high_s_sse(buff, *(__m128 *)(lAR+0), z); op4s_sdl2_update_s_sse(*(__m128 *)(lAR+4), *(__m128 *)(lAR+0), *(__m128 *)(lAR+8), z); // swap, this should by done by single shuffle instruction buff = _mm_shuffle_ps(buff, buff, _MM_SHUFFLE(3,1,2,0)); // B/L+R op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)(lBL+4), *(__m128 *)(lBL+8)); op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)(lBR+4), *(__m128 *)(lBR+8)); // B/L op4s_sdl2_op_s_sse(z, *(__m128 *)(lBL+4), w, *(__m128 *)(lBL+0), *(__m128 *)(lBL+8)); op4s_sdl2_output_low_s_sse(buff, *(__m128 *)(lBL+0), z); op4s_sdl2_update_s_sse(*(__m128 *)(lBL+4), *(__m128 *)(lBL+0), *(__m128 *)(lBL+8), z); // B/R op4s_sdl2_op_s_sse(z, *(__m128 *)(lBR+4), w, *(__m128 *)(lBR+0), *(__m128 *)(lBR+8)); op4s_sdl2_output_high_s_sse(buff, *(__m128 *)(lBR+0), z); op4s_sdl2_update_s_sse(*(__m128 *)(lBR+4), *(__m128 *)(lBR+0), *(__m128 *)(lBR+8), z); // B/L+R op4s_sdl2_scale_s_sse(buff, v_vert); *outL0 = buff[0]; *outL1 = buff[1]; *outR0 = buff[2]; *outR1 = buff[3]; } #endif #ifdef __SSE__ static 
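// fused 2x2 step of the single-loop SDL scheme: pass A lifts the two rows, the
// _MM_SHUFFLE(3,1,2,0) swap transposes the 2x2 block, and pass B then lifts the
// two columns; the shift-register state lives in the lA*/lB* (l, c, r) buffers,
// mirroring the four 1-D steps of the scalar path in op4_fwd_sdl_2x2_s below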
void op4_fwd_sdl_2x2_fast_s( float *ptrL0, float *ptrL1, float *ptrR0, float *ptrR1, float *outL0, float *outL1, float *outR0, float *outR1, const float *w, const float *v, float *lAL, float *cAL, float *rAL, float *lAR, float *cAR, float *rAR, float *lBL, float *cBL, float *rBL, float *lBR, float *cBR, float *rBR ) { #if 0 __m128 tmp; // A op4_fwd_sdl_2x1A_s( ptrL0, ptrL1, ptrR0, ptrR1, (float *)&tmp, w, v, lAL, cAL, rAL, lAR, cAR, rAR ); // swap tmp = _mm_shuffle_ps(tmp, tmp, _MM_SHUFFLE(3,1,2,0)); // B op4_fwd_sdl_2x1B_s( (float *)&tmp, outL0, outL1, outR0, outR1, w, v, lBL, cBL, rBL, lBR, cBR, rBR ); #endif #if 0 __m128 buff; __m128 zL, zR; // FIXME: if there is a pressure for number of SSE registers, it is possible to merge zL and zR into z (get rid interleaving) // A buff[0] = *ptrL0; buff[1] = *ptrL1; buff[2] = *ptrR0; buff[3] = *ptrR1; op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)cAL, *(__m128 *)rAL); op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)cAR, *(__m128 *)rAR); op4s_sdl2_op_s_sse(zL, *(__m128 *)cAL, *(__m128 *)w, *(__m128 *)lAL, *(__m128 *)rAL); op4s_sdl2_op_s_sse(zR, *(__m128 *)cAR, *(__m128 *)w, *(__m128 *)lAR, *(__m128 *)rAR); op4s_sdl2_output_low_s_sse(buff, *(__m128 *)lAL, zL); op4s_sdl2_output_high_s_sse(buff, *(__m128 *)lAR, zR); op4s_sdl2_scale_s_sse(buff, *(__m128 *)v); op4s_sdl2_update_s_sse(*(__m128 *)cAL, *(__m128 *)lAL, *(__m128 *)rAL, zL); op4s_sdl2_update_s_sse(*(__m128 *)cAR, *(__m128 *)lAR, *(__m128 *)rAR, zR); // swap, this should by done by single shuffle instruction buff = _mm_shuffle_ps(buff, buff, _MM_SHUFFLE(3,1,2,0)); // B op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)cBL, *(__m128 *)rBL); op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)cBR, *(__m128 *)rBR); op4s_sdl2_op_s_sse(zL, *(__m128 *)cBL, *(__m128 *)w, *(__m128 *)lBL, *(__m128 *)rBL); op4s_sdl2_op_s_sse(zR, *(__m128 *)cBR, *(__m128 *)w, *(__m128 *)lBR, *(__m128 *)rBR); op4s_sdl2_output_low_s_sse(buff, *(__m128 *)lBL, zL); op4s_sdl2_output_high_s_sse(buff, *(__m128 *)lBR, zR); op4s_sdl2_scale_s_sse(buff, *(__m128 *)v); op4s_sdl2_update_s_sse(*(__m128 *)cBL, *(__m128 *)lBL, *(__m128 *)rBL, zL); op4s_sdl2_update_s_sse(*(__m128 *)cBR, *(__m128 *)lBR, *(__m128 *)rBR, zR); *outL0 = buff[0]; *outL1 = buff[1]; *outR0 = buff[2]; *outR1 = buff[3]; #endif #if 0 __m128 buff; buff[0] = *ptrL0; buff[1] = *ptrL1; buff[2] = *ptrR0; buff[3] = *ptrR1; __m128 z; __m128 t; __asm__ __volatile__( // op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)cAL, *(__m128 *)rAL); "movaps %[buff], %[t] \n\t"// (t) = (buff); "shufps %[shuffle3210], %[cAL], %[t] \n\t" // (t) = _mm_shuffle_ps((t), (*(__m128 *)cAL), _MM_SHUFFLE(3,2,1,0)); "shufps %[shuffle0321], %[t], %[cAL] \n\t" // (*(__m128 *)cAL) = _mm_shuffle_ps((*(__m128 *)cAL), (t), _MM_SHUFFLE(0,3,2,1)); "shufps %[shuffle3210], %[rAL], %[t] \n\t" // (t) = _mm_shuffle_ps((t), (*(__m128 *)rAL), _MM_SHUFFLE(3,2,1,0)); "shufps %[shuffle1321], %[t], %[rAL] \n\t" // (*(__m128 *)rAL) = _mm_shuffle_ps((*(__m128 *)rAL), (t), _MM_SHUFFLE(1,3,2,1)); // op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)cAR, *(__m128 *)rAR); "shufps %[shuffle3232], %[cAR], %[buff] \n\t" // (buff) = _mm_shuffle_ps( (buff), (*(__m128 *)cAR), _MM_SHUFFLE(3,2,3,2) ); "shufps %[shuffle0321], %[buff], %[cAR] \n\t" // (*(__m128 *)cAR) = _mm_shuffle_ps( (*(__m128 *)cAR), (buff), _MM_SHUFFLE(0,3,2,1) ); "shufps %[shuffle3210], %[rAR], %[buff] \n\t" // (buff) = _mm_shuffle_ps( (buff), (*(__m128 *)rAR), _MM_SHUFFLE(3,2,1,0) ); "shufps %[shuffle1321], %[buff], %[rAR] \n\t" // (*(__m128 
*)rAR) = _mm_shuffle_ps( (*(__m128 *)rAR), (buff), _MM_SHUFFLE(1,3,2,1) ); // op4s_sdl2_op_s_sse(z, *(__m128 *)cAL, *(__m128 *)w, *(__m128 *)lAL, *(__m128 *)rAL); "movaps %[lAL], %[z] \n\t" // (z) = (*(__m128 *)lAL); "addps %[rAL], %[z] \n\t" // (z) = _mm_add_ps((z), (*(__m128 *)rAL)); "mulps %[w], %[z] \n\t" // (z) = _mm_mul_ps((z), (*(__m128 *)w)); "addps %[cAL], %[z] \n\t" // (z) = _mm_add_ps((z), (*(__m128 *)cAL)); // op4s_sdl2_output_low_s_sse(buff, *(__m128 *)lAL, z); "movaps %[lAL], %[buff] \n\t" // (buff) = (*(__m128 *)lAL); "unpcklps %[z], %[buff] \n\t" // (buff) = _mm_unpacklo_ps((buff), (z)); // op4s_sdl2_update_s_sse(*(__m128 *)cAL, *(__m128 *)lAL, *(__m128 *)rAL, z); "movaps %[lAL], %[cAL] \n\t" // (*(__m128 *)cAL) = ( *(__m128 *)lAL); "movaps %[rAL], %[lAL] \n\t" // ( *(__m128 *)lAL) = (*(__m128 *)rAL); "movaps %[z], %[rAL] \n\t" // (*(__m128 *)rAL) = (z); // op4s_sdl2_op_s_sse(z, *(__m128 *)cAR, *(__m128 *)w, *(__m128 *)lAR, *(__m128 *)rAR); "movaps %[lAR], %[z] \n\t" // (z) = (*(__m128 *)lAR); "addps %[rAR], %[z] \n\t" // (z) = _mm_add_ps((z), (*(__m128 *)rAR)); "mulps %[w], %[z] \n\t" // (z) = _mm_mul_ps((z), (*(__m128 *)w)); "addps %[cAR], %[z] \n\t" // (z) = _mm_add_ps((z), (*(__m128 *)cAR)); // op4s_sdl2_output_high_s_sse(buff, *(__m128 *)lAR, z); "movaps %[lAR], %[t] \n\t" // (t) = (*(__m128 *)lAR); "unpcklps %[z], %[t] \n\t" // (t) = _mm_unpacklo_ps((t), (z)); "shufps %[shuffle1010], %[t], %[buff] \n\t" // (buff) = _mm_shuffle_ps((buff), t, _MM_SHUFFLE(1,0,1,0)); // op4s_sdl2_update_s_sse(*(__m128 *)cAR, *(__m128 *)lAR, *(__m128 *)rAR, z); "movaps %[lAR], %[cAR] \n\t" // (*(__m128 *)cAR) = (*(__m128 *)lAR); "movaps %[rAR], %[lAR] \n\t" // (*(__m128 *)lAR) = (*(__m128 *)rAR); "movaps %[z], %[rAR] \n\t" // (*(__m128 *)rAR) = (z); // op4s_sdl2_scale_s_sse(buff, *(__m128 *)v); "mulps %[v], %[buff] \n\t" // (buff) = _mm_mul_ps((buff), (*(__m128 *)v)); // swap "shufps %[shuffle3120], %[buff], %[buff] \n\t" // buff = _mm_shuffle_ps(buff, buff, _MM_SHUFFLE(3,1,2,0)); : /* input/output */ // FIXME: some "&" are maybe unnecessary [t]"=&x"(t), [z]"=&x"(z), [buff]"+&x"(buff), [cAL]"+&x"(*(__m128 *)cAL), [rAL]"+&x"(*(__m128 *)rAL), [cAR]"+&x"(*(__m128 *)cAR), [rAR]"+&x"(*(__m128 *)rAR), [lAL]"+&x"(*(__m128 *)lAL), [lAR]"+&x"(*(__m128 *)lAR) : /* input only */ [w]"x"(*(__m128 *)w), [v]"x"(*(__m128 *)v), [shuffle3210]"i"(_MM_SHUFFLE(3,2,1,0)), [shuffle0321]"i"(_MM_SHUFFLE(0,3,2,1)), [shuffle1321]"i"(_MM_SHUFFLE(1,3,2,1)), [shuffle3232]"i"(_MM_SHUFFLE(3,2,3,2)), [shuffle1010]"i"(_MM_SHUFFLE(1,0,1,0)), [shuffle3120]"i"(_MM_SHUFFLE(3,1,2,0)) : /* clobbered */ ); __asm__ __volatile__( // op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)cBL, *(__m128 *)rBL); "movaps %[buff], %[t] \n\t" // (t) = (buff); "shufps %[shuffle3210], %[cBL], %[t] \n\t" // (t) = _mm_shuffle_ps((t), (*(__m128 *)cBL), _MM_SHUFFLE(3,2,1,0)); "shufps %[shuffle0321], %[t], %[cBL] \n\t" // (*(__m128 *)cBL) = _mm_shuffle_ps((*(__m128 *)cBL), (t), _MM_SHUFFLE(0,3,2,1)); "shufps %[shuffle3210], %[rBL], %[t] \n\t" // (t) = _mm_shuffle_ps((t), (*(__m128 *)rBL), _MM_SHUFFLE(3,2,1,0)); "shufps %[shuffle1321], %[t], %[rBL] \n\t" // (*(__m128 *)rBL) = _mm_shuffle_ps((*(__m128 *)rBL), (t), _MM_SHUFFLE(1,3,2,1)); // op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)cBR, *(__m128 *)rBR); "shufps %[shuffle3232], %[cBR], %[buff] \n\t" // (buff) = _mm_shuffle_ps( (buff), (*(__m128 *)cBR), _MM_SHUFFLE(3,2,3,2) ); "shufps %[shuffle0321], %[buff], %[cBR] \n\t" // (*(__m128 *)cBR) = _mm_shuffle_ps( (*(__m128 *)cBR), (buff), 
_MM_SHUFFLE(0,3,2,1) ); "shufps %[shuffle3210], %[rBR], %[buff] \n\t" // (buff) = _mm_shuffle_ps( (buff), (*(__m128 *)rBR), _MM_SHUFFLE(3,2,1,0) ); "shufps %[shuffle1321], %[buff], %[rBR] \n\t" // (*(__m128 *)rBR) = _mm_shuffle_ps( (*(__m128 *)rBR), (buff), _MM_SHUFFLE(1,3,2,1) ); // op4s_sdl2_op_s_sse(z, *(__m128 *)cBL, *(__m128 *)w, *(__m128 *)lBL, *(__m128 *)rBL); "movaps %[lBL], %[z] \n\t" // (z) = (*(__m128 *)lBL); "addps %[rBL], %[z] \n\t" // (z) = _mm_add_ps((z), (*(__m128 *)rBL)); "mulps %[w], %[z] \n\t" // (z) = _mm_mul_ps((z), (*(__m128 *)w)); "addps %[cBL], %[z] \n\t" // (z) = _mm_add_ps((z), (*(__m128 *)cBL)); // op4s_sdl2_output_low_s_sse(buff, *(__m128 *)lBL, z); "movaps %[lBL], %[buff] \n\t" // (buff) = (*(__m128 *)lBL); "unpcklps %[z], %[buff] \n\t" // (buff) = _mm_unpacklo_ps((buff), (z)); // op4s_sdl2_update_s_sse(*(__m128 *)cBL, *(__m128 *)lBL, *(__m128 *)rBL, z); "movaps %[lBL], %[cBL] \n\t" // (*(__m128 *)cBL) = (*(__m128 *)lBL); "movaps %[rBL], %[lBL] \n\t" // (*(__m128 *)lBL) = (*(__m128 *)rBL); "movaps %[z], %[rBL] \n\t" // (*(__m128 *)rBL) = (z); // op4s_sdl2_op_s_sse(z, *(__m128 *)cBR, *(__m128 *)w, *(__m128 *)lBR, *(__m128 *)rBR); "movaps %[lBR], %[z] \n\t" // (z) = (*(__m128 *)lBR); "addps %[rBR], %[z] \n\t" // (z) = _mm_add_ps((z), (*(__m128 *)rBR)); "mulps %[w], %[z] \n\t" // (z) = _mm_mul_ps((z), (*(__m128 *)w)); "addps %[cBR], %[z] \n\t" // (z) = _mm_add_ps((z), (*(__m128 *)cBR)); // op4s_sdl2_output_high_s_sse(buff, *(__m128 *)lBR, z); "movaps %[lBR], %[t] \n\t" // (t) = (*(__m128 *)lBR); "unpcklps %[z], %[t] \n\t" // (t) = _mm_unpacklo_ps((t), (z)); "shufps %[shuffle1010], %[t], %[buff] \n\t" // (buff) = _mm_shuffle_ps((buff), t, _MM_SHUFFLE(1,0,1,0)); // op4s_sdl2_update_s_sse(*(__m128 *)cBR, *(__m128 *)lBR, *(__m128 *)rBR, z); "movaps %[lBR], %[cBR] \n\t" // (*(__m128 *)cBR) = (*(__m128 *)lBR); "movaps %[rBR], %[lBR] \n\t" // (*(__m128 *)lBR) = (*(__m128 *)rBR); "movaps %[z], %[rBR] \n\t" // (*(__m128 *)rBR) = (z); // op4s_sdl2_scale_s_sse(buff, *(__m128 *)v); "mulps %[v], %[buff] \n\t" // (buff) = _mm_mul_ps((buff), (*(__m128 *)v)); : /* input/output */ [t]"=&x"(t), [z]"=&x"(z), [buff]"+&x"(buff), [cBL]"+&x"(*(__m128 *)cBL), [rBL]"+&x"(*(__m128 *)rBL), [cBR]"+&x"(*(__m128 *)cBR), [rBR]"+&x"(*(__m128 *)rBR), [lBL]"+&x"(*(__m128 *)lBL), [lBR]"+&x"(*(__m128 *)lBR) : /* input only */ [w]"x"(*(__m128 *)w), [v]"x"(*(__m128 *)v), [shuffle3210]"i"(_MM_SHUFFLE(3,2,1,0)), [shuffle0321]"i"(_MM_SHUFFLE(0,3,2,1)), [shuffle1321]"i"(_MM_SHUFFLE(1,3,2,1)), [shuffle3232]"i"(_MM_SHUFFLE(3,2,3,2)), [shuffle1010]"i"(_MM_SHUFFLE(1,0,1,0)) : /* clobbered */ ); // NOTE: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=39847 (error: more than 30 operands in ‘asm’) *outL0 = buff[0]; *outL1 = buff[1]; *outR0 = buff[2]; *outR1 = buff[3]; #endif #if 1 UNUSED(cAL); UNUSED(rAL); UNUSED(cAR); UNUSED(rAR); UNUSED(cBL); UNUSED(rBL); UNUSED(cBR); UNUSED(rBR); __m128 buff; __m128 z; buff[0] = *ptrL0; buff[1] = *ptrL1; buff[2] = *ptrR0; buff[3] = *ptrR1; // A/L+R op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)(lAL+4), *(__m128 *)(lAL+8)); op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)(lAR+4), *(__m128 *)(lAR+8)); // A/L op4s_sdl2_op_s_sse(z, *(__m128 *)(lAL+4), *(__m128 *)w, *(__m128 *)(lAL+0), *(__m128 *)(lAL+8)); op4s_sdl2_output_low_s_sse(buff, *(__m128 *)(lAL+0), z); op4s_sdl2_update_s_sse(*(__m128 *)(lAL+4), *(__m128 *)(lAL+0), *(__m128 *)(lAL+8), z); // A/R op4s_sdl2_op_s_sse(z, *(__m128 *)(lAR+4), *(__m128 *)w, *(__m128 *)(lAR+0), *(__m128 *)(lAR+8)); 
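	// A/R: store this result into the high half of buff and advance the
	// corresponding (l, c, r) delay registers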
op4s_sdl2_output_high_s_sse(buff, *(__m128 *)(lAR+0), z); op4s_sdl2_update_s_sse(*(__m128 *)(lAR+4), *(__m128 *)(lAR+0), *(__m128 *)(lAR+8), z); // A/L+R op4s_sdl2_scale_s_sse(buff, *(__m128 *)v); // swap, this should by done by single shuffle instruction buff = _mm_shuffle_ps(buff, buff, _MM_SHUFFLE(3,1,2,0)); // B/L+R op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)(lBL+4), *(__m128 *)(lBL+8)); op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)(lBR+4), *(__m128 *)(lBR+8)); // B/L op4s_sdl2_op_s_sse(z, *(__m128 *)(lBL+4), *(__m128 *)w, *(__m128 *)(lBL+0), *(__m128 *)(lBL+8)); op4s_sdl2_output_low_s_sse(buff, *(__m128 *)(lBL+0), z); op4s_sdl2_update_s_sse(*(__m128 *)(lBL+4), *(__m128 *)(lBL+0), *(__m128 *)(lBL+8), z); // B/R op4s_sdl2_op_s_sse(z, *(__m128 *)(lBR+4), *(__m128 *)w, *(__m128 *)(lBR+0), *(__m128 *)(lBR+8)); op4s_sdl2_output_high_s_sse(buff, *(__m128 *)(lBR+0), z); op4s_sdl2_update_s_sse(*(__m128 *)(lBR+4), *(__m128 *)(lBR+0), *(__m128 *)(lBR+8), z); // B/L+R op4s_sdl2_scale_s_sse(buff, *(__m128 *)v); *outL0 = buff[0]; *outL1 = buff[1]; *outR0 = buff[2]; *outR1 = buff[3]; #endif } #endif static void op4_fwd_sdl_core_s( const float *ptr0, const float *ptr1, float *out0, float *out1, const float *w, const float *v, float *l, float *c, float *r ) { #ifndef __SSE__ float buff[2]; float t[4]; op4s_sdl_shuffle_s_ref(c, r); buff[0] = *ptr0; buff[1] = *ptr1; op4s_sdl_input_s_ref(buff, c, r); op4s_sdl_op_s_ref(t, c, w, l, r); op4s_sdl_output_s_ref(buff, l, t); op4s_sdl_scale_s_ref(buff, v); *out0 = buff[0]; *out1 = buff[1]; op4s_sdl_update_s_ref(c, l, r, t); #else // NOTE: very stupid SSE implementation __m128 buff; __m128 z; buff[0] = *ptr0; buff[1] = *ptr1; op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)c, *(__m128 *)r); op4s_sdl2_op_s_sse(z, *(__m128 *)c, *(__m128 *)w, *(__m128 *)l, *(__m128 *)r); op4s_sdl2_output_low_s_sse(buff, *(__m128 *)l, z); op4s_sdl2_scale_s_sse(buff, *(__m128 *)v); op4s_sdl2_update_s_sse(*(__m128 *)c, *(__m128 *)l, *(__m128 *)r, z); *out0 = buff[0]; *out1 = buff[1]; #endif } static void op4_fwd_sdl_2x2_s( float *ptr_y0_x0, float *ptr_y0_x1, float *ptr_y1_x0, float *ptr_y1_x1, float *out_y0_x0, float *out_y0_x1, float *out_y1_x0, float *out_y1_x1, const float *w, const float *v, float *buff_y0, float *buff_y1, float *buff_x0, float *buff_x1 ) { #ifndef __SSE__ float a, b, c, d; op4_fwd_sdl_core_s( ptr_y0_x0, ptr_y0_x1, &a, &b, w, v, buff_y0+0, buff_y0+4, buff_y0+8 ); op4_fwd_sdl_core_s( ptr_y1_x0, ptr_y1_x1, &c, &d, w, v, buff_y1+0, buff_y1+4, buff_y1+8 ); op4_fwd_sdl_core_s( &a, &c, out_y0_x0, out_y1_x0, w, v, buff_x0+0, buff_x0+4, buff_x0+8 ); op4_fwd_sdl_core_s( &b, &d, out_y0_x1, out_y1_x1, w, v, buff_x1+0, buff_x1+4, buff_x1+8 ); #else op4_fwd_sdl_2x2_fast_s( ptr_y0_x0, ptr_y0_x1, ptr_y1_x0, ptr_y1_x1, out_y0_x0, out_y1_x0, out_y0_x1, out_y1_x1, w, v, buff_y0+0, buff_y0+4, buff_y0+8, buff_y1+0, buff_y1+4, buff_y1+8, buff_x0+0, buff_x0+4, buff_x0+8, buff_x1+0, buff_x1+4, buff_x1+8 ); #endif #if 0 cdf97_fwd_core2_sdl_2x2_sc_sse_s( ptr_y0_x0, ptr_y0_x1, ptr_y1_x0, ptr_y1_x1, out_y0_x0, out_y1_x0, out_y0_x1, out_y1_x1, buff_y0+0, buff_y0+4, buff_y0+8, buff_y1+0, buff_y1+4, buff_y1+8, buff_x0+0, buff_x0+4, buff_x0+8, buff_x1+0, buff_x1+4, buff_x1+8 ); #endif } static void op4_fwd_sdl_core_prolog2_2x2_s( /*const*/ float *y0x0, /*const*/ float *y0x1, /*const*/ float *y1x0, /*const*/ float *y1x1, const float *w, const float *v, float *lcr_y0, float *lcr_y1, float *lcr_x0, float *lcr_x1 ) { #ifndef __SSE__ float a, b, c, d; op4_fwd_sdl_core_s( y0x0, y0x1, 
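	// scalar fallback: lift one horizontal pair on each of the two rows, then push
	// the four results a..d into the two per-column vertical prolog buffers below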
&a, &b, w, v, lcr_y0+0, lcr_y0+4, lcr_y0+8 ); op4_fwd_sdl_core_s( y1x0, y1x1, &c, &d, w, v, lcr_y1+0, lcr_y1+4, lcr_y1+8 ); op4_fwd_sdl_prolog2_part_s( &a, &c, w, v, lcr_x0+0, lcr_x0+4, lcr_x0+8 ); op4_fwd_sdl_prolog2_part_s( &b, &d, w, v, lcr_x1+0, lcr_x1+4, lcr_x1+8 ); #else // A y float *lAL = lcr_y0+0; float *cAL = lcr_y0+4; float *rAL = lcr_y0+8; float *lAR = lcr_y1+0; float *cAR = lcr_y1+4; float *rAR = lcr_y1+8; // B x float *lBL = lcr_x0+0; float *cBL = lcr_x0+4; float *rBL = lcr_x0+8; float *lBR = lcr_x1+0; float *cBR = lcr_x1+4; float *rBR = lcr_x1+8; __m128 buff; __m128 zL, zR; // A buff[0] = *y0x0; buff[1] = *y0x1; buff[2] = *y1x0; buff[3] = *y1x1; op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)cAL, *(__m128 *)rAL); op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)cAR, *(__m128 *)rAR); op4s_sdl2_op_s_sse(zL, *(__m128 *)cAL, *(__m128 *)w, *(__m128 *)lAL, *(__m128 *)rAL); op4s_sdl2_op_s_sse(zR, *(__m128 *)cAR, *(__m128 *)w, *(__m128 *)lAR, *(__m128 *)rAR); op4s_sdl2_output_low_s_sse(buff, *(__m128 *)lAL, zL); op4s_sdl2_output_high_s_sse(buff, *(__m128 *)lAR, zR); op4s_sdl2_scale_s_sse(buff, *(__m128 *)v); op4s_sdl2_update_s_sse(*(__m128 *)cAL, *(__m128 *)lAL, *(__m128 *)rAL, zL); op4s_sdl2_update_s_sse(*(__m128 *)cAR, *(__m128 *)lAR, *(__m128 *)rAR, zR); // swap, this should by done by single shuffle instruction buff = _mm_shuffle_ps(buff, buff, _MM_SHUFFLE(3,1,2,0)); // B op4s_sdl2_shuffle_input_low_s_sse(buff, *(__m128 *)cBL, *(__m128 *)rBL); op4s_sdl2_shuffle_input_high_s_sse(buff, *(__m128 *)cBR, *(__m128 *)rBR); op4s_sdl2_op_s_sse(zL, *(__m128 *)cBL, *(__m128 *)w, *(__m128 *)lBL, *(__m128 *)rBL); op4s_sdl2_op_s_sse(zR, *(__m128 *)cBR, *(__m128 *)w, *(__m128 *)lBR, *(__m128 *)rBR); op4s_sdl2_update_s_sse(*(__m128 *)cBL, *(__m128 *)lBL, *(__m128 *)rBL, zL); op4s_sdl2_update_s_sse(*(__m128 *)cBR, *(__m128 *)lBR, *(__m128 *)rBR, zR); #endif } static void op4_fwd_sdl_core_prolog2_2x1_s( const float *y0x0, const float *y0x1, float *w, float *v, float *lcr_y0, float *lcr_x0, float *lcr_x1, int idx ) { op4_fwd_sdl_core_s( y0x0, y0x1, (lcr_x0+0+idx), (lcr_x1+0+idx), w, v, lcr_y0+0, lcr_y0+4, lcr_y0+8 ); } static void op4_fwd_sdl_prolog2_fast_s( float *ptr0, float *ptr1, float *ptr2, float *ptr3, float *ptr4, float *ptr5, float *ptr6, float *ptr7, float *ptr8, float *ptr9, const float *w, const float *v, float *lcr ) { // part0 // prolog2: import(3) (lcr+0)[3] = *ptr3; // base+3 // part1 // prolog2: pass-prolog op4_fwd_sdl_prolog2_part_s( ptr4, ptr5, w, v, lcr+0, lcr+4, lcr+8 ); // part2 // prolog2: import(2) (lcr+0)[2] = *ptr2; // base+2 // part3 // prolog2: pass-prolog op4_fwd_sdl_prolog2_part_s( ptr6, ptr7, w, v, lcr+0, lcr+4, lcr+8 ); // part4 // prolog2: import(1) (lcr+0)[1] = *ptr1; // base+1 // part5 // prolog2: pass-prolog op4_fwd_sdl_prolog2_part_s( ptr8, ptr9, w, v, lcr+0, lcr+4, lcr+8 ); // part6 // prolog2: import(0) (lcr+0)[0] = *ptr0; // base+0 } static void op4_fwd_sdl_s(float *ptr, int stride, int steps, const float *w, const float *v) { float lcr[12]; float *l = lcr+0; float *c = lcr+4; float *r = lcr+8; op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr, 0, stride), addr1_s(ptr, 1, stride), addr1_s(ptr, 2, stride), addr1_s(ptr, 3, stride), addr1_s(ptr, 4, stride), addr1_s(ptr, 5, stride), addr1_s(ptr, 6, stride), addr1_s(ptr, 7, stride), addr1_s(ptr, 8, stride), addr1_s(ptr, 9, stride), w, v, lcr ); for(int s = 0; s < steps-3; s++) { op4_fwd_sdl_core_s( addr1_s(ptr, 10, stride), addr1_s(ptr, 11, stride), addr1_s(ptr, 0, stride), addr1_s(ptr, 1, stride), w, v, l, c, r ); 
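		// the core reads the new input pair at offsets 10/11 and writes the delayed
		// output pair back at offsets 0/1 (a latency of 10 samples), then the window
		// slides forward by one pair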
ptr = addr1_s(ptr, +2, stride); } op4_fwd_sdl_epilog2_fast_s( addr1_s(ptr, 0, stride), addr1_s(ptr, 1, stride), addr1_s(ptr, 2, stride), addr1_s(ptr, 3, stride), addr1_s(ptr, 4, stride), addr1_s(ptr, 5, stride), addr1_s(ptr, 6, stride), addr1_s(ptr, 7, stride), addr1_s(ptr, 8, stride), addr1_s(ptr, 9, stride), w, v, lcr ); } static void op4_fwd_sdl_epilog2_prolog2_10x1_s( const float *w, const float *v, float *lcr_y, float *lcr_x, int idx ) { #if 0 float a[10]; op4_fwd_sdl_epilog2_fast_s( a+0, a+1, a+2, a+3, a+4, a+5, a+6, a+7, a+8, a+9, w, v, lcr_y ); for(int i = 0; i < 10; i++) { op4_fwd_sdl_prolog2_import_s( a+i, lcr_x + i*12, idx ); } #else // epilog2: export(3) *(lcr_x+9*12+0+idx) = *(lcr_y+0+3); // epilog2: pass-epilog op4_fwd_sdl_epilog2_part_s( lcr_x+0*12+0+idx, lcr_x+1*12+0+idx, w, v, (lcr_y+0), (lcr_y+4), (lcr_y+8) ); // epilog2: export(2) *(lcr_x+8*12+0+idx) = *(lcr_y+0+2); // epilog2: pass-epilog op4_fwd_sdl_epilog2_part_s( lcr_x+2*12+0+idx, lcr_x+3*12+0+idx, w, v, (lcr_y+0), (lcr_y+4), (lcr_y+8) ); // epilog2: export(1) *(lcr_x+7*12+0+idx) = *(lcr_y+0+1); // epilog2: pass-epilog op4_fwd_sdl_epilog2_part_s( lcr_x+4*12+0+idx, lcr_x+5*12+0+idx, w, v, (lcr_y+0), (lcr_y+4), (lcr_y+8) ); // epilog2: export(0) *(lcr_x+6*12+0+idx) = *(lcr_y+0+0); #endif } static void op4_fwd_sdl_epilog2_prolog2_2x2_s( const float *w, const float *v, float *y0_lcr, float *y1_lcr, float *x0_lcr, float *x1_lcr ) { #ifndef __SSE__ float tmp[4]; op4_fwd_sdl_epilog2_part_s( tmp+0, tmp+1, w, v, (y0_lcr+0), (y0_lcr+4), (y0_lcr+8) ); op4_fwd_sdl_epilog2_part_s( tmp+2, tmp+3, w, v, (y1_lcr+0), (y1_lcr+4), (y1_lcr+8) ); op4_fwd_sdl_prolog2_part_s( tmp+0, tmp+2, w, v, x0_lcr+0, x0_lcr+4, x0_lcr+8 ); op4_fwd_sdl_prolog2_part_s( tmp+1, tmp+3, w, v, x1_lcr+0, x1_lcr+4, x1_lcr+8 ); #else __m128 tmp; __m128 z0, z1; // can be interleaved with single "z" op4s_sdl2_shuffle_s_sse(*(__m128 *)(y0_lcr+4), *(__m128 *)(y0_lcr+8)); op4s_sdl2_shuffle_s_sse(*(__m128 *)(y1_lcr+4), *(__m128 *)(y1_lcr+8)); op4s_sdl2_op_s_sse(z0, *(__m128 *)(y0_lcr+4), *(__m128 *)w, *(__m128 *)(y0_lcr+0), *(__m128 *)(y0_lcr+8)); op4s_sdl2_op_s_sse(z1, *(__m128 *)(y1_lcr+4), *(__m128 *)w, *(__m128 *)(y1_lcr+0), *(__m128 *)(y1_lcr+8)); op4s_sdl2_output_low_s_sse(tmp, *(__m128 *)(y0_lcr+0), z0); op4s_sdl2_output_high_s_sse(tmp, *(__m128 *)(y1_lcr+0), z1); op4s_sdl2_scale_s_sse(tmp, *(__m128 *)v); op4s_sdl2_update_s_sse(*(__m128 *)(y0_lcr+4), *(__m128 *)(y0_lcr+0), *(__m128 *)(y0_lcr+8), z0); op4s_sdl2_update_s_sse(*(__m128 *)(y1_lcr+4), *(__m128 *)(y1_lcr+0), *(__m128 *)(y1_lcr+8), z1); // swap tmp = _mm_shuffle_ps(tmp, tmp, _MM_SHUFFLE(3,1,2,0)); op4s_sdl2_shuffle_input_low_s_sse(tmp, *(__m128 *)(x0_lcr+4), *(__m128 *)(x0_lcr+8)); op4s_sdl2_shuffle_input_high_s_sse(tmp, *(__m128 *)(x1_lcr+4), *(__m128 *)(x1_lcr+8)); op4s_sdl2_op_s_sse(z0, *(__m128 *)(x0_lcr+4), *(__m128 *)w, *(__m128 *)(x0_lcr+0), *(__m128 *)(x0_lcr+8)); op4s_sdl2_op_s_sse(z1, *(__m128 *)(x1_lcr+4), *(__m128 *)w, *(__m128 *)(x1_lcr+0), *(__m128 *)(x1_lcr+8)); op4s_sdl2_update_s_sse(*(__m128 *)(x0_lcr+4), *(__m128 *)(x0_lcr+0), *(__m128 *)(x0_lcr+8), z0); op4s_sdl2_update_s_sse(*(__m128 *)(x1_lcr+4), *(__m128 *)(x1_lcr+0), *(__m128 *)(x1_lcr+8), z1); #endif } static void op4_fwd_sdl_epilog2_prolog2_10x2_s( float *w, float *v, float *y0_lcr, float *y1_lcr, float *x0_lcr ) { #if 0 float a0[10], a1[10]; op4_fwd_sdl_epilog2_fast_s( a0+0, a0+1, a0+2, a0+3, a0+4, a0+5, a0+6, a0+7, a0+8, a0+9, w, v, y0_lcr ); op4_fwd_sdl_epilog2_fast_s( a1+0, a1+1, a1+2, a1+3, a1+4, a1+5, a1+6, a1+7, a1+8, 
a1+9, w, v, y1_lcr ); for(int i = 0; i < 10; i++) { op4_fwd_sdl_prolog2_part_s( a0+i, a1+i, w, v, x0_lcr+i*12+0, x0_lcr+i*12+4, x0_lcr+i*12+8 ); } #else // epilog2: export(3) op4_fwd_sdl_prolog2_part_s( (y0_lcr+0+3), (y1_lcr+0+3), w, v, x0_lcr+9*12+0, x0_lcr+9*12+4, x0_lcr+9*12+8 ); // epilog2: pass-epilog op4_fwd_sdl_epilog2_prolog2_2x2_s( w, v, y0_lcr, y1_lcr, x0_lcr+0*12, x0_lcr+1*12 ); // epilog2: export(2) op4_fwd_sdl_prolog2_part_s( (y0_lcr+0+2), (y1_lcr+0+2), w, v, x0_lcr+8*12+0, x0_lcr+8*12+4, x0_lcr+8*12+8 ); // epilog2: pass-epilog op4_fwd_sdl_epilog2_prolog2_2x2_s( w, v, y0_lcr, y1_lcr, x0_lcr+2*12, x0_lcr+3*12 ); // epilog2: export(1) op4_fwd_sdl_prolog2_part_s( (y0_lcr+0+1), (y1_lcr+0+1), w, v, x0_lcr+7*12+0, x0_lcr+7*12+4, x0_lcr+7*12+8 ); // epilog2: pass-epilog op4_fwd_sdl_epilog2_prolog2_2x2_s( w, v, y0_lcr, y1_lcr, x0_lcr+4*12, x0_lcr+5*12 ); // epilog2: export(0) op4_fwd_sdl_prolog2_part_s( (y0_lcr+0+0), (y1_lcr+0+0), w, v, x0_lcr+6*12+0, x0_lcr+6*12+4, x0_lcr+6*12+8 ); #endif } // TODO: improve single-loop approach void dwt_cdf97_2f_inplace_sdl_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); int j = 0; const int j_limit = ceil_log2( decompose_one ? size_o_big_max : size_o_big_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; const int offset = 1; for(;;) { if( *j_max_ptr == j ) break; // const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); // const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); // const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); // const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); const int stride_y_j = stride_y * (1 << (j)); const int stride_x_j = stride_x * (1 << (j)); const int size_x = size_i_src_x; const int size_y = size_i_src_y; const int pairs_x = (to_even(size_x-offset)-4)/2; const int pairs_y = (to_even(size_y-offset)-4)/2; // const int max_y = to_even(size_y-offset)+offset; if( size_x > 1 && size_x < 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y < 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } if( size_x > 1 && size_x >= 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } const int offset = 1; //#define DISABLE_SLOW //#define DISABLE_CORE if( size_x > 1 && size_x >= 5 && size_y > 1 && size_y >= 5 // single-loop block && pairs_x > 3 && pairs_y > 3 // exception (steps<3) ) { const float w[4] __attribute__ ((aligned (16))) = { dwt_cdf97_u2_s, -dwt_cdf97_p2_s, dwt_cdf97_u1_s, -dwt_cdf97_p1_s }; const float v[4] __attribute__ ((aligned (16))) = { 1/dwt_cdf97_s1_s, dwt_cdf97_s1_s, 1/dwt_cdf97_s1_s, dwt_cdf97_s1_s }; // single const int prolog2_coeffs = 
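	// 10 = vertical latency of the prolog2/epilog2 scheme: this many rows are
	// consumed before the steady-state core below emits its first output row
	// (note the core writes to y-10)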
10; // const int epilog2_coeffs = 10; int max_y = to_even(size_y - offset) + offset; float y0_lcr[12] __attribute__ ((aligned (16))); // float *y0_l __attribute__ ((aligned (16))) = &y0_lcr[0]; // float *y0_c __attribute__ ((aligned (16))) = &y0_lcr[4]; // float *y0_r __attribute__ ((aligned (16))) = &y0_lcr[8]; float y1_lcr[12] __attribute__ ((aligned (16))); // float *y1_l __attribute__ ((aligned (16))) = &y1_lcr[0]; // float *y1_c __attribute__ ((aligned (16))) = &y1_lcr[4]; // float *y1_r __attribute__ ((aligned (16))) = &y1_lcr[8]; float x0_lcr[4*3*size_x] __attribute__ ((aligned (16))); // FIXME: long array on the stack // horizontal full + vertical none (offset) for(int y = 0; y < 0+offset; y++) { float *ptr0_x = addr2_s(ptr, y, offset, stride_x_j, stride_y_j); op4_fwd_sdl_s(ptr0_x, stride_y_j, pairs_x, w, v); // FIXME: slow } // horizontal full + vertical prolog2 (10) #if 1 for(int y = 0+offset; y < 0+offset+prolog2_coeffs; y++) { float *ptr0_x = addr2_s(ptr, y+0, offset, stride_x_j, stride_y_j); op4_fwd_sdl_s(ptr0_x, stride_y_j, pairs_x, w, v); } for(int x = 0; x < size_x; x++) { float *ptr0_y = addr2_s(ptr, offset, x, stride_x_j, stride_y_j); op4_fwd_sdl_prolog2_s( addr1_s(ptr0_y, 0, stride_x_j), addr1_s(ptr0_y, 1, stride_x_j), addr1_s(ptr0_y, 2, stride_x_j), addr1_s(ptr0_y, 3, stride_x_j), addr1_s(ptr0_y, 4, stride_x_j), addr1_s(ptr0_y, 5, stride_x_j), addr1_s(ptr0_y, 6, stride_x_j), addr1_s(ptr0_y, 7, stride_x_j), addr1_s(ptr0_y, 8, stride_x_j), addr1_s(ptr0_y, 9, stride_x_j), w, v, &x0_lcr[12*x+0], &x0_lcr[12*x+4], &x0_lcr[12*x+8] ); } #else // part0 { int row = 3; int idx = 3; int y0 = 0+offset+row; float *buff0_x0_lcr = x0_lcr; float *ptr0_x = addr2_s(ptr, y0+0, 0, stride_x_j, stride_y_j); for(int x = 0; x < offset; x++) { op4_fwd_sdl_prolog2_import_s( ptr0_x, buff0_x0_lcr, idx ); buff0_x0_lcr += 1*12; ptr0_x = addr1_s(ptr0_x, +1, stride_y_j); } op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr0_x, 0, stride_y_j), addr1_s(ptr0_x, 1, stride_y_j), addr1_s(ptr0_x, 2, stride_y_j), addr1_s(ptr0_x, 3, stride_y_j), addr1_s(ptr0_x, 4, stride_y_j), addr1_s(ptr0_x, 5, stride_y_j), addr1_s(ptr0_x, 6, stride_y_j), addr1_s(ptr0_x, 7, stride_y_j), addr1_s(ptr0_x, 8, stride_y_j), addr1_s(ptr0_x, 9, stride_y_j), w, v, y0_lcr ); for(int s = 0; s < pairs_x-3; s++) { op4_fwd_sdl_core_prolog2_2x1_s( addr1_s(ptr0_x, 10, stride_y_j), addr1_s(ptr0_x, 11, stride_y_j), w, v, y0_lcr, buff0_x0_lcr, buff0_x0_lcr+12, idx ); buff0_x0_lcr += 2*12; ptr0_x = addr1_s(ptr0_x, +2, stride_y_j); } op4_fwd_sdl_epilog2_prolog2_10x1_s( w, v, y0_lcr, buff0_x0_lcr, idx ); buff0_x0_lcr += 10*12; ptr0_x = addr1_s(ptr0_x, +10, stride_y_j); if( is_even(size_x) ) { op4_fwd_sdl_prolog2_import_s( ptr0_x, buff0_x0_lcr, idx ); } } // part1 { int row0 = 4; int row1 = 5; int y0 = 0+offset+row0; int y1 = 0+offset+row1; float *buff0_x0_lcr = x0_lcr; float *ptr0_x = addr2_s(ptr, y0+0, 0, stride_x_j, stride_y_j); float *ptr1_x = addr2_s(ptr, y1+0, 0, stride_x_j, stride_y_j); for(int x = 0; x < offset; x++) { op4_fwd_sdl_prolog2_part_s( ptr0_x, ptr1_x, w, v, buff0_x0_lcr+0, buff0_x0_lcr+4, buff0_x0_lcr+8 ); } buff0_x0_lcr += offset*12; ptr0_x = addr1_s(ptr0_x, offset, stride_y_j); ptr1_x = addr1_s(ptr1_x, offset, stride_y_j); // FIXME: merge these into single function op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr0_x, 0, stride_y_j), addr1_s(ptr0_x, 1, stride_y_j), addr1_s(ptr0_x, 2, stride_y_j), addr1_s(ptr0_x, 3, stride_y_j), addr1_s(ptr0_x, 4, stride_y_j), addr1_s(ptr0_x, 5, stride_y_j), addr1_s(ptr0_x, 6, stride_y_j), addr1_s(ptr0_x, 7, 
stride_y_j), addr1_s(ptr0_x, 8, stride_y_j), addr1_s(ptr0_x, 9, stride_y_j), w, v, y0_lcr ); op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr1_x, 0, stride_y_j), addr1_s(ptr1_x, 1, stride_y_j), addr1_s(ptr1_x, 2, stride_y_j), addr1_s(ptr1_x, 3, stride_y_j), addr1_s(ptr1_x, 4, stride_y_j), addr1_s(ptr1_x, 5, stride_y_j), addr1_s(ptr1_x, 6, stride_y_j), addr1_s(ptr1_x, 7, stride_y_j), addr1_s(ptr1_x, 8, stride_y_j), addr1_s(ptr1_x, 9, stride_y_j), w, v, y1_lcr ); for(int s = 0; s < pairs_x-3; s++) { op4_fwd_sdl_core_prolog2_2x2_s( addr1_s(ptr0_x, 10, stride_y_j), addr1_s(ptr0_x, 11, stride_y_j), addr1_s(ptr1_x, 10, stride_y_j), addr1_s(ptr1_x, 11, stride_y_j), w, v, y0_lcr, y1_lcr, buff0_x0_lcr, buff0_x0_lcr+12 ); buff0_x0_lcr += 2*12; ptr0_x = addr1_s(ptr0_x, +2, stride_y_j); ptr1_x = addr1_s(ptr1_x, +2, stride_y_j); } op4_fwd_sdl_epilog2_prolog2_10x2_s( w, v, y0_lcr, y1_lcr, buff0_x0_lcr ); buff0_x0_lcr += 10*12; ptr0_x = addr1_s(ptr0_x, +10, stride_y_j); ptr1_x = addr1_s(ptr1_x, +10, stride_y_j); if( is_even(size_x) ) { op4_fwd_sdl_prolog2_part_s( ptr0_x, ptr1_x, w, v, buff0_x0_lcr+0, buff0_x0_lcr+4, buff0_x0_lcr+8 ); } } // part2 { int row = 2; int idx = 2; int y0 = 0+offset+row; float *buff0_x0_lcr = x0_lcr; float *ptr0_x = addr2_s(ptr, y0+0, 0, stride_x_j, stride_y_j); for(int x = 0; x < offset; x++) { op4_fwd_sdl_prolog2_import_s( ptr0_x, buff0_x0_lcr, idx ); buff0_x0_lcr += 1*12; ptr0_x = addr1_s(ptr0_x, +1, stride_y_j); } op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr0_x, 0, stride_y_j), addr1_s(ptr0_x, 1, stride_y_j), addr1_s(ptr0_x, 2, stride_y_j), addr1_s(ptr0_x, 3, stride_y_j), addr1_s(ptr0_x, 4, stride_y_j), addr1_s(ptr0_x, 5, stride_y_j), addr1_s(ptr0_x, 6, stride_y_j), addr1_s(ptr0_x, 7, stride_y_j), addr1_s(ptr0_x, 8, stride_y_j), addr1_s(ptr0_x, 9, stride_y_j), w, v, y0_lcr ); for(int s = 0; s < pairs_x-3; s++) { op4_fwd_sdl_core_prolog2_2x1_s( addr1_s(ptr0_x, 10, stride_y_j), addr1_s(ptr0_x, 11, stride_y_j), w, v, y0_lcr, buff0_x0_lcr, buff0_x0_lcr+12, idx ); buff0_x0_lcr += 2*12; ptr0_x = addr1_s(ptr0_x, +2, stride_y_j); } op4_fwd_sdl_epilog2_prolog2_10x1_s( w, v, y0_lcr, buff0_x0_lcr, idx ); buff0_x0_lcr += 10*12; ptr0_x = addr1_s(ptr0_x, +10, stride_y_j); if( is_even(size_x) ) { op4_fwd_sdl_prolog2_import_s( ptr0_x, buff0_x0_lcr, idx ); } } // part3 { int row0 = 6; int row1 = 7; int y0 = 0+offset+row0; int y1 = 0+offset+row1; float *buff0_x0_lcr = x0_lcr; float *ptr0_x = addr2_s(ptr, y0+0, 0, stride_x_j, stride_y_j); float *ptr1_x = addr2_s(ptr, y1+0, 0, stride_x_j, stride_y_j); for(int x = 0; x < offset; x++) { op4_fwd_sdl_prolog2_part_s( ptr0_x, ptr1_x, w, v, buff0_x0_lcr+0, buff0_x0_lcr+4, buff0_x0_lcr+8 ); } buff0_x0_lcr += offset*12; ptr0_x = addr1_s(ptr0_x, offset, stride_y_j); ptr1_x = addr1_s(ptr1_x, offset, stride_y_j); // FIXME: merge these into single function op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr0_x, 0, stride_y_j), addr1_s(ptr0_x, 1, stride_y_j), addr1_s(ptr0_x, 2, stride_y_j), addr1_s(ptr0_x, 3, stride_y_j), addr1_s(ptr0_x, 4, stride_y_j), addr1_s(ptr0_x, 5, stride_y_j), addr1_s(ptr0_x, 6, stride_y_j), addr1_s(ptr0_x, 7, stride_y_j), addr1_s(ptr0_x, 8, stride_y_j), addr1_s(ptr0_x, 9, stride_y_j), w, v, y0_lcr ); op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr1_x, 0, stride_y_j), addr1_s(ptr1_x, 1, stride_y_j), addr1_s(ptr1_x, 2, stride_y_j), addr1_s(ptr1_x, 3, stride_y_j), addr1_s(ptr1_x, 4, stride_y_j), addr1_s(ptr1_x, 5, stride_y_j), addr1_s(ptr1_x, 6, stride_y_j), addr1_s(ptr1_x, 7, stride_y_j), addr1_s(ptr1_x, 8, stride_y_j), addr1_s(ptr1_x, 9, stride_y_j), w, v, y1_lcr ); 
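			// steady state for this row pair: each step lifts one horizontal pair on
			// both rows and feeds the two results into the per-column vertical prolog
			// buffers (buff0_x0_lcr)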
for(int s = 0; s < pairs_x-3; s++) { op4_fwd_sdl_core_prolog2_2x2_s( addr1_s(ptr0_x, 10, stride_y_j), addr1_s(ptr0_x, 11, stride_y_j), addr1_s(ptr1_x, 10, stride_y_j), addr1_s(ptr1_x, 11, stride_y_j), w, v, y0_lcr, y1_lcr, buff0_x0_lcr, buff0_x0_lcr+12 ); buff0_x0_lcr += 2*12; ptr0_x = addr1_s(ptr0_x, +2, stride_y_j); ptr1_x = addr1_s(ptr1_x, +2, stride_y_j); } op4_fwd_sdl_epilog2_prolog2_10x2_s( w, v, y0_lcr, y1_lcr, buff0_x0_lcr ); buff0_x0_lcr += 10*12; ptr0_x = addr1_s(ptr0_x, +10, stride_y_j); ptr1_x = addr1_s(ptr1_x, +10, stride_y_j); if( is_even(size_x) ) { op4_fwd_sdl_prolog2_part_s( ptr0_x, ptr1_x, w, v, buff0_x0_lcr+0, buff0_x0_lcr+4, buff0_x0_lcr+8 ); } } // part4 { int row = 1; int idx = 1; int y0 = 0+offset+row; float *buff0_x0_lcr = x0_lcr; float *ptr0_x = addr2_s(ptr, y0+0, 0, stride_x_j, stride_y_j); for(int x = 0; x < offset; x++) { op4_fwd_sdl_prolog2_import_s( ptr0_x, buff0_x0_lcr, idx ); buff0_x0_lcr += 1*12; ptr0_x = addr1_s(ptr0_x, +1, stride_y_j); } op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr0_x, 0, stride_y_j), addr1_s(ptr0_x, 1, stride_y_j), addr1_s(ptr0_x, 2, stride_y_j), addr1_s(ptr0_x, 3, stride_y_j), addr1_s(ptr0_x, 4, stride_y_j), addr1_s(ptr0_x, 5, stride_y_j), addr1_s(ptr0_x, 6, stride_y_j), addr1_s(ptr0_x, 7, stride_y_j), addr1_s(ptr0_x, 8, stride_y_j), addr1_s(ptr0_x, 9, stride_y_j), w, v, y0_lcr ); for(int s = 0; s < pairs_x-3; s++) { op4_fwd_sdl_core_prolog2_2x1_s( addr1_s(ptr0_x, 10, stride_y_j), addr1_s(ptr0_x, 11, stride_y_j), w, v, y0_lcr, buff0_x0_lcr, buff0_x0_lcr+12, idx ); buff0_x0_lcr += 2*12; ptr0_x = addr1_s(ptr0_x, +2, stride_y_j); } op4_fwd_sdl_epilog2_prolog2_10x1_s( w, v, y0_lcr, buff0_x0_lcr, idx ); buff0_x0_lcr += 10*12; ptr0_x = addr1_s(ptr0_x, +10, stride_y_j); if( is_even(size_x) ) { op4_fwd_sdl_prolog2_import_s( ptr0_x, buff0_x0_lcr, idx ); } } // part5 { int row0 = 8; int row1 = 9; int y0 = 0+offset+row0; int y1 = 0+offset+row1; float *buff0_x0_lcr = x0_lcr; float *ptr0_x = addr2_s(ptr, y0+0, 0, stride_x_j, stride_y_j); float *ptr1_x = addr2_s(ptr, y1+0, 0, stride_x_j, stride_y_j); for(int x = 0; x < offset; x++) { op4_fwd_sdl_prolog2_part_s( ptr0_x, ptr1_x, w, v, buff0_x0_lcr+0, buff0_x0_lcr+4, buff0_x0_lcr+8 ); } buff0_x0_lcr += offset*12; ptr0_x = addr1_s(ptr0_x, offset, stride_y_j); ptr1_x = addr1_s(ptr1_x, offset, stride_y_j); // FIXME: merge these into single function op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr0_x, 0, stride_y_j), addr1_s(ptr0_x, 1, stride_y_j), addr1_s(ptr0_x, 2, stride_y_j), addr1_s(ptr0_x, 3, stride_y_j), addr1_s(ptr0_x, 4, stride_y_j), addr1_s(ptr0_x, 5, stride_y_j), addr1_s(ptr0_x, 6, stride_y_j), addr1_s(ptr0_x, 7, stride_y_j), addr1_s(ptr0_x, 8, stride_y_j), addr1_s(ptr0_x, 9, stride_y_j), w, v, y0_lcr ); op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr1_x, 0, stride_y_j), addr1_s(ptr1_x, 1, stride_y_j), addr1_s(ptr1_x, 2, stride_y_j), addr1_s(ptr1_x, 3, stride_y_j), addr1_s(ptr1_x, 4, stride_y_j), addr1_s(ptr1_x, 5, stride_y_j), addr1_s(ptr1_x, 6, stride_y_j), addr1_s(ptr1_x, 7, stride_y_j), addr1_s(ptr1_x, 8, stride_y_j), addr1_s(ptr1_x, 9, stride_y_j), w, v, y1_lcr ); for(int s = 0; s < pairs_x-3; s++) { op4_fwd_sdl_core_prolog2_2x2_s( addr1_s(ptr0_x, 10, stride_y_j), addr1_s(ptr0_x, 11, stride_y_j), addr1_s(ptr1_x, 10, stride_y_j), addr1_s(ptr1_x, 11, stride_y_j), w, v, y0_lcr, y1_lcr, buff0_x0_lcr, buff0_x0_lcr+12 ); buff0_x0_lcr += 2*12; ptr0_x = addr1_s(ptr0_x, +2, stride_y_j); ptr1_x = addr1_s(ptr1_x, +2, stride_y_j); } op4_fwd_sdl_epilog2_prolog2_10x2_s( w, v, y0_lcr, y1_lcr, buff0_x0_lcr ); buff0_x0_lcr += 
10*12; ptr0_x = addr1_s(ptr0_x, +10, stride_y_j); ptr1_x = addr1_s(ptr1_x, +10, stride_y_j); if( is_even(size_x) ) { op4_fwd_sdl_prolog2_part_s( ptr0_x, ptr1_x, w, v, buff0_x0_lcr+0, buff0_x0_lcr+4, buff0_x0_lcr+8 ); } } // part6 { int row = 0; int idx = 0; int y0 = 0+offset+row; float *buff0_x0_lcr = x0_lcr; float *ptr0_x = addr2_s(ptr, y0+0, 0, stride_x_j, stride_y_j); for(int x = 0; x < offset; x++) { op4_fwd_sdl_prolog2_import_s( ptr0_x, buff0_x0_lcr, idx ); buff0_x0_lcr += 1*12; ptr0_x = addr1_s(ptr0_x, +1, stride_y_j); } op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr0_x, 0, stride_y_j), addr1_s(ptr0_x, 1, stride_y_j), addr1_s(ptr0_x, 2, stride_y_j), addr1_s(ptr0_x, 3, stride_y_j), addr1_s(ptr0_x, 4, stride_y_j), addr1_s(ptr0_x, 5, stride_y_j), addr1_s(ptr0_x, 6, stride_y_j), addr1_s(ptr0_x, 7, stride_y_j), addr1_s(ptr0_x, 8, stride_y_j), addr1_s(ptr0_x, 9, stride_y_j), w, v, y0_lcr ); for(int s = 0; s < pairs_x-3; s++) { op4_fwd_sdl_core_prolog2_2x1_s( addr1_s(ptr0_x, 10, stride_y_j), addr1_s(ptr0_x, 11, stride_y_j), w, v, y0_lcr, buff0_x0_lcr, buff0_x0_lcr+12, idx ); buff0_x0_lcr += 2*12; ptr0_x = addr1_s(ptr0_x, +2, stride_y_j); } op4_fwd_sdl_epilog2_prolog2_10x1_s( w, v, y0_lcr, buff0_x0_lcr, idx ); buff0_x0_lcr += 10*12; ptr0_x = addr1_s(ptr0_x, +10, stride_y_j); if( is_even(size_x) ) { op4_fwd_sdl_prolog2_import_s( ptr0_x, buff0_x0_lcr, idx ); } } #endif // horizontal full + vertical core (*) for(int y = 0+offset+prolog2_coeffs; y < max_y; y+=2) { float *ptr0_x = addr2_s(ptr, y+0, offset, stride_x_j, stride_y_j); float *ptr1_x = addr2_s(ptr, y+1, offset, stride_x_j, stride_y_j); float *out0_x = addr2_s(ptr, y+0-10, offset, stride_x_j, stride_y_j); float *out1_x = addr2_s(ptr, y+1-10, offset, stride_x_j, stride_y_j); op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr0_x, 0, stride_y_j), addr1_s(ptr0_x, 1, stride_y_j), addr1_s(ptr0_x, 2, stride_y_j), addr1_s(ptr0_x, 3, stride_y_j), addr1_s(ptr0_x, 4, stride_y_j), addr1_s(ptr0_x, 5, stride_y_j), addr1_s(ptr0_x, 6, stride_y_j), addr1_s(ptr0_x, 7, stride_y_j), addr1_s(ptr0_x, 8, stride_y_j), addr1_s(ptr0_x, 9, stride_y_j), w, v, y0_lcr ); op4_fwd_sdl_prolog2_fast_s( addr1_s(ptr1_x, 0, stride_y_j), addr1_s(ptr1_x, 1, stride_y_j), addr1_s(ptr1_x, 2, stride_y_j), addr1_s(ptr1_x, 3, stride_y_j), addr1_s(ptr1_x, 4, stride_y_j), addr1_s(ptr1_x, 5, stride_y_j), addr1_s(ptr1_x, 6, stride_y_j), addr1_s(ptr1_x, 7, stride_y_j), addr1_s(ptr1_x, 8, stride_y_j), addr1_s(ptr1_x, 9, stride_y_j), w, v, y1_lcr ); float *buff_x0_lcr = x0_lcr+12*(offset+0); for(int s = 0; s < pairs_x-3; s++) { #ifndef DISABLE_CORE op4_fwd_sdl_2x2_s( addr1_s(ptr0_x, 10, stride_y_j), addr1_s(ptr0_x, 11, stride_y_j), addr1_s(ptr1_x, 10, stride_y_j), addr1_s(ptr1_x, 11, stride_y_j), addr1_s(out0_x, 0, stride_y_j), addr1_s(out0_x, 1, stride_y_j), addr1_s(out1_x, 0, stride_y_j), addr1_s(out1_x, 1, stride_y_j), w, v, y0_lcr, y1_lcr, buff_x0_lcr+0, buff_x0_lcr+12 ); #endif buff_x0_lcr += 2*12; ptr0_x = addr1_s(ptr0_x, +2, stride_y_j); ptr1_x = addr1_s(ptr1_x, +2, stride_y_j); out0_x = addr1_s(out0_x, +2, stride_y_j); out1_x = addr1_s(out1_x, +2, stride_y_j); } op4_fwd_sdl_epilog2_fast_s( addr1_s(ptr0_x, 0, stride_y_j), addr1_s(ptr0_x, 1, stride_y_j), addr1_s(ptr0_x, 2, stride_y_j), addr1_s(ptr0_x, 3, stride_y_j), addr1_s(ptr0_x, 4, stride_y_j), addr1_s(ptr0_x, 5, stride_y_j), addr1_s(ptr0_x, 6, stride_y_j), addr1_s(ptr0_x, 7, stride_y_j), addr1_s(ptr0_x, 8, stride_y_j), addr1_s(ptr0_x, 9, stride_y_j), w, v, y0_lcr ); op4_fwd_sdl_epilog2_fast_s( addr1_s(ptr1_x, 0, stride_y_j), addr1_s(ptr1_x, 1, 
stride_y_j), addr1_s(ptr1_x, 2, stride_y_j), addr1_s(ptr1_x, 3, stride_y_j), addr1_s(ptr1_x, 4, stride_y_j), addr1_s(ptr1_x, 5, stride_y_j), addr1_s(ptr1_x, 6, stride_y_j), addr1_s(ptr1_x, 7, stride_y_j), addr1_s(ptr1_x, 8, stride_y_j), addr1_s(ptr1_x, 9, stride_y_j), w, v, y1_lcr ); // right border for(int i = 0; i < 10+is_even(size_x); i++) { op4_fwd_sdl_core_s( addr1_s(ptr0_x, i, stride_y_j), // in addr1_s(ptr1_x, i, stride_y_j), // in addr1_s(out0_x, i, stride_y_j), // out addr1_s(out1_x, i, stride_y_j), // out w, v, buff_x0_lcr+0, buff_x0_lcr+4, buff_x0_lcr+8 ); buff_x0_lcr += 1*12; } } // horizontal full + vertical core (last one) for(int y = max_y; y < size_y; y++) { float *ptr0_x = addr2_s(ptr, y, offset, stride_x_j, stride_y_j); op4_fwd_sdl_s(ptr0_x, stride_y_j, pairs_x, w, v); // FIXME: slow } // the left most column (only if offset == 1) for(int s = 0; s < pairs_y-3; s++) { for(int x = 0; x < offset; x++) { float *ptr0_y = addr2_s(ptr, offset, x, stride_x_j, stride_y_j); ptr0_y = addr1_s(ptr0_y, 2*s, stride_x_j); op4_fwd_sdl_core_s( addr1_s(ptr0_y, 10, stride_x_j), addr1_s(ptr0_y, 11, stride_x_j), addr1_s(ptr0_y, 0, stride_x_j), addr1_s(ptr0_y, 1, stride_x_j), w, v, &x0_lcr[12*x+0], &x0_lcr[12*x+4], &x0_lcr[12*x+8] ); } } // horizontal none + vertical epilog2 for(int x = 0; x < size_x; x++) { float *ptr0_y = addr2_s(ptr, offset, x, stride_x_j, stride_y_j); ptr0_y = addr1_s(ptr0_y, (pairs_y-3)*2, stride_x_j); #ifndef DISABLE_SLOW op4_fwd_sdl_epilog2_fast_s( addr1_s(ptr0_y, 0, stride_x_j), addr1_s(ptr0_y, 1, stride_x_j), addr1_s(ptr0_y, 2, stride_x_j), addr1_s(ptr0_y, 3, stride_x_j), addr1_s(ptr0_y, 4, stride_x_j), addr1_s(ptr0_y, 5, stride_x_j), addr1_s(ptr0_y, 6, stride_x_j), addr1_s(ptr0_y, 7, stride_x_j), addr1_s(ptr0_y, 8, stride_x_j), addr1_s(ptr0_y, 9, stride_x_j), w, v, &x0_lcr[12*x] ); #endif } } else { // double if( size_x > 1 && size_x >= 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_core_sdl_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_core_sdl_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } } if( size_x > 1 && size_x >= 5 ) { for(int y = 0; y < size_y; y++) { dwt_cdf97_f_ex_stride_inplace_part_epilog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x, // N stride_y_j); } } if( size_y > 1 && size_y >= 5 ) { for(int x = 0; x < size_x; x++) { dwt_cdf97_f_ex_stride_inplace_part_epilog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y, // N stride_x_j); } } j++; } FUNC_END; } void dwt_cdf97_1i_s( void *ptr, int stride_y, int size_o_big_x, int size_i_big_x, int j_max, int zero_padding ) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int threads = 1; #ifdef microblaze dwt_util_switch_op(DWT_OP_LIFT4SB); #endif const int offset = 0; float **temp = alloc_temp_s(threads, calc_and_set_temp_size_s(size_o_big_x, offset) ); int j = ceil_log2( size_o_big_x ); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int lines_x = size_o_dst_x; if( lines_x > 1 ) { dwt_cdf97_i_ex_stride_s( addr1_const_s(ptr,0,stride_y), addr1_const_s(ptr,size_o_src_x,stride_y), addr1_s(ptr,0,stride_y), temp[0], size_i_dst_x, stride_y); } if(zero_padding) { dwt_zero_padding_i_stride_s( 
addr1_s(ptr,0,stride_y), size_i_dst_x, size_o_dst_x, stride_y); } j--; } free_temp_s(threads, temp); FUNC_END; } void dwt_cdf53_1i_s( void *ptr, int stride_y, int size_o_big_x, int size_i_big_x, int j_max, int zero_padding ) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int threads = 1; const int offset = 0; float **temp = alloc_temp_s(threads, calc_and_set_temp_size_s(size_o_big_x, offset) ); int j = ceil_log2( size_o_big_x ); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int lines_x = size_o_dst_x; if( lines_x > 1 ) { dwt_cdf53_i_ex_stride_s( addr1_const_s(ptr,0,stride_y), addr1_const_s(ptr,size_o_src_x,stride_y), addr1_s(ptr,0,stride_y), temp[0], size_i_dst_x, stride_y); } if(zero_padding) { dwt_zero_padding_i_stride_s( addr1_s(ptr,0,stride_y), size_i_dst_x, size_o_dst_x, stride_y); } j--; } free_temp_s(threads, temp); FUNC_END; } void dwt_interp53_1i_s( void *ptr, int stride_y, int size_o_big_x, int size_i_big_x, int j_max, int zero_padding ) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int threads = 1; const int offset = 0; float **temp = alloc_temp_s(threads, calc_and_set_temp_size_s(size_o_big_x, offset) ); int j = ceil_log2( size_o_big_x ); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int lines_x = size_o_dst_x; if( lines_x > 1 ) { dwt_interp53_i_ex_stride_s( addr1_const_s(ptr,0,stride_y), addr1_const_s(ptr,size_o_src_x,stride_y), addr1_s(ptr,0,stride_y), temp[0], size_i_dst_x, stride_y); } if(zero_padding) { dwt_zero_padding_i_stride_s( addr1_s(ptr,0,stride_y), size_i_dst_x, size_o_dst_x, stride_y); } j--; } free_temp_s(threads, temp); FUNC_END; } void dwt_cdf97_2f1_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int zero_padding ) { UNUSED(size_o_big_y); for(int y = 0; y < size_i_big_y; y++) { int x = 0; void *y_ptr = addr2_s(ptr, y, x, stride_x, stride_y); // stride? dwt_cdf97_1f_s( y_ptr, stride_y, // stride? 
size_o_big_x, size_i_big_x, j_max_ptr, zero_padding ); } } void dwt_cdf53_2f1_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int zero_padding ) { UNUSED(size_o_big_y); for(int y = 0; y < size_i_big_y; y++) { int x = 0; void *y_ptr = addr2_s(ptr, y, x, stride_x, stride_y); dwt_cdf53_1f_s( y_ptr, stride_y, size_o_big_x, size_i_big_x, j_max_ptr, zero_padding ); } } void dwt_cdf97_1f_s( void *ptr, int stride_y, int size_o_big_x, int size_i_big_x, int *j_max_ptr, int zero_padding ) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int threads = 1; #ifdef microblaze dwt_util_switch_op(DWT_OP_LIFT4SA); #endif const int offset = 1; float **temp = alloc_temp_s(threads, calc_and_set_temp_size_s(size_o_big_x, offset) ); int j = 0; const int j_limit = ceil_log2( size_o_big_x ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int lines_x = size_o_src_x; if( lines_x > 1 ) { dwt_cdf97_f_ex_stride_s( addr1_const_s(ptr,0,stride_y), addr1_s(ptr,0,stride_y), addr1_s(ptr,size_o_dst_x,stride_y), temp[0], size_i_src_x, stride_y); } if(zero_padding) { dwt_zero_padding_f_stride_s( addr1_s(ptr,0,stride_y), addr1_s(ptr,size_o_dst_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); } j++; } free_temp_s(threads, temp); FUNC_END; } void dwt_cdf53_1f_s( void *ptr, int stride_y, int size_o_big_x, int size_i_big_x, int *j_max_ptr, int zero_padding ) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int threads = 1; const int offset = 1; float **temp = alloc_temp_s(threads, calc_and_set_temp_size_s(size_o_big_x, offset) ); int j = 0; const int j_limit = ceil_log2( size_o_big_x ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int lines_x = size_o_src_x; if( lines_x > 1 ) { dwt_cdf53_f_ex_stride_s( addr1_const_s(ptr,0,stride_y), addr1_s(ptr,0,stride_y), addr1_s(ptr,size_o_dst_x,stride_y), temp[0], size_i_src_x, stride_y); } if(zero_padding) { dwt_zero_padding_f_stride_s( addr1_s(ptr,0,stride_y), addr1_s(ptr,size_o_dst_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); } j++; } free_temp_s(threads, temp); FUNC_END; } void dwt_interp53_1f_s( void *ptr, int stride_y, int size_o_big_x, int size_i_big_x, int *j_max_ptr, int zero_padding ) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int threads = 1; const int offset = 1; float **temp = alloc_temp_s(threads, calc_and_set_temp_size_s(size_o_big_x, offset) ); int j = 0; const int j_limit = ceil_log2( size_o_big_x ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int lines_x = size_o_src_x; if( lines_x > 1 ) { dwt_interp53_f_ex_stride_s( addr1_const_s(ptr,0,stride_y), addr1_s(ptr,0,stride_y), addr1_s(ptr,size_o_dst_x,stride_y), temp[0], size_i_src_x, stride_y); } if(zero_padding) { 
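			// zero-pad the low-pass band (length size_o_dst_x) and the
			// high-pass band (length size_o_src_x - size_o_dst_x) past the
			// coefficients derived from the size_i_src_x valid input samples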
dwt_zero_padding_f_stride_s( addr1_s(ptr,0,stride_y), addr1_s(ptr,size_o_dst_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); } j++; } free_temp_s(threads, temp); FUNC_END; } void dwt_interp2_1f_s( void *ptr, int stride_y, int size_o_big_x, int size_i_big_x, int *j_max_ptr, int zero_padding ) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int threads = 1; const int offset = 1; float **temp = alloc_temp_s(threads, calc_and_set_temp_size_s(size_o_big_x, offset) ); int j = 0; const int j_limit = ceil_log2( size_o_big_x ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int lines_x = size_o_src_x; if( lines_x > 1 ) { dwt_interp2_f_ex_stride_s( addr1_const_s(ptr,0,stride_y), addr1_s(ptr,0,stride_y), addr1_s(ptr,size_o_dst_x,stride_y), temp[0], size_i_src_x, stride_y); } if(zero_padding) { dwt_zero_padding_f_stride_s( addr1_s(ptr,0,stride_y), addr1_s(ptr,size_o_dst_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); } j++; } free_temp_s(threads, temp); FUNC_END; } void dwt_cdf53_2f_i( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); int temp[size_o_big_max]; if(NULL == temp) abort(); int j = 0; const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_cdf53_f_ex_stride_i( addr2_i(ptr,y,0,stride_x,stride_y), addr2_i(ptr,y,0,stride_x,stride_y), addr2_i(ptr,y,size_o_dst_x,stride_x,stride_y), temp, size_i_src_x, stride_y); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_cdf53_f_ex_stride_i( addr2_i(ptr,0,x,stride_x,stride_y), addr2_i(ptr,0,x,stride_x,stride_y), addr2_i(ptr,size_o_dst_y,x,stride_x,stride_y), temp, size_i_src_y, stride_x); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_i( addr2_i(ptr,y,0,stride_x,stride_y), addr2_i(ptr,y,size_o_dst_x,stride_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_i( addr2_i(ptr,0,x,stride_x,stride_y), addr2_i(ptr,size_o_dst_y,x,stride_x,stride_y), size_i_src_y, size_o_dst_y, size_o_src_y-size_o_dst_y, stride_x); } j++; } } void dwt_cdf97_2f_i( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, 
int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); int temp[size_o_big_max]; if(NULL == temp) abort(); int j = 0; const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_cdf97_f_ex_stride_i( addr2_i(ptr,y,0,stride_x,stride_y), addr2_i(ptr,y,0,stride_x,stride_y), addr2_i(ptr,y,size_o_dst_x,stride_x,stride_y), temp, size_i_src_x, stride_y); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_cdf97_f_ex_stride_i( addr2_i(ptr,0,x,stride_x,stride_y), addr2_i(ptr,0,x,stride_x,stride_y), addr2_i(ptr,size_o_dst_y,x,stride_x,stride_y), temp, size_i_src_y, stride_x); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_i( addr2_i(ptr,y,0,stride_x,stride_y), addr2_i(ptr,y,size_o_dst_x,stride_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_i( addr2_i(ptr,0,x,stride_x,stride_y), addr2_i(ptr,size_o_dst_y,x,stride_x,stride_y), size_i_src_y, size_o_dst_y, size_o_src_y-size_o_dst_y, stride_x); } j++; } } void dwt_cdf53_2f_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); float temp[size_o_big_max]; if(NULL == temp) abort(); int j = 0; const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_cdf53_f_ex_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), temp, size_i_src_x, stride_y); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_cdf53_f_ex_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), 
addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), temp, size_i_src_y, stride_x); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), size_i_src_y, size_o_dst_y, size_o_src_y-size_o_dst_y, stride_x); } j++; } } void dwt_cdf53_2f_inplace_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding ) { const int size_o_big_min = min(size_o_big_x, size_o_big_y); const int size_o_big_max = max(size_o_big_x, size_o_big_y); int j = 0; const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); const int stride_y_j = stride_y * (1 << (j)); const int stride_x_j = stride_x * (1 << (j)); for(int y = 0; y < size_i_src_y; y++) dwt_cdf53_f_ex_stride_inplace_s( addr2_s(ptr,y,0,stride_x_j,stride_y_j), size_i_src_x, stride_y_j); for(int x = 0; x < size_i_src_x; x++) dwt_cdf53_f_ex_stride_inplace_s( addr2_s(ptr,0,x,stride_x_j,stride_y_j), size_i_src_y, stride_x_j); j++; } } void dwt_eaw53_2f_inplace_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding, float *wH[], float *wV[], float alpha ) { const int size_o_big_min = min(size_o_big_x, size_o_big_y); const int size_o_big_max = max(size_o_big_x, size_o_big_y); int j = 0; const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); const int stride_y_j = stride_y * (1 << (j)); const int stride_x_j = stride_x * (1 << (j)); wH[j] = dwt_util_alloc(size_i_src_y * size_i_src_x, sizeof(float)); wV[j] = dwt_util_alloc(size_i_src_x * size_i_src_y, sizeof(float)); for(int y = 0; y < size_i_src_y; y++) dwt_eaw53_f_ex_stride_inplace_s( addr2_s(ptr,y,0,stride_x_j,stride_y_j), size_i_src_x, stride_y_j, &wH[j][y*size_i_src_x], alpha ); for(int x = 0; x < size_i_src_x; x++) dwt_eaw53_f_ex_stride_inplace_s( addr2_s(ptr,0,x,stride_x_j,stride_y_j), size_i_src_y, stride_x_j, &wV[j][x*size_i_src_y], alpha ); j++; } } void dwt_eaw53_2f_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding, float *wH[], float *wV[], float alpha ) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); float temp[size_o_big_max]; if(NULL == temp) abort(); int j = 0; const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; 
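		// level j: ceil_div_pow2 halves the band sizes (rounding up); the
		// per-level edge-avoiding weight arrays wH[j] and wV[j] are allocated
		// below with one weight per coefficient of every transformed row
		// (size_i_src_x each) and column (size_i_src_y each)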
const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j ); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); wH[j] = dwt_util_alloc(size_o_src_y * size_i_src_x, sizeof(float)); wV[j] = dwt_util_alloc(size_o_src_x * size_i_src_y, sizeof(float)); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_eaw53_f_ex_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), temp, size_i_src_x, // N stride_y, &wH[j][y*size_i_src_x], alpha ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_eaw53_f_ex_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), temp, size_i_src_y, // N stride_x, &wV[j][x*size_i_src_y], alpha ); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), size_i_src_y, size_o_dst_y, size_o_src_y-size_o_dst_y, stride_x); } j++; } } void dwt_eaw53_2f_dummy_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one ) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; } void dwt_cdf53_2f_dummy_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one ) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; } void dwt_interp53_2f_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); float temp[size_o_big_max]; if(NULL == temp) abort(); int j = 0; const int j_limit = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j+1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j+1); const int size_i_src_x = ceil_div_pow2(size_i_big_x, j 
); const int size_i_src_y = ceil_div_pow2(size_i_big_y, j ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_interp53_f_ex_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), temp, size_i_src_x, stride_y); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_interp53_f_ex_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), temp, size_i_src_y, stride_x); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_src_y, omp_get_num_threads())) for(int y = 0; y < size_o_src_y; y++) dwt_zero_padding_f_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_dst_x,stride_x,stride_y), size_i_src_x, size_o_dst_x, size_o_src_x-size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_src_x, omp_get_num_threads())) for(int x = 0; x < size_o_src_x; x++) dwt_zero_padding_f_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_dst_y,x,stride_x,stride_y), size_i_src_y, size_o_dst_y, size_o_src_y-size_o_dst_y, stride_x); } j++; } } void dwt_cdf97_2i_d( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); // FIXME(microblaze): align on 8 bytes boundary (GCC's __attribure__ is ignored) double temp[size_o_big_max]; if(NULL == temp) abort(); int j = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_cdf97_i_ex_stride_d( addr2_d(ptr,y,0,stride_x,stride_y), addr2_d(ptr,y,size_o_src_x,stride_x,stride_y), addr2_d(ptr,y,0,stride_x,stride_y), temp, size_i_dst_x, stride_y); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_cdf97_i_ex_stride_d( addr2_d(ptr,0,x,stride_x,stride_y), addr2_d(ptr,size_o_src_y,x,stride_x,stride_y), addr2_d(ptr,0,x,stride_x,stride_y), temp, size_i_dst_y, stride_x); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_zero_padding_i_stride_d( addr2_d(ptr,y,0,stride_x,stride_y), size_i_dst_x, size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_d( addr2_d(ptr,0,x,stride_x,stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } } void dwt_cdf53_2i_d( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int 
zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); // FIXME(microblaze): align on 8 bytes boundary (GCC's __attribure__ is ignored) double temp[size_o_big_max]; if(NULL == temp) abort(); int j = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_cdf53_i_ex_stride_d( addr2_d(ptr,y,0,stride_x,stride_y), addr2_d(ptr,y,size_o_src_x,stride_x,stride_y), addr2_d(ptr,y,0,stride_x,stride_y), temp, size_i_dst_x, stride_y); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_cdf53_i_ex_stride_d( addr2_d(ptr,0,x,stride_x,stride_y), addr2_d(ptr,size_o_src_y,x,stride_x,stride_y), addr2_d(ptr,0,x,stride_x,stride_y), temp, size_i_dst_y, stride_x); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_zero_padding_i_stride_d( addr2_d(ptr,y,0,stride_x,stride_y), size_i_dst_x, size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_d( addr2_d(ptr,0,x,stride_x,stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } } void dwt_cdf97_2i_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding) { FUNC_BEGIN; const int threads = dwt_util_get_num_threads(); const int workers = dwt_util_get_num_workers(); const int offset = 0; #ifdef microblaze dwt_util_switch_op(DWT_OP_LIFT4SB); #endif const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); float **temp = alloc_temp_s(threads, calc_and_set_temp_size_s(size_o_big_max, offset) ); int j = ceil_log2( decompose_one ? 
size_o_big_max : size_o_big_min ); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); const int lines_y = size_o_dst_y; const int lines_x = size_o_dst_x; const int workers_segment_y = floor_div(lines_y, workers); const int workers_segment_x = floor_div(lines_x, workers); #ifdef _OPENMP const int threads_segment_y = ceil_div(workers_segment_y, threads); const int threads_segment_x = ceil_div(workers_segment_x, threads); #endif const int workers_lines_y = workers_segment_y * workers; const int workers_lines_x = workers_segment_x * workers; if( lines_x > 1 ) { set_data_step_s( stride_x ); #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < workers_lines_y; y += workers) { dwt_cdf97_i_ex_stride_s( addr2_const_s(ptr,y,0,stride_x,stride_y), addr2_const_s(ptr,y,size_o_src_x,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_dst_x, stride_y); } dwt_util_set_num_workers(1); for(int y = workers_lines_y; y < lines_y; y++) { dwt_cdf97_i_ex_stride_s( addr2_const_s(ptr,y,0,stride_x,stride_y), addr2_const_s(ptr,y,size_o_src_x,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_dst_x, stride_y); } dwt_util_set_num_workers(workers); } if( lines_y > 1 ) { set_data_step_s( stride_y ); #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < workers_lines_x; x += workers) { dwt_cdf97_i_ex_stride_s( addr2_const_s(ptr,0,x,stride_x,stride_y), addr2_const_s(ptr,size_o_src_y,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_dst_y, stride_x); } dwt_util_set_num_workers(1); for(int x = workers_lines_x; x < lines_x; x++) { dwt_cdf97_i_ex_stride_s( addr2_const_s(ptr,0,x,stride_x,stride_y), addr2_const_s(ptr,size_o_src_y,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_dst_y, stride_x); } dwt_util_set_num_workers(workers); } if(zero_padding) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_o_dst_y; y++) dwt_zero_padding_i_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), size_i_dst_x, size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } free_temp_s(threads, temp); FUNC_END; } void dwt_cdf97_1i_inplace_s( void *ptr, int stride, int size, int j_max ) { int j = ceil_log2( size ); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if( 0 == j ) break; const int size_j = ceil_div_pow2(size, j-1); const int stride_j = stride * (1 << (j-1)); if( size_j > 1 && size_j < 4 ) { dwt_cdf97_i_ex_stride_inplace_part_exceptions_s( ptr, size_j, stride_j ); } if( size_j >= 4 ) { dwt_cdf97_i_ex_stride_inplace_part_prolog_s( ptr, size_j, stride_j ); dwt_cdf97_i_ex_stride_inplace_part_core_s( ptr, size_j, stride_j ); dwt_cdf97_i_ex_stride_inplace_part_epilog_s( ptr, size_j, stride_j ); } j--; } } void dwt_cdf97_i_ex_stride_inplace_i( int *tmp, int N, int stride ) { assert( N >= 0 && NULL != tmp && 0 != stride ); // fix for small N 
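	// the integer lifting steps below use fixed-point approximations of the
	// CDF 9/7 lifting coefficients: 203/2^7 ~ 1.586, 217/2^12 ~ 0.053,
	// 113/2^7 ~ 0.883 and 1817/2^12 ~ 0.444; the (1<<6) and (1<<11) terms
	// round to the nearest integer before the arithmetic shift by 7 or 12 bits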
if(N < 2) return; #if 0 // backward update 2 + backward predict 2 for(int i=2; i<N-(N&1); i+=2) *addr1_i(tmp, i, stride) -= ( 1817*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<11) ) >> 12; *addr1_i(tmp, 0, stride) -= ( 1817*(*addr1_i(tmp, 1, stride)+*addr1_i(tmp, 1, stride)) + (1<<11) ) >> 12; if(is_odd(N)) *addr1_i(tmp, N-1, stride) -= ( 1817*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<11) ) >> 12; else *addr1_i(tmp, N-1, stride) += ( -113*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) - (1<<6) ) >> 7; for(int i=1; i<N-2+(N&1); i+=2) *addr1_i(tmp, i, stride) += ( -113*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) - (1<<6) ) >> 7; // backward update 1 + backward predict 1 for(int i=2; i<N-(N&1); i+=2) *addr1_i(tmp, i, stride) -= ( -217*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<11) ) >> 12; *addr1_i(tmp, 0, stride) -= ( -217*(*addr1_i(tmp, 1, stride)+*addr1_i(tmp, 1, stride)) + (1<<11) ) >> 12; if(is_odd(N)) *addr1_i(tmp, N-1, stride) -= ( -217*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<11) ) >> 12; else *addr1_i(tmp, N-1, stride) += ( +203*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) - (1<<6) ) >> 7; for(int i=1; i<N-2+(N&1); i+=2) *addr1_i(tmp, i, stride) += ( +203*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) - (1<<6) ) >> 7; #else // backward update 2 + backward predict 2 for(int i=2; i<N-(N&1); i+=2) *addr1_i(tmp, i, stride) -= ( 1817*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<11) ) >> 12; *addr1_i(tmp, 0, stride) -= ( 1817*(*addr1_i(tmp, 1, stride)+*addr1_i(tmp, 1, stride)) + (1<<11) ) >> 12; if(is_odd(N)) *addr1_i(tmp, N-1, stride) -= ( 1817*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<11) ) >> 12; else *addr1_i(tmp, N-1, stride) -= ( +113*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<6) ) >> 7; for(int i=1; i<N-2+(N&1); i+=2) *addr1_i(tmp, i, stride) -= ( +113*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<6) ) >> 7; // backward update 1 + backward predict 1 for(int i=2; i<N-(N&1); i+=2) *addr1_i(tmp, i, stride) -= ( -217*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<11) ) >> 12; *addr1_i(tmp, 0, stride) -= ( -217*(*addr1_i(tmp, 1, stride)+*addr1_i(tmp, 1, stride)) + (1<<11) ) >> 12; if(is_odd(N)) *addr1_i(tmp, N-1, stride) -= ( -217*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<11) ) >> 12; else *addr1_i(tmp, N-1, stride) -= ( -203*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<6) ) >> 7; for(int i=1; i<N-2+(N&1); i+=2) *addr1_i(tmp, i, stride) -= ( -203*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<6) ) >> 7; #endif } // TODO: tested only with j=1 void dwt_cdf97_2i_inplace_i( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding ) { const int size_o_big_min = min(size_o_big_x, size_o_big_y); const int size_o_big_max = max(size_o_big_x, size_o_big_y); int j = ceil_log2(decompose_one ? 
size_o_big_max : size_o_big_min); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if( 0 == j ) break; const int size_x_j = ceil_div_pow2(size_i_big_x, j-1); const int size_y_j = ceil_div_pow2(size_i_big_y, j-1); #pragma omp parallel for schedule(static, ceil_div(size_x_j, omp_get_num_threads())) for(int x = 0; x < size_x_j; x++) dwt_cdf97_i_ex_stride_inplace_i( addr2_i(ptr, 0, x, stride_x, stride_y), size_y_j, stride_x ); #pragma omp parallel for schedule(static, ceil_div(size_y_j, omp_get_num_threads())) for(int y = 0; y < size_y_j; y++) dwt_cdf97_i_ex_stride_inplace_i( addr2_i(ptr, y, 0, stride_x, stride_y), size_x_j, stride_y ); j--; } } void dwt_cdf97_f_ex_stride_inplace_i( int *tmp, int N, int stride ) { assert( N >= 0 && NULL != tmp && 0 != stride ); // fix for small N if(N < 2) return; #if 0 // predict 1 + update 1 for(int i=1; i<N-2+(N&1); i+=2) *addr1_i(tmp, i, stride) -= ( +203*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) - (1<<6) ) >> 7; if(is_odd(N)) *addr1_i(tmp, N-1, stride) += ( -217*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<11) ) >> 12; else *addr1_i(tmp, N-1, stride) -= ( +203*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) - (1<<6) ) >> 7; *addr1_i(tmp, 0, stride) += ( -217*(*addr1_i(tmp, 1, stride)+*addr1_i(tmp, 1, stride)) + (1<<11) ) >> 12; for(int i=2; i<N-(N&1); i+=2) *addr1_i(tmp, i, stride) += ( -217*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<11) ) >> 12; // predict 2 + update 2 for(int i=1; i<N-2+(N&1); i+=2) *addr1_i(tmp, i, stride) -= ( -113*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) - (1<<6) ) >> 7; if(is_odd(N)) *addr1_i(tmp, N-1, stride) += ( 1817*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<11) ) >> 12; else *addr1_i(tmp, N-1, stride) -= ( -113*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) - (1<<6) ) >> 7; *addr1_i(tmp, 0, stride) += ( 1817*(*addr1_i(tmp, 1, stride)+*addr1_i(tmp, 1, stride)) + (1<<11) ) >> 12; for(int i=2; i<N-(N&1); i+=2) *addr1_i(tmp, i, stride) += ( 1817*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<11) ) >> 12; #else // predict 1 + update 1 for(int i=1; i<N-2+(N&1); i+=2) *addr1_i(tmp, i, stride) += ( -203*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<6) ) >> 7; if(is_odd(N)) *addr1_i(tmp, N-1, stride) += ( -217*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<11) ) >> 12; else *addr1_i(tmp, N-1, stride) += ( -203*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<6) ) >> 7; *addr1_i(tmp, 0, stride) += ( -217*(*addr1_i(tmp, 1, stride)+*addr1_i(tmp, 1, stride)) + (1<<11) ) >> 12; for(int i=2; i<N-(N&1); i+=2) *addr1_i(tmp, i, stride) += ( -217*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<11) ) >> 12; // predict 2 + update 2 for(int i=1; i<N-2+(N&1); i+=2) *addr1_i(tmp, i, stride) += ( +113*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<6) ) >> 7; if(is_odd(N)) *addr1_i(tmp, N-1, stride) += ( 1817*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<11) ) >> 12; else *addr1_i(tmp, N-1, stride) += ( +113*(*addr1_i(tmp, N-2, stride)+*addr1_i(tmp, N-2, stride)) + (1<<6) ) >> 7; *addr1_i(tmp, 0, stride) += ( 1817*(*addr1_i(tmp, 1, stride)+*addr1_i(tmp, 1, stride)) + (1<<11) ) >> 12; for(int i=2; i<N-(N&1); i+=2) *addr1_i(tmp, i, stride) += ( 1817*(*addr1_i(tmp, i-1, stride)+*addr1_i(tmp, i+1, stride)) + (1<<11) ) >> 12; #endif } // TODO: tested only with j=1 void dwt_cdf97_2f_inplace_i( void *ptr, int stride_x, int stride_y, int size_o_big_x, int 
size_o_big_y, int size_i_big_x, int size_i_big_y, int *j_max_ptr, int decompose_one, int zero_padding ) { const int size_o_big_min = min(size_o_big_x, size_o_big_y); const int size_o_big_max = max(size_o_big_x, size_o_big_y); int j = 0; const int j_limit = ceil_log2(decompose_one ? size_o_big_max : size_o_big_min); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_x_j = ceil_div_pow2(size_i_big_x, j ); const int size_y_j = ceil_div_pow2(size_i_big_y, j ); #pragma omp parallel for schedule(static, ceil_div(size_y_j, omp_get_num_threads())) for(int y = 0; y < size_y_j; y++) dwt_cdf97_f_ex_stride_inplace_i( addr2_i(ptr, y, 0, stride_x, stride_y), size_x_j, stride_y ); #pragma omp parallel for schedule(static, ceil_div(size_x_j, omp_get_num_threads())) for(int x = 0; x < size_x_j; x++) dwt_cdf97_f_ex_stride_inplace_i( addr2_i(ptr, 0, x, stride_x, stride_y), size_y_j, stride_x ); j++; } } void dwt_cdf97_2i_inplace_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); int j = ceil_log2( decompose_one ? size_o_big_max : size_o_big_min ); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; // const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); // const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); // const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); // const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); const int lines_y = size_i_dst_y; const int lines_x = size_i_dst_x; const int stride_y_j = stride_y * (1 << (j-1)); const int stride_x_j = stride_x * (1 << (j-1)); if( lines_x > 1 && lines_x < 4 ) { for(int y = 0; y < lines_y; y++) { dwt_cdf97_i_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, // N stride_y_j); } } if( lines_y > 1 && lines_y < 4 ) { for(int x = 0; x < size_i_dst_x; x++) { dwt_cdf97_i_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, // N stride_x_j); } } if( lines_x > 1 && lines_x >= 4 ) { for(int y = 0; y < lines_y; y++) { dwt_cdf97_i_ex_stride_inplace_part_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, stride_y_j); } } if( lines_y > 1 && lines_y >= 4 ) { for(int x = 0; x < lines_x; x++) { dwt_cdf97_i_ex_stride_inplace_part_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, stride_x_j); } } if( lines_x > 1 && lines_x >= 4 ) { for(int y = 0; y < lines_y; y++) { dwt_cdf97_i_ex_stride_inplace_part_core_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, stride_y_j); } } if( lines_y > 1 && lines_y >= 4 ) { for(int x = 0; x < lines_x; x++) { dwt_cdf97_i_ex_stride_inplace_part_core_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, stride_x_j); } } if( lines_x > 1 && lines_x >= 4 ) { for(int y = 0; y < lines_y; y++) { dwt_cdf97_i_ex_stride_inplace_part_epilog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, stride_y_j); } } if( lines_y > 1 && lines_y >= 4 ) { for(int x = 0; x < lines_x; x++) { dwt_cdf97_i_ex_stride_inplace_part_epilog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, stride_x_j); } } // if(zero_padding) // { // #pragma omp parallel 
for schedule(static, threads_segment_y) // for(int y = 0; y < size_o_dst_y; y++) // dwt_zero_padding_i_stride_s( // addr2_s(ptr,y,0,stride_x,stride_y), // size_i_dst_x, // size_o_dst_x, // stride_y); // #pragma omp parallel for schedule(static, threads_segment_x) // for(int x = 0; x < size_o_dst_x; x++) // dwt_zero_padding_i_stride_s( // addr2_s(ptr,0,x,stride_x,stride_y), // size_i_dst_y, // size_o_dst_y, // stride_x); // } j--; } FUNC_END; } // hole void dwt_cdf97_2i_inplace_hole_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); int j = ceil_log2( decompose_one ? size_o_big_max : size_o_big_min ); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); const int lines_y = size_i_dst_y; const int lines_x = size_i_dst_x; const int stride_y_j = stride_y * (1 << (j-1)); const int stride_x_j = stride_x * (1 << (j-1)); if( lines_x > 1 && lines_x < 4 ) { for(int y = 0; y < lines_y; y++) { // FIXME: _hole dwt_cdf97_i_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, // N stride_y_j); } } if( lines_y > 1 && lines_y < 4 ) { for(int x = 0; x < size_i_dst_x; x++) { // FIXME: _hole dwt_cdf97_i_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, // N stride_x_j); } } if( lines_x > 1 && lines_x >= 4 ) { for(int y = 0; y < lines_y; y++) { dwt_cdf97_i_ex_stride_inplace_part_prolog_hole_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, stride_y_j); } } if( lines_y > 1 && lines_y >= 4 ) { for(int x = 0; x < lines_x; x++) { dwt_cdf97_i_ex_stride_inplace_part_prolog_hole_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, stride_x_j); } } if( lines_x > 1 && lines_x >= 4 ) { for(int y = 0; y < lines_y; y++) { dwt_cdf97_i_ex_stride_inplace_part_core_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, stride_y_j); } } if( lines_y > 1 && lines_y >= 4 ) { for(int x = 0; x < lines_x; x++) { dwt_cdf97_i_ex_stride_inplace_part_core_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, stride_x_j); } } if( lines_x > 1 && lines_x >= 4 ) { for(int y = 0; y < lines_y; y++) { dwt_cdf97_i_ex_stride_inplace_part_epilog_hole_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, stride_y_j); } } if( lines_y > 1 && lines_y >= 4 ) { for(int x = 0; x < lines_x; x++) { dwt_cdf97_i_ex_stride_inplace_part_epilog_hole_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, stride_x_j); } } j--; } FUNC_END; } // zero void dwt_cdf97_2i_inplace_zero_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding) { FUNC_BEGIN; assert( 1 == dwt_util_get_num_workers() ); const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); int j = ceil_log2( decompose_one ? 
size_o_big_max : size_o_big_min ); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); const int lines_y = size_i_dst_y; const int lines_x = size_i_dst_x; const int stride_y_j = stride_y * (1 << (j-1)); const int stride_x_j = stride_x * (1 << (j-1)); if( lines_x > 1 && lines_x < 4 ) { for(int y = 0; y < lines_y; y++) { // FIXME: _zero dwt_cdf97_i_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, // N stride_y_j); } } if( lines_y > 1 && lines_y < 4 ) { for(int x = 0; x < size_i_dst_x; x++) { // FIXME: _zero dwt_cdf97_i_ex_stride_inplace_part_exceptions_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, // N stride_x_j); } } if( lines_x > 1 && lines_x >= 4 ) { for(int y = 0; y < lines_y; y++) { dwt_cdf97_i_ex_stride_inplace_part_prolog_zero_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, stride_y_j); } } if( lines_y > 1 && lines_y >= 4 ) { for(int x = 0; x < lines_x; x++) { dwt_cdf97_i_ex_stride_inplace_part_prolog_zero_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, stride_x_j); } } if( lines_x > 1 && lines_x >= 4 ) { for(int y = 0; y < lines_y; y++) { dwt_cdf97_i_ex_stride_inplace_part_core_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, stride_y_j); } } if( lines_y > 1 && lines_y >= 4 ) { for(int x = 0; x < lines_x; x++) { dwt_cdf97_i_ex_stride_inplace_part_core_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, stride_x_j); } } if( lines_x > 1 && lines_x >= 4 ) { for(int y = 0; y < lines_y; y++) { dwt_cdf97_i_ex_stride_inplace_part_epilog_zero_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), lines_x, stride_y_j); } } if( lines_y > 1 && lines_y >= 4 ) { for(int x = 0; x < lines_x; x++) { dwt_cdf97_i_ex_stride_inplace_part_epilog_zero_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), lines_y, stride_x_j); } } j--; } FUNC_END; } void dwt_cdf53_2i_inplace_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x, size_o_big_y); const int size_o_big_max = max(size_o_big_x, size_o_big_y); int j = ceil_log2(decompose_one ? size_o_big_max : size_o_big_min); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); const int stride_y_j = stride_y * (1 << (j-1)); const int stride_x_j = stride_x * (1 << (j-1)); for(int y = 0; y < size_i_dst_y; y++) dwt_cdf53_i_ex_stride_inplace_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_i_dst_x, stride_y_j); for(int x = 0; x < size_i_dst_x; x++) dwt_cdf53_i_ex_stride_inplace_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_i_dst_y, stride_x_j); j--; } } void dwt_eaw53_2i_inplace_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding, float *wH[], float *wV[] ) { const int size_o_big_min = min(size_o_big_x, size_o_big_y); const int size_o_big_max = max(size_o_big_x, size_o_big_y); int j = ceil_log2(decompose_one ? 
size_o_big_max : size_o_big_min); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); const int stride_y_j = stride_y * (1 << (j-1)); const int stride_x_j = stride_x * (1 << (j-1)); for(int x = 0; x < size_i_dst_x; x++) dwt_eaw53_i_ex_stride_inplace_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_i_dst_y, stride_x_j, &wV[j-1][x*size_i_dst_y] ); for(int y = 0; y < size_i_dst_y; y++) dwt_eaw53_i_ex_stride_inplace_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_i_dst_x, stride_y_j, &wH[j-1][y*size_i_dst_x] ); j--; } } void dwt_cdf97_2i_s2( const void *src, void *dst, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding) { FUNC_BEGIN; // NOTE: this hack copies the input image into dst dwt_util_copy_i( src, dst, stride_x, stride_y, size_i_big_x, size_i_big_y); const int threads = dwt_util_get_num_threads(); const int workers = dwt_util_get_num_workers(); const int offset = 0; #ifdef microblaze dwt_util_switch_op(DWT_OP_LIFT4SB); #endif const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); float **temp = alloc_temp_s(threads, calc_and_set_temp_size_s(size_o_big_max, offset) ); int j = ceil_log2( decompose_one ? size_o_big_max : size_o_big_min ); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); const int lines_y = size_o_dst_y; const int lines_x = size_o_dst_x; const int workers_segment_y = floor_div(lines_y, workers); const int workers_segment_x = floor_div(lines_x, workers); #ifdef _OPENMP const int threads_segment_y = ceil_div(workers_segment_y, threads); const int threads_segment_x = ceil_div(workers_segment_x, threads); #endif const int workers_lines_y = workers_segment_y * workers; const int workers_lines_x = workers_segment_x * workers; if( lines_x > 1 ) { set_data_step_s( stride_x ); #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < workers_lines_y; y += workers) { dwt_cdf97_i_ex_stride_s( addr2_const_s(dst,y,0,stride_x,stride_y), addr2_const_s(dst,y,size_o_src_x,stride_x,stride_y), addr2_s(dst,y,0,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_dst_x, stride_y); } dwt_util_set_num_workers(1); for(int y = workers_lines_y; y < lines_y; y++) { dwt_cdf97_i_ex_stride_s( addr2_const_s(dst,y,0,stride_x,stride_y), addr2_const_s(dst,y,size_o_src_x,stride_x,stride_y), addr2_s(dst,y,0,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_dst_x, stride_y); } dwt_util_set_num_workers(workers); } if( lines_y > 1 ) { set_data_step_s( stride_y ); #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < workers_lines_x; x += workers) { dwt_cdf97_i_ex_stride_s( addr2_const_s(dst,0,x,stride_x,stride_y), addr2_const_s(dst,size_o_src_y,x,stride_x,stride_y), addr2_s(dst,0,x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_dst_y, stride_x); } dwt_util_set_num_workers(1); for(int x = workers_lines_x; x < lines_x; x++) { dwt_cdf97_i_ex_stride_s( 
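/* leftover columns that do not fill a whole group of workers are processed serially (workers temporarily set to 1 above) */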
addr2_const_s(dst,0,x,stride_x,stride_y), addr2_const_s(dst,size_o_src_y,x,stride_x,stride_y), addr2_s(dst,0,x,stride_x,stride_y), temp[dwt_util_get_thread_num()], size_i_dst_y, stride_x); } dwt_util_set_num_workers(workers); } if(zero_padding) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_o_dst_y; y++) dwt_zero_padding_i_stride_s( addr2_s(dst,y,0,stride_x,stride_y), size_i_dst_x, size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_s( addr2_s(dst,0,x,stride_x,stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } free_temp_s(threads, temp); FUNC_END; } /* http://www.ece.uvic.ca/~frodo/publications/phdthesis.pdf Unlike in the case of conventional (linear) versions of transforms, however, the order in which rows and columns are transformed is important. That is, the inverse transform must operate on rows and columns in the reverse order from that used in the forward transform; otherwise, invertibility cannot be guaranteed. */ void dwt_cdf53_2i_i( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); int temp[size_o_big_max]; if(NULL == temp) abort(); int j = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_cdf53_i_ex_stride_i( addr2_i(ptr,0,x,stride_x,stride_y), addr2_i(ptr,size_o_src_y,x,stride_x,stride_y), addr2_i(ptr,0,x,stride_x,stride_y), temp, size_i_dst_y, stride_x); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_cdf53_i_ex_stride_i( addr2_i(ptr,y,0,stride_x,stride_y), addr2_i(ptr,y,size_o_src_x,stride_x,stride_y), addr2_i(ptr,y,0,stride_x,stride_y), temp, size_i_dst_x, stride_y); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_zero_padding_i_stride_i( addr2_i(ptr,y,0,stride_x,stride_y), size_i_dst_x, size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_i( addr2_i(ptr,0,x,stride_x,stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } } void dwt_cdf97_2i_i( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); int temp[size_o_big_max]; if(NULL == temp) abort(); int j = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = 
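/* per-level sizes: source sub-band at level j, destination at level j-1 */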
ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_cdf97_i_ex_stride_i( addr2_i(ptr,0,x,stride_x,stride_y), addr2_i(ptr,size_o_src_y,x,stride_x,stride_y), addr2_i(ptr,0,x,stride_x,stride_y), temp, size_i_dst_y, stride_x); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_cdf97_i_ex_stride_i( addr2_i(ptr,y,0,stride_x,stride_y), addr2_i(ptr,y,size_o_src_x,stride_x,stride_y), addr2_i(ptr,y,0,stride_x,stride_y), temp, size_i_dst_x, stride_y); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_zero_padding_i_stride_i( addr2_i(ptr,y,0,stride_x,stride_y), size_i_dst_x, size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_i( addr2_i(ptr,0,x,stride_x,stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } } void dwt_cdf53_2i_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); float temp[size_o_big_max]; if(NULL == temp) abort(); int j = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_cdf53_i_ex_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_src_x,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), temp, size_i_dst_x, stride_y); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_cdf53_i_ex_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_src_y,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), temp, size_i_dst_y, stride_x); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_zero_padding_i_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), size_i_dst_x, size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } } void dwt_eaw53_2i_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int 
j_max, int decompose_one, int zero_padding, float *wH[], float *wV[] ) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); float temp[size_o_big_max]; if(NULL == temp) abort(); int j = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_eaw53_i_ex_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_src_y,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), temp, size_i_dst_y, // N stride_x, &wV[j-1][x*size_i_dst_y] ); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_eaw53_i_ex_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_src_x,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), temp, size_i_dst_x, // N stride_y, &wH[j-1][y*size_i_dst_x] ); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_zero_padding_i_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), size_i_dst_x, size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } } void dwt_interp53_2i_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding) { const int size_o_big_min = min(size_o_big_x,size_o_big_y); const int size_o_big_max = max(size_o_big_x,size_o_big_y); float temp[size_o_big_max]; if(NULL == temp) abort(); int j = ceil_log2(decompose_one?size_o_big_max:size_o_big_min); if( j_max >= 0 && j_max < j ) j = j_max; for(;;) { if(0 == j) break; const int size_o_src_x = ceil_div_pow2(size_o_big_x, j ); const int size_o_src_y = ceil_div_pow2(size_o_big_y, j ); const int size_o_dst_x = ceil_div_pow2(size_o_big_x, j-1); const int size_o_dst_y = ceil_div_pow2(size_o_big_y, j-1); const int size_i_dst_x = ceil_div_pow2(size_i_big_x, j-1); const int size_i_dst_y = ceil_div_pow2(size_i_big_y, j-1); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) dwt_interp53_i_ex_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), addr2_s(ptr,y,size_o_src_x,stride_x,stride_y), addr2_s(ptr,y,0,stride_x,stride_y), temp, size_i_dst_x, stride_y); #pragma omp parallel for private(temp) schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_interp53_i_ex_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), addr2_s(ptr,size_o_src_y,x,stride_x,stride_y), addr2_s(ptr,0,x,stride_x,stride_y), temp, size_i_dst_y, stride_x); if(zero_padding) { #pragma omp parallel for schedule(static, ceil_div(size_o_dst_y, omp_get_num_threads())) for(int y = 0; y < size_o_dst_y; y++) 
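/* zero-pad this row between the inner width (size_i_dst_x) and the outer width (size_o_dst_x) */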
dwt_zero_padding_i_stride_s( addr2_s(ptr,y,0,stride_x,stride_y), size_i_dst_x, size_o_dst_x, stride_y); #pragma omp parallel for schedule(static, ceil_div(size_o_dst_x, omp_get_num_threads())) for(int x = 0; x < size_o_dst_x; x++) dwt_zero_padding_i_stride_s( addr2_s(ptr,0,x,stride_x,stride_y), size_i_dst_y, size_o_dst_y, stride_x); } j--; } } int dwt_util_clock_autoselect() { #ifdef ENABLE_TIME_CLOCK_GETTIME return DWT_TIME_CLOCK_GETTIME; #endif #ifdef ENABLE_TIME_TIMES return DWT_TIME_TIMES; #endif #ifdef ENABLE_TIME_CLOCK return DWT_TIME_CLOCK; #endif #ifdef ENABLE_TIME_GETRUSAGE return DWT_TIME_GETRUSAGE; #endif #ifdef ENABLE_TIME_GETTIMEOFDAY return DWT_TIME_GETTIMEOFDAY; #endif #ifdef ENABLE_TIME_IOCTL_RTC return DWT_TIME_IOCTL_RTC; #endif // fallback return DWT_TIME_AUTOSELECT; } dwt_clock_t dwt_util_get_frequency( int type) { if(DWT_TIME_AUTOSELECT == type) type = dwt_util_clock_autoselect(); dwt_clock_t return_freq; switch(type) { case DWT_TIME_CLOCK_GETTIME: { #ifdef ENABLE_TIME_CLOCK_GETTIME return_freq = (dwt_clock_t)1000000000; #else abort(); #endif } break; case DWT_TIME_CLOCK_GETTIME_REALTIME: { #ifdef ENABLE_TIME_CLOCK_GETTIME_REALTIME return_freq = (dwt_clock_t)1000000000; #else abort(); #endif } break; case DWT_TIME_CLOCK_GETTIME_MONOTONIC: { #ifdef ENABLE_TIME_CLOCK_GETTIME_MONOTONIC return_freq = (dwt_clock_t)1000000000; #else abort(); #endif } break; case DWT_TIME_CLOCK_GETTIME_MONOTONIC_RAW: { #ifdef ENABLE_TIME_CLOCK_GETTIME_MONOTONIC_RAW return_freq = (dwt_clock_t)1000000000; #else abort(); #endif } break; case DWT_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID: { #ifdef ENABLE_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID return_freq = (dwt_clock_t)1000000000; #else abort(); #endif } break; case DWT_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID: { #ifdef ENABLE_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID return_freq = (dwt_clock_t)1000000000; #else abort(); #endif } break; case DWT_TIME_CLOCK: { #ifdef ENABLE_TIME_CLOCK return_freq = (dwt_clock_t)CLOCKS_PER_SEC; #else abort(); #endif } break; case DWT_TIME_TIMES: { #ifdef ENABLE_TIME_TIMES return_freq = (dwt_clock_t)sysconf(_SC_CLK_TCK); #else abort(); #endif } break; case DWT_TIME_GETRUSAGE: { #ifdef ENABLE_TIME_GETRUSAGE return_freq = (dwt_clock_t)1000000000; #else abort(); #endif } break; case DWT_TIME_IOCTL_RTC: { #ifdef ENABLE_TIME_IOCTL_RTC return_freq = (dwt_clock_t)1; #else abort(); #endif } break; case DWT_TIME_GETTIMEOFDAY: { #ifdef ENABLE_TIME_GETTIMEOFDAY return_freq = (dwt_clock_t)1000000000; #else abort(); #endif } break; case DWT_TIME_GETRUSAGE_SELF: { #ifdef ENABLE_TIME_GETRUSAGE_SELF return_freq = (dwt_clock_t)1000000000; #else abort(); #endif } break; case DWT_TIME_GETRUSAGE_CHILDREN: { #ifdef ENABLE_TIME_GETRUSAGE_CHILDREN return_freq = (dwt_clock_t)1000000000; #else abort(); #endif } break; case DWT_TIME_GETRUSAGE_THREAD: { #ifdef ENABLE_TIME_GETRUSAGE_THREAD return_freq = (dwt_clock_t)1000000000; #else abort(); #endif } break; default: abort(); } return return_freq; } dwt_clock_t dwt_util_get_clock( int type) { if(DWT_TIME_AUTOSELECT == type) type = dwt_util_clock_autoselect(); dwt_clock_t return_time; switch(type) { case DWT_TIME_CLOCK_GETTIME: { #ifdef ENABLE_TIME_CLOCK_GETTIME clockid_t clk_id = CLOCK_REALTIME; struct timespec ts; if(clock_gettime(clk_id, &ts)) abort(); return_time = (dwt_clock_t)ts.tv_sec * 1000000000 + ts.tv_nsec; #else abort(); #endif } break; case DWT_TIME_CLOCK_GETTIME_REALTIME: { #ifdef ENABLE_TIME_CLOCK_GETTIME_REALTIME clockid_t clk_id = CLOCK_REALTIME; struct timespec ts; if(clock_gettime(clk_id, 
&ts)) abort(); return_time = (dwt_clock_t)ts.tv_sec * 1000000000 + ts.tv_nsec; #else abort(); #endif } break; case DWT_TIME_CLOCK_GETTIME_MONOTONIC: { #ifdef ENABLE_TIME_CLOCK_GETTIME_MONOTONIC clockid_t clk_id = CLOCK_MONOTONIC; struct timespec ts; if(clock_gettime(clk_id, &ts)) abort(); return_time = (dwt_clock_t)ts.tv_sec * 1000000000 + ts.tv_nsec; #else abort(); #endif } break; case DWT_TIME_CLOCK_GETTIME_MONOTONIC_RAW: { #ifdef ENABLE_TIME_CLOCK_GETTIME_MONOTONIC_RAW clockid_t clk_id = CLOCK_MONOTONIC_RAW; struct timespec ts; if(clock_gettime(clk_id, &ts)) abort(); return_time = (dwt_clock_t)ts.tv_sec * 1000000000 + ts.tv_nsec; #else abort(); #endif } break; case DWT_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID: { #ifdef ENABLE_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID clockid_t clk_id = CLOCK_PROCESS_CPUTIME_ID; struct timespec ts; if(clock_gettime(clk_id, &ts)) abort(); return_time = (dwt_clock_t)ts.tv_sec * 1000000000 + ts.tv_nsec; #else abort(); #endif } break; case DWT_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID: { #ifdef ENABLE_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID clockid_t clk_id = CLOCK_THREAD_CPUTIME_ID; struct timespec ts; if(clock_gettime(clk_id, &ts)) abort(); return_time = (dwt_clock_t)ts.tv_sec * 1000000000 + ts.tv_nsec; #else abort(); #endif } break; case DWT_TIME_CLOCK: { #ifdef ENABLE_TIME_CLOCK clock_t time; time = clock(); return_time = (dwt_clock_t)time; #else abort(); #endif } break; case DWT_TIME_TIMES: { #ifdef ENABLE_TIME_TIMES struct tms tms_i; if( (clock_t)-1 == times(&tms_i) ) abort(); return_time = (dwt_clock_t)tms_i.tms_utime; #else abort(); #endif } break; case DWT_TIME_GETRUSAGE: { #ifdef ENABLE_TIME_GETRUSAGE int who = RUSAGE_SELF; struct rusage rusage_i; struct timespec ts; if( -1 == getrusage(who, &rusage_i) ) abort(); TIMEVAL_TO_TIMESPEC(&rusage_i.ru_utime, &ts); return_time = (dwt_clock_t)ts.tv_sec * 1000000000 + ts.tv_nsec; #else abort(); #endif } break; case DWT_TIME_IOCTL_RTC: { #ifdef ENABLE_TIME_IOCTL_RTC int fd = open("/dev/rtc", O_RDONLY|O_NONBLOCK); if( -1 == fd ) abort(); struct rtc_time rtc_time_i; if( -1 == ioctl(fd, RTC_RD_TIME, &rtc_time_i) ) abort(); if( -1 == close(fd) ) abort(); time_t time = mktime( (struct tm *)&rtc_time_i ); if( (time_t)-1 == time ) abort(); return_time = (dwt_clock_t)time; #else abort(); #endif } break; case DWT_TIME_GETTIMEOFDAY: { #ifdef ENABLE_TIME_GETTIMEOFDAY struct timeval tv; struct timespec ts; if( -1 == gettimeofday(&tv, NULL) ) abort(); TIMEVAL_TO_TIMESPEC(&tv, &ts); return_time = (dwt_clock_t)ts.tv_sec * 1000000000 + ts.tv_nsec; #else abort(); #endif } break; case DWT_TIME_GETRUSAGE_SELF: { #ifdef ENABLE_TIME_GETRUSAGE_SELF int who = RUSAGE_SELF; struct rusage rusage_i; struct timespec ts; if( -1 == getrusage(who, &rusage_i) ) abort(); TIMEVAL_TO_TIMESPEC(&rusage_i.ru_utime, &ts); return_time = (dwt_clock_t)ts.tv_sec * 1000000000 + ts.tv_nsec; #else abort(); #endif } break; case DWT_TIME_GETRUSAGE_CHILDREN: { #ifdef ENABLE_TIME_GETRUSAGE_CHILDREN int who = RUSAGE_CHILDREN; struct rusage rusage_i; struct timespec ts; if( -1 == getrusage(who, &rusage_i) ) abort(); TIMEVAL_TO_TIMESPEC(&rusage_i.ru_utime, &ts); return_time = (dwt_clock_t)ts.tv_sec * 1000000000 + ts.tv_nsec; #else abort(); #endif } break; case DWT_TIME_GETRUSAGE_THREAD: { #ifdef ENABLE_TIME_GETRUSAGE_THREAD int who = RUSAGE_THREAD; struct rusage rusage_i; struct timespec ts; if( -1 == getrusage(who, &rusage_i) ) abort(); TIMEVAL_TO_TIMESPEC(&rusage_i.ru_utime, &ts); return_time = (dwt_clock_t)ts.tv_sec * 1000000000 + ts.tv_nsec; #else abort(); #endif } 
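/* clock types not handled above fall through to the default case below and abort() */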
break; default: abort(); } return return_time; } int dwt_util_clock_available( int type) { switch(type) { case DWT_TIME_CLOCK_GETTIME: { #ifdef ENABLE_TIME_CLOCK_GETTIME return 0; #endif } break; case DWT_TIME_CLOCK_GETTIME_REALTIME: { #ifdef ENABLE_TIME_CLOCK_GETTIME_REALTIME return 0; #endif } break; case DWT_TIME_CLOCK_GETTIME_MONOTONIC: { #ifdef ENABLE_TIME_CLOCK_GETTIME_MONOTONIC return 0; #endif } break; case DWT_TIME_CLOCK_GETTIME_MONOTONIC_RAW: { #ifdef ENABLE_TIME_CLOCK_GETTIME_MONOTONIC_RAW return 0; #endif } break; case DWT_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID: { #ifdef ENABLE_TIME_CLOCK_GETTIME_PROCESS_CPUTIME_ID return 0; #endif } break; case DWT_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID: { #ifdef ENABLE_TIME_CLOCK_GETTIME_THREAD_CPUTIME_ID return 0; #endif } break; case DWT_TIME_CLOCK: { #ifdef ENABLE_TIME_CLOCK return 0; #endif } break; case DWT_TIME_TIMES: { #ifdef ENABLE_TIME_TIMES return 0; #endif } break; case DWT_TIME_GETRUSAGE: { #ifdef ENABLE_TIME_GETRUSAGE return 0; #endif } break; case DWT_TIME_GETRUSAGE_SELF: { #ifdef ENABLE_TIME_GETRUSAGE_SELF return 0; #endif } break; case DWT_TIME_GETRUSAGE_CHILDREN: { #ifdef ENABLE_TIME_GETRUSAGE_CHILDREN return 0; #endif } break; case DWT_TIME_GETRUSAGE_THREAD: { #ifdef ENABLE_TIME_GETRUSAGE_THREAD return 0; #endif } break; case DWT_TIME_GETTIMEOFDAY: { #ifdef ENABLE_TIME_GETTIMEOFDAY return 0; #endif } break; case DWT_TIME_IOCTL_RTC: { #ifdef ENABLE_TIME_IOCTL_RTC return 0; #endif } break; case DWT_TIME_AUTOSELECT: { #ifdef ENABLE_TIME_AUTOSELECT return 0; #endif } break; } return -1; } void dwt_util_wait(int ms) { assert( ms > 0 ); const int type = dwt_util_clock_autoselect(); const dwt_clock_t freq = dwt_util_get_frequency(type); const dwt_clock_t start = dwt_util_get_clock(type); while( 1000.0f * (dwt_util_get_clock(type) - start) / freq < (float)ms ) ; } int dwt_util_get_thread_num() { #ifdef _OPENMP return omp_get_thread_num(); #else return 0; #endif } int dwt_util_get_max_threads() { #ifdef _OPENMP return omp_get_max_threads(); #else /* _OPENMP */ return 1; #endif /* _OPENMP */ } int dwt_util_get_max_workers() { #ifdef __asvp__ return get_total_workers(); #else /* microblaze */ return 1; #endif /* microblaze */ } void dwt_util_set_num_threads( int num_threads) { assert( num_threads > 0 ); #ifdef _OPENMP omp_set_num_threads(num_threads); #else UNUSED(num_threads); #endif } void dwt_util_set_num_workers( int num_workers) { assert( num_workers > 0 ); set_active_workers(num_workers); } int dwt_util_get_num_threads() { #ifdef _OPENMP int num_threads; #pragma omp parallel { #pragma omp master { num_threads = omp_get_num_threads(); } } return num_threads; #else return 1; #endif } int dwt_util_get_num_workers() { return get_active_workers(); } void dwt_util_init() { FUNC_BEGIN; #ifdef __asvp__ for(int w = 0; w < get_total_workers(); w++) { WAL_CHECK( wal_init_worker(worker[w]) ); // FIXME(ASVP): translate DWT_OP_LIFT4SA into WAL_PBID_P0 by function WAL_CHECK( wal_set_firmware(worker[w], WAL_PBID_P0 /*DWT_OP_LIFT4SA*/, fw_fp01_lift4sa, -1) ); WAL_CHECK( wal_set_firmware(worker[w], WAL_PBID_P1 /*DWT_OP_LIFT4SB*/, fw_fp01_lift4sb, -1) ); // TODO(ASVP): call switch_op() WAL_CHECK( wal_reset_worker(worker[w]) ); WAL_CHECK( wal_start_operation(worker[w], WAL_PBID_P0) ); } dwt_util_set_accel(1); #endif /* microblaze */ FUNC_END; } void dwt_util_finish() { FUNC_BEGIN; #ifdef __asvp__ for(int w = 0; w < get_total_workers(); w++) { WAL_CHECK( wal_done_worker(worker[w]) ); } #endif FUNC_END; } void dwt_util_abort() { FUNC_BEGIN; #ifdef 
__asvp__ for(int w = 0; w < get_total_workers(); w++) { // FIXME(ASVP): is this legal? although the operation was not running? wal_end_operation(worker[w]); // deinitialize worker wal_done_worker(worker[w]); } #endif /* microblaze */ abort(); FUNC_END; } int dwt_util_dump_i( const void *ptr, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y) { assert( size_i_big_x >= 0 && size_i_big_y >= 0 ); FILE *file = stdout; for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const int px = *addr2_const_i(ptr, y, x, stride_x, stride_y); fprintf(file, "%i ", px); } fprintf(file, "\n"); } return 0; } int dwt_util_save_to_pgm_i( const char *filename, int max_value, const void *ptr, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y) { assert( max_value != 0 && size_i_big_x >= 0 && size_i_big_y >= 0 ); const int target_max_value = 255; FILE *file = fopen(filename, "w"); if(NULL == file) return 1; fprintf(file, "P2\n%i %i\n%i\n", size_i_big_x, size_i_big_y, target_max_value); int err = 0; for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const int px = *addr2_const_i(ptr, y, x, stride_x, stride_y); int val = (target_max_value*px/max_value); if( px > max_value ) { if( !err++ ) dwt_util_log(LOG_WARN, "%s: Maximum pixel intensity exceeded (%i > %i) at (y=%i, x=%i). Such an incident will be reported only once.\n", __FUNCTION__, px, max_value, y, x); } if( px > max_value ) { val = target_max_value; } if( px < 0 ) { if( !err++ ) dwt_util_log(LOG_WARN, "%s: Minimum pixel intensity exceeded (%i < %i) at (y=%i, x=%i). Such an incident will be reported only once.\n", __FUNCTION__, px, 0, y, x); } if( px < 0 ) { val = 0; } if( fprintf(file, "%i\n", val) < 0 ) { dwt_util_log(LOG_WARN, "%s: error writing into file.\n", __FUNCTION__); fclose(file); return 1; } } } fclose(file); if( err ) dwt_util_log(LOG_WARN, "%s: %i errors ocurred while saving a file.\n", __FUNCTION__, err); return 0; } int dwt_util_save_to_pgm_i16( const char *filename, int16_t max_value, const void *ptr, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y ) { assert( max_value != 0 && size_i_big_x >= 0 && size_i_big_y >= 0 ); const int target_max_value = 255; FILE *file = fopen(filename, "w"); if(NULL == file) return 1; fprintf(file, "P2\n%i %i\n%i\n", size_i_big_x, size_i_big_y, target_max_value); int err = 0; for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const int16_t px = *addr2_const_i16(ptr, y, x, stride_x, stride_y); int val = (target_max_value*px/max_value); if( px > max_value ) { if( !err++ ) dwt_util_log(LOG_WARN, "%s: Maximum pixel intensity exceeded (%i > %i) at (y=%i, x=%i). Such an incident will be reported only once.\n", __FUNCTION__, px, max_value, y, x); } if( px > max_value ) { val = target_max_value; } if( px < 0 ) { if( !err++ ) dwt_util_log(LOG_WARN, "%s: Minimum pixel intensity exceeded (%i < %i) at (y=%i, x=%i). 
Such an incident will be reported only once.\n", __FUNCTION__, px, 0, y, x); } if( px < 0 ) { val = 0; } if( fprintf(file, "%i\n", val) < 0 ) { dwt_util_log(LOG_WARN, "%s: error writing into file.\n", __FUNCTION__); fclose(file); return 1; } } } fclose(file); if( err ) dwt_util_log(LOG_WARN, "%s: %i errors ocurred while saving a file.\n", __FUNCTION__, err); return 0; } static int skip_mess(FILE *file) { assert( file ); int c; while(1) { c = fgetc(file); if( EOF == c ) return EOF; if( isspace(c) ) continue; if( '#' == c ) { // comment to EOL while(1) { c = fgetc(file); if( EOF == c ) return EOF; if( '\n' == c ) break; } continue; } break; } if( EOF == ungetc(c, file) ) return EOF; return 0; } int dwt_util_load_from_pgm_s( const char *filename, float max_value, void **pptr, int *pstride_x, int *pstride_y, int *psize_x, int *psize_y) { assert( filename && pptr && pstride_x && pstride_y && psize_x && psize_y ); FILE *file = fopen(filename, "r"); if(NULL == file) { dwt_util_log(LOG_ERR, "Cannot open file '%s'.\n", filename); return 1; } int target_max_value; if( 'P' != fgetc(file) || '2' != fgetc(file) ) { dwt_util_log(LOG_ERR, "Invalid file header.\n"); return 2; } skip_mess(file); if( 1 != fscanf(file, "%i", psize_x) ) { dwt_util_log(LOG_ERR, "Invalid file metadata.\n"); return 2; } skip_mess(file); if( 1 != fscanf(file, "%i", psize_y) ) { dwt_util_log(LOG_ERR, "Invalid file metadata.\n"); return 2; } skip_mess(file); if( 1 != fscanf(file, "%i", &target_max_value) ) { dwt_util_log(LOG_ERR, "Invalid file metadata.\n"); return 2; } if( target_max_value >= 65536 || target_max_value <= 0 ) { dwt_util_log(LOG_ERR, "Invalid depth.\n"); return 3; } #ifdef DEBUG dwt_util_log(LOG_DBG, "%s: going to read (%ix%i) image of depth %i...\n", __FUNCTION__, *psize_x, *psize_y, target_max_value); #endif *pstride_y = sizeof(float); *pstride_x = dwt_util_get_opt_stride(*pstride_y * *psize_x); #ifdef DEBUG dwt_util_log(LOG_DBG, "%s: with strides (%i,%i)...\n", __FUNCTION__, *pstride_x, *pstride_y); #endif dwt_util_alloc_image(pptr, *pstride_x, *pstride_y, *psize_x, *psize_y); for(int y = 0; y < *psize_y; y++) { for(int x = 0; x < *psize_x; x++) { float *ppx = addr2_s(*pptr, y, x, *pstride_x, *pstride_y); int val; skip_mess(file); if( 1 != fscanf(file, "%i", &val) ) { dwt_util_log(LOG_ERR, "Invalid data.\n"); return 4; } if( val < 0 || val > target_max_value ) { dwt_util_log(LOG_ERR, "Invalid data depth.\n"); return 5; } *ppx = max_value*val/target_max_value; } } fclose(file); return 0; } int dwt_util_load_from_pgm_i( const char *filename, int max_value, void **pptr, int *pstride_x, int *pstride_y, int *psize_x, int *psize_y) { assert( filename && pptr && pstride_x && pstride_y && psize_x && psize_y ); FILE *file = fopen(filename, "r"); if(NULL == file) { dwt_util_log(LOG_ERR, "Cannot open file '%s'.\n", filename); return 1; } int target_max_value; if( 'P' != fgetc(file) || '2' != fgetc(file) ) { dwt_util_log(LOG_ERR, "Invalid file header.\n"); return 2; } skip_mess(file); if( 1 != fscanf(file, "%i", psize_x) ) { dwt_util_log(LOG_ERR, "Invalid file metadata.\n"); return 2; } skip_mess(file); if( 1 != fscanf(file, "%i", psize_y) ) { dwt_util_log(LOG_ERR, "Invalid file metadata.\n"); return 2; } skip_mess(file); if( 1 != fscanf(file, "%i", &target_max_value) ) { dwt_util_log(LOG_ERR, "Invalid file metadata.\n"); return 2; } if( target_max_value >= 65536 || target_max_value <= 0 ) { dwt_util_log(LOG_ERR, "Invalid depth.\n"); return 3; } #ifdef DEBUG dwt_util_log(LOG_DBG, "%s: going to read (%ix%i) image of depth 
%i...\n", __FUNCTION__, *psize_x, *psize_y, target_max_value); #endif *pstride_y = sizeof(int); *pstride_x = dwt_util_get_opt_stride(*pstride_y * *psize_x); #ifdef DEBUG dwt_util_log(LOG_DBG, "%s: with strides (%i,%i)...\n", __FUNCTION__, *pstride_x, *pstride_y); #endif dwt_util_alloc_image(pptr, *pstride_x, *pstride_y, *psize_x, *psize_y); for(int y = 0; y < *psize_y; y++) { for(int x = 0; x < *psize_x; x++) { int *ppx = addr2_i(*pptr, y, x, *pstride_x, *pstride_y); int val; skip_mess(file); if( 1 != fscanf(file, "%i", &val) ) { dwt_util_log(LOG_ERR, "Invalid data.\n"); return 4; } if( val < 0 || val > target_max_value ) { dwt_util_log(LOG_ERR, "Invalid data depth.\n"); return 5; } *ppx = max_value*val/target_max_value; } } fclose(file); return 0; } int dwt_util_load_from_pgm_i16( const char *filename, int16_t max_value, void **pptr, int *pstride_x, int *pstride_y, int *psize_x, int *psize_y ) { assert( filename && pptr && pstride_x && pstride_y && psize_x && psize_y ); FILE *file = fopen(filename, "r"); if(NULL == file) { dwt_util_log(LOG_ERR, "Cannot open file '%s'.\n", filename); return 1; } int target_max_value; if( 'P' != fgetc(file) || '2' != fgetc(file) ) { dwt_util_log(LOG_ERR, "Invalid file header.\n"); return 2; } skip_mess(file); if( 1 != fscanf(file, "%i", psize_x) ) { dwt_util_log(LOG_ERR, "Invalid file metadata.\n"); return 2; } skip_mess(file); if( 1 != fscanf(file, "%i", psize_y) ) { dwt_util_log(LOG_ERR, "Invalid file metadata.\n"); return 2; } skip_mess(file); if( 1 != fscanf(file, "%i", &target_max_value) ) { dwt_util_log(LOG_ERR, "Invalid file metadata.\n"); return 2; } if( target_max_value >= 65536 || target_max_value <= 0 ) { dwt_util_log(LOG_ERR, "Invalid depth.\n"); return 3; } #ifdef DEBUG dwt_util_log(LOG_DBG, "%s: going to read (%ix%i) image of depth %i...\n", __FUNCTION__, *psize_x, *psize_y, target_max_value); #endif *pstride_y = sizeof(int16_t); *pstride_x = dwt_util_get_opt_stride(*pstride_y * *psize_x); #ifdef DEBUG dwt_util_log(LOG_DBG, "%s: with strides (%i,%i)...\n", __FUNCTION__, *pstride_x, *pstride_y); #endif dwt_util_alloc_image(pptr, *pstride_x, *pstride_y, *psize_x, *psize_y); for(int y = 0; y < *psize_y; y++) { for(int x = 0; x < *psize_x; x++) { int16_t *ppx = addr2_i16(*pptr, y, x, *pstride_x, *pstride_y); int16_t val; skip_mess(file); if( 1 != fscanf(file, "%hi", &val) ) { dwt_util_log(LOG_ERR, "Invalid data.\n"); return 4; } if( val < 0 || val > (int16_t)target_max_value ) { dwt_util_log(LOG_ERR, "Invalid data depth.\n"); return 5; } *ppx = (int16_t)( (int32_t)max_value*val/target_max_value ); } } fclose(file); return 0; } int dwt_util_save_log_to_pgm_s( const char *path, const void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { // alloc temp void *temp; dwt_util_alloc_image(&temp, stride_x, stride_y, size_x, size_y); // temp = log(abs(input)) dwt_util_conv_show_s(ptr, temp, stride_x, stride_y, size_x, size_y); // find min, max float minv, maxv; dwt_util_find_min_max_s( temp, size_x, size_y, stride_x, stride_y, &minv, &maxv ); #if 0 // scale + save dwt_util_shift_s( temp, size_x, size_y, stride_x, stride_y, -minv ); dwt_util_save_to_pgm_s( path, (-minv + maxv), temp, stride_x, stride_y, size_x, size_y ); #else // scale + save dwt_util_save_to_pgm_s( path, maxv, temp, stride_x, stride_y, size_x, size_y ); #endif // free dwt_util_free_image(&temp); return 0; } int dwt_util_save_to_pgm_s( const char *filename, float max_value, const void *ptr, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y) { assert( max_value != 
0.0f && size_i_big_x >= 0 && size_i_big_y >= 0 ); const int target_max_value = 255; FILE *file = fopen(filename, "w"); if(NULL == file) return 1; fprintf(file, "P2\n%i %i\n%i\n", size_i_big_x, size_i_big_y, target_max_value); int err = 0; for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const float px = *addr2_const_s(ptr, y, x, stride_x, stride_y); int val = (target_max_value*px/max_value); if( px - 1e-3f > max_value ) { if( !err++ ) dwt_util_log(LOG_WARN, "%s: Maximum pixel intensity exceeded (%f > %f) at (y=%i, x=%i). Such an incident will be reported only once.\n", __FUNCTION__, px, max_value, y, x); } // isnan if( px != px ) { if( !err++ ) dwt_util_log(LOG_WARN, "%s: NaN value (%f) at (y=%i, x=%i). Such an incident will be reported only once.\n", __FUNCTION__, px, y, x); val = 0; } if( px > max_value ) { val = target_max_value; } if( px + 1e-3f < 0.0f ) { if( !err++ ) dwt_util_log(LOG_WARN, "%s: Minimum pixel intensity exceeded (%f < %f) at (y=%i, x=%i). Such an incident will be reported only once.\n", __FUNCTION__, px, 0.0f, y, x); } if( px < 0.0f ) { val = 0; } // minimum integer value if( abs((int)px) < 0 ) { if( !err++ ) dwt_util_log(LOG_WARN, "%s: Wrong value (%f) at (y=%i, x=%i). Such an incident will be reported only once.\n", __FUNCTION__, px, y, x); val = 0; } if( fprintf(file, "%i\n", val) < 0) { dwt_util_log(LOG_WARN, "%s: error writing into file.\n", __FUNCTION__); fclose(file); return 1; } } } fclose(file); if( err ) dwt_util_log(LOG_WARN, "%s: %i errors ocurred while saving a file.\n", __FUNCTION__, err); return 0; } int dwt_util_save_to_pgm_d( const char *filename, double max_value, const void *ptr, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y) { assert( max_value != 0.0f && size_i_big_x >= 0 && size_i_big_y >= 0 ); const int target_max_value = 255; FILE *file = fopen(filename, "w"); if(NULL == file) return 1; fprintf(file, "P2\n%i %i\n%i\n", size_i_big_x, size_i_big_y, target_max_value); int err = 0; for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const double px = *addr2_const_d(ptr, y, x, stride_x, stride_y); int val = (target_max_value*px/max_value); if( px - 1e-6 > max_value ) { if( !err++ ) dwt_util_log(LOG_WARN, "%s: Maximum pixel intensity exceeded (%f > %f) at (y=%i, x=%i). Such an incident will be reported only once.\n", __FUNCTION__, px, max_value, y, x); } if( px > max_value ) { val = target_max_value; } if( px + 1e-6 < 0.0 ) { if( !err++ ) dwt_util_log(LOG_WARN, "%s: Minimum pixel intensity exceeded (%f < %f) at (y=%i, x=%i). 
Such an incident will be reported only once.\n", __FUNCTION__, px, 0.0f, y, x); } if( px < 0.0 ) { val = 0; } if( fprintf(file, "%i\n", val) < 0) { dwt_util_log(LOG_WARN, "%s: error writing into file.\n", __FUNCTION__); fclose(file); return 1; } } } fclose(file); if( err ) dwt_util_log(LOG_WARN, "%s: %i errors ocurred while saving a file.\n", __FUNCTION__, err); return 0; } void dwt_util_set_accel( int accel_type) { set_accel_type(accel_type); } #define iszero(x) (fpclassify(x) == FP_ZERO) int dwt_util_is_normal_or_zero_i(const float *a) { if( isnormal(*a) || iszero(*a) ) return 1; return 0; } int dwt_util_is_normal_or_zero(float a) { return dwt_util_is_normal_or_zero_i(&a); } int dwt_util_cmp_s_i(const float *a, const float *b) { assert( a ); assert( b ); const float eps = 1e-4; // FIXME: magic constant if( !dwt_util_is_normal_or_zero_i(a) || !dwt_util_is_normal_or_zero_i(b) ) { dwt_util_log(LOG_ERR, "%f or %f is not normal nor zero!\n", *a, *b); return 1; } if( fabsf( (*a) - (*b) ) > eps ) { dwt_util_log(LOG_ERR, "%f should be %f!\n", *a, *b); return 1; } return 0; } int dwt_util_cmp_s(float a, float b) { return dwt_util_cmp_s_i(&a, &b); } int dwt_util_generate_vec_s(float *addr, int size) { for(int i = 0; i < size; i++) addr[i] = (float)i; for(int i = 0; i < size; i++) { if( dwt_util_cmp_s(addr[i], (float)i) ) return 1; } return 0; } // 4-bytes alignment float *dwt_util_allocate_4_vec_s(int size) { assert( is_even(size) ); float *addr = (float *)0; // http://git.uclibc.org/uClibc/tree/include - memalign, posix_memalign addr = (float *)memalign(4, sizeof(float) * size); assert( is_aligned_4(addr) ); return addr; } // 8-bytes alignment float *dwt_util_allocate_8_vec_s(int size) { assert( is_even(size) ); float *addr = (float *)0; // http://git.uclibc.org/uClibc/tree/include - memalign, posix_memalign addr = (float *)memalign(8, sizeof(float) * size); assert( is_aligned_8(addr) ); return addr; } // 16-bytes alignment float *dwt_util_allocate_16_vec_s(int size) { assert( is_even(size) ); float *addr = (float *)0; // http://git.uclibc.org/uClibc/tree/include - memalign, posix_memalign addr = (float *)memalign(16, sizeof(float) * size); assert( is_aligned_16(addr) ); return addr; } float *dwt_util_allocate_vec_s(int size) { // FIXME: why must be even??? 
moreover, cannot allocate less elements than required size = to_even(size+1); float *addr = (float *)0; // http://git.uclibc.org/uClibc/tree/include - memalign, posix_memalign addr = (float *)memalign(16, sizeof(float) * size); assert( is_aligned_16(addr) ); return addr; } int dwt_util_zero_vec_s(float *addr, int size) { for(int i = 0; i < size; i++) addr[i] = (float)0; for(int i = 0; i < size; i++) { if( dwt_util_cmp_s(addr[i], (float)0) ) return 1; } return 0; } int dwt_util_copy_vec_s(const float *src, float *dst, int size) { dwt_util_memcpy_stride_s(dst, sizeof(dst[0]), src, sizeof(src[0]), size); for(int i = 0; i < size; i++) { if( dwt_util_cmp_s(dst[i], src[i]) ) return 1; } return 0; } int dwt_util_cmp_vec_s(const float *a, const float *b, int size) { for(int i = size-1; i >= 0; i--) { if( dwt_util_cmp_s(a[i], b[i]) ) return 1; } return 0; } void dwt_util_print_vec_s(const float *addr, int size) { dwt_util_log(LOG_NONE, "[ "); for(int i = 0; i < size; i++) dwt_util_log(LOG_NONE, "%f ", addr[i]); dwt_util_log(LOG_NONE, "]\n"); } void dwt_util_test() { for(int i = 2; i <= BANK_SIZE; i *= 2) { dwt_util_log(LOG_TEST, "allocate vector of %i floats...\n", i); float *addr = dwt_util_allocate_vec_s(i); if( !addr ) { dwt_util_log(LOG_ERR, "Failed to allocate vector of %i floats.\n", i); dwt_util_abort(); } free(addr); dwt_util_log(LOG_TEST, "ok\n"); } #ifdef __asvp__ for(int w = 0; w < get_total_workers(); w++) { dwt_util_log(LOG_TEST, "worker %i: init worker...\n", w); if( wal_init_worker(worker[w]) ) abort(); if( wal_reset_worker(worker[w]) ) abort(); const int size = BANK_SIZE; dwt_util_log(LOG_TEST, "allocating vector of %i floats...\n", size); float *addr = dwt_util_allocate_vec_s(size); if( !addr ) dwt_util_abort(); if( dwt_util_generate_vec_s(addr, size) ) dwt_util_abort(); dwt_util_log(LOG_TEST, "making copy of vector...\n"); float *copy = dwt_util_allocate_vec_s(size); if( !copy ) dwt_util_abort(); if( dwt_util_copy_vec_s(addr, copy, size) ) dwt_util_abort(); if( dwt_util_cmp_vec_s(addr, copy, size) ) dwt_util_abort(); dwt_util_log(LOG_TEST, "worker %i: memory transfer to BCE memory using new-style function...\n", w); if( wal_dma_configure(worker[w], 0, addr, 0, WAL_BCE_JSY_DMEM_A, 0, size) ) abort(); if( wal_dma_start(worker[w], 0, WAL_DMA_REQ_RD) ) abort(); while( wal_dma_isbusy(worker[w], 0x1) ) ; dwt_util_log(LOG_TEST, "zeroing memory...\n"); if( dwt_util_zero_vec_s(addr, size) ) dwt_util_abort(); dwt_util_log(LOG_TEST, "worker %i: memory transfer from BCE memory using new-style function...\n", w); if( wal_dma_start(worker[w], 0, WAL_DMA_REQ_WR) ) abort(); while( wal_dma_isbusy(worker[w], 0x1) ) ; dwt_util_log(LOG_TEST, "flushing cache...\n"); flush_cache_s(addr, size); dwt_util_log(LOG_TEST, "comparing with original sequence...\n"); if( dwt_util_cmp_vec_s(addr, copy, size) ) dwt_util_abort(); dwt_util_log(LOG_TEST, "worker %i: calling done worker...\n", w); wal_done_worker(worker[w]); dwt_util_log(LOG_TEST, "all tests done\n"); } #endif } int dwt_util_vfprintf(FILE *stream, const char *format, va_list ap) { return vfprintf(stream, format, ap); } int dwt_util_vprintf(const char *format, va_list ap) { return dwt_util_vfprintf(stdout, format, ap); } int dwt_util_fprintf(FILE *stream, const char *format, ...) { va_list ap; va_start(ap, format); int ret = dwt_util_vfprintf(stream, format, ap); va_end(ap); return ret; } int dwt_util_printf(const char *format, ...) 
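/* printf-style helper: forwards to dwt_util_vprintf(), i.e. prints to stdout */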
{ va_list ap; va_start(ap, format); int ret = dwt_util_vprintf(format, ap); va_end(ap); return ret; } enum dwt_color { DWT_COLOR_DEFAULT = 0, // styles DWT_COLOR_BOLD, // DWT_COLOR_DIM, // DWT_COLOR_UNDERLINED, // DWT_COLOR_BLINK, // DWT_COLOR_REVERSE, // DWT_COLOR_HIDDEN, // colors DWT_COLOR_BLACK, DWT_COLOR_RED, DWT_COLOR_GREEN, DWT_COLOR_YELLOW, DWT_COLOR_BLUE, DWT_COLOR_MAGENTA, DWT_COLOR_CYAN, DWT_COLOR_LIGHTGRAY, // DWT_COLOR_DARKGRAY, // DWT_COLOR_LIGHTRED, // DWT_COLOR_LIGHTGREEN, // DWT_COLOR_LIGHTYELLOW, // DWT_COLOR_LIGHTBLUE, // DWT_COLOR_LIGHTMAGENTA, // DWT_COLOR_LIGHTCYAN, // DWT_COLOR_WHITE, }; // TODO: http://misc.flogisoft.com/bash/tip_colors_and_formatting // TODO: http://www.termsys.demon.co.uk/vtansi.htm int dwt_util_color(FILE *stream, int style, int foreground, int background) { int ret = 0; ret += dwt_util_fprintf(stream, "\e[0m"); switch(style) { case DWT_COLOR_BOLD: ret += dwt_util_fprintf(stream, "\e[1m"); break; } switch(foreground) { case DWT_COLOR_BLACK: ret += dwt_util_fprintf(stream, "\e[30m"); break; case DWT_COLOR_RED: ret += dwt_util_fprintf(stream, "\e[31m"); break; case DWT_COLOR_GREEN: ret += dwt_util_fprintf(stream, "\e[32m"); break; case DWT_COLOR_YELLOW: ret += dwt_util_fprintf(stream, "\e[33m"); break; case DWT_COLOR_BLUE: ret += dwt_util_fprintf(stream, "\e[34m"); break; case DWT_COLOR_MAGENTA: ret += dwt_util_fprintf(stream, "\e[35m"); break; case DWT_COLOR_CYAN: ret += dwt_util_fprintf(stream, "\e[36m"); break; case DWT_COLOR_LIGHTGRAY: ret += dwt_util_fprintf(stream, "\e[37m"); break; } switch(background) { case DWT_COLOR_BLACK: ret += dwt_util_fprintf(stream, "\e[40m"); break; case DWT_COLOR_RED: ret += dwt_util_fprintf(stream, "\e[41m"); break; case DWT_COLOR_GREEN: ret += dwt_util_fprintf(stream, "\e[42m"); break; case DWT_COLOR_YELLOW: ret += dwt_util_fprintf(stream, "\e[43m"); break; case DWT_COLOR_BLUE: ret += dwt_util_fprintf(stream, "\e[44m"); break; case DWT_COLOR_MAGENTA: ret += dwt_util_fprintf(stream, "\e[45m"); break; case DWT_COLOR_CYAN: ret += dwt_util_fprintf(stream, "\e[46m"); break; case DWT_COLOR_LIGHTGRAY: ret += dwt_util_fprintf(stream, "\e[47m"); break; } return ret; } int dwt_util_log( enum dwt_util_loglevel level, const char *format, ... ) { int ret = 0; FILE *stream = stderr; const char *prefix[] = { [LOG_NONE] = "", [LOG_DBG] = "DEBUG: ", [LOG_INFO] = "INFO: ", [LOG_WARN] = "WARNING: ", [LOG_ERR] = "ERROR: ", [LOG_TEST] = "TEST: ", }; const int color[] = { [LOG_NONE] = DWT_COLOR_DEFAULT, [LOG_DBG] = DWT_COLOR_DEFAULT, [LOG_INFO] = DWT_COLOR_BLUE, [LOG_WARN] = DWT_COLOR_YELLOW, [LOG_ERR] = DWT_COLOR_RED, [LOG_TEST] = DWT_COLOR_MAGENTA, }; flockfile(stream); ret += dwt_util_color(stream, DWT_COLOR_BOLD, color[level], DWT_COLOR_DEFAULT); ret += dwt_util_fprintf(stream, prefix[level]); ret += dwt_util_color(stream, DWT_COLOR_DEFAULT, DWT_COLOR_DEFAULT, DWT_COLOR_DEFAULT); va_list ap; va_start(ap, format); ret += dwt_util_vfprintf(stream, format, ap); va_end(ap); fflush(stream); funlockfile(stream); return ret; } int dwt_util_vlog( enum dwt_util_loglevel level, const char *format, va_list ap) { int ret = 0; FILE *stream = stderr; const char *prefix[] = { [LOG_NONE] = "", [LOG_DBG] = "DEBUG: ", [LOG_INFO] = "INFO: ", [LOG_WARN] = "WARNING: ", [LOG_ERR] = "ERROR: ", [LOG_TEST] = "TEST: ", }; flockfile(stream); ret += dwt_util_fprintf(stream, prefix[level]); ret += dwt_util_vfprintf(stream, format, ap); fflush(stream); funlockfile(stream); return ret; } void dwt_util_error( const char *format, ...) 
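/* logs the formatted message at LOG_ERR level and then calls dwt_util_abort(); does not return */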
{ va_list ap; va_start(ap, format); dwt_util_vlog(LOG_ERR, format, ap); va_end(ap); dwt_util_abort(); } static void *alloc(size_t size) { void *ptr = malloc(size); if(!ptr) { dwt_util_log(LOG_ERR, "Unable to allocate memory.\n"); dwt_util_abort(); } return ptr; } static const char *node() { long host_name_max = sysconf(_SC_HOST_NAME_MAX); if( -1 == host_name_max ) host_name_max = 255; host_name_max++; // the terminating null byte // NOTE: should gethostname be called instead of reading from procfs? FILE *f = fopen("/proc/sys/kernel/hostname", "r"); if(f) { static char *buff = NULL; // NOTE: global variable if(!buff) buff = (char *)alloc(host_name_max); const char *ret = fgets(buff, host_name_max, f); fclose(f); if(ret) { char *nl = strchr(buff, '\n'); if(nl) *nl = 0; return buff; } else return "unknown"; } else return "unknown"; } const char *dwt_util_node() { return node(); } static const char *appname() { long page_size = sysconf(_SC_PAGESIZE); if( -1 == page_size ) page_size = 4096; FILE *f = fopen("/proc/self/cmdline", "r"); if(f) { static char *buff = NULL; // NOTE: global variable if(!buff) buff = (char *)alloc(page_size); const char *ret = fgets(buff, page_size, f); fclose(f); if(ret) return basename(buff); else return "unknown"; } else return "unknown"; } const char *dwt_util_appname() { return appname(); } static int find_dfa_seq(int N) { int state = 1; int count = 0; do { const int addr = 2 * state; state = addr - N * (addr >= N); count++; if( 1 == state ) return count; } while( count < 2*N ); return 0; } /** * @brief Variant of Fermat primality test for base-2. */ static int is_prime(int N) { // 2 is prime if( 2 == N ) return 1; // even numbers are not primes, i.e. 0, 2, 4, 6, 8, ... if( !(N & 1) ) return 0; // negative numbers and unity are not prime numbers, i.e. ..., -2, -1, 0, 1 if( N < 2 ) return 0; // number of zeros after leading one-bit in left side of Fermat's little theorem with base 2 const int d = N - 1; // length of zero-bit sequence after leading one-bit accepted by DFA which accepts numbers congruent to 1 modulo N const int c = find_dfa_seq(N); // can DFA accept a big number in the left side of Fermat's little theorem? const int r = d % c; // if can then we got probably prime if( 0 == r ) return 1; return 0; } int dwt_util_is_prime(int N) { return is_prime(N); } /** * @brief Returns smallest prime not less than N. */ static int next_prime(int N) { if( N <= 2 ) return 2; N |= 1; while( !is_prime(N) ) N += 2; return N; } int dwt_util_next_prime(int N) { return next_prime(N); } int dwt_util_is_pow2(int x) { return is_pow2(x); } long dwt_util_get_ncpus() { long nprocessors_conf = sysconf(_SC_NPROCESSORS_CONF); if( -1 == nprocessors_conf ) nprocessors_conf = 1; return nprocessors_conf; } void dwt_util_print_info() { dwt_util_log(LOG_INFO, "architecture: \"%s\"\n", dwt_util_arch()); dwt_util_log(LOG_INFO, "address:\n"); #if microblaze size_t ptr_size = sizeof(void*); long ptr_size_bits = ptr_size<<3; dwt_util_log(LOG_INFO, "[addr:%u]\n", ptr_size_bits); #endif #ifdef __x86_64__ size_t ptr_size = sizeof(void*); long ptr_size_bits = ptr_size<<3; long LEVEL1_DCACHE_SIZE = sysconf(_SC_LEVEL1_DCACHE_SIZE); long LEVEL1_DCACHE_ASSOC = sysconf(_SC_LEVEL1_DCACHE_ASSOC); long LEVEL1_DCACHE_LINESIZE = sysconf(_SC_LEVEL1_DCACHE_LINESIZE); unsigned dcache_offset_bits = (unsigned)ceil_log2(LEVEL1_DCACHE_LINESIZE); // FIXME: div. 
by zero long dcache_sets = LEVEL1_DCACHE_SIZE / LEVEL1_DCACHE_ASSOC / LEVEL1_DCACHE_LINESIZE; unsigned dcache_set_bits = (unsigned)ceil_log2(dcache_sets); unsigned tag_bits = (unsigned)(ptr_size_bits - dcache_set_bits - dcache_offset_bits); dwt_util_log(LOG_INFO, "[addr:%u] => [tag:%u][cache_set:%u][offset:%u]\n", ptr_size_bits, tag_bits, dcache_set_bits, dcache_offset_bits); #endif #if __arm__ size_t ptr_size = sizeof(void*); long ptr_size_bits = ptr_size<<3; dwt_util_log(LOG_INFO, "[addr:%u]\n", ptr_size_bits); #endif dwt_util_log(LOG_INFO, "number of CPUs = %lu\n", dwt_util_get_ncpus()); } static int get_opt_stride(int min_stride) { assert( min_stride > 0 ); #ifdef microblaze // align to 32 bits due to MicroBlaze constraints // higher stride has better performance (observed) const int stride = align_8(next_prime(min_stride)); // powers of two have worse performance (observed) return is_pow2(stride) ? align_8(stride+1) : stride; #endif #ifdef __x86_64__ // find prime number not lesser than min_stride return next_prime(min_stride); #endif #ifdef __arm__ // FIXME: what align is really needed? return align_8(min_stride); #endif } int dwt_util_get_opt_stride(int min_stride) { return get_opt_stride(min_stride); } #define up_to_odd(x) ((x)|1) /* getconf -a | grep -i cache /sys/devices/system/cpu/cpu0/cache/ LEVEL1_DCACHE_LINESIZE = 64-byte cache line => log2(64) = 6-bit [offset] LEVEL1_DCACHE_ASSOC = associativity = 8-way LEVEL1_DCACHE_SIZE = cache size = 64 sets x 8 ways x 64-byte line = 32K number of sets = 64 sets = 32K / 8-ways / 64-byte line => log2(64) = 6-bit [set] 32-bit address: [tag:20][set:6][offset:6] [ val] [ prime] [ prime][000000] [tag][000001][000000] */ int dwt_util_get_stride(int min_stride, int opt) { #ifdef microblaze return opt ? get_opt_stride(min_stride) : min_stride; #endif #ifdef __arm__ // FIXME: ARM needs special alignment for floats, e.g. "case 2" bellow return opt ? 
get_opt_stride(min_stride) : min_stride; #endif switch(opt) { case 0: // [ val] return min_stride; case 1: // [ prime] return next_prime(min_stride); case 2: // [ prime][000000] return next_prime(align_64(min_stride)>>6)<<6; case 3: // [tag][000001][000000] return ((align_4096(min_stride)>>6)+1)<<6; case 4: // [tag][ val][000000] return align_64(min_stride); case 5: // [tag][xx0000][000000] return align_4096(min_stride) + (1<<ceil_log2(min_stride)); case 6: // [ odd] return up_to_odd(min_stride); case 7: // [ odd][000000] return up_to_odd(align_64(min_stride)>>6)<<6; default: { dwt_util_log(LOG_DBG, "%s: invalid stride choice (%i)\n", __FUNCTION__, opt); return min_stride; } } } void dwt_util_subband( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, enum dwt_subbands band, void **dst_ptr, int *dst_size_x, int *dst_size_y) { assert( ptr != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 && size_o_big_x >= 0 && size_o_big_y >= 0 ); int inner_H_x = 0; int inner_H_y = 0; int inner_L_x = size_i_big_x; int inner_L_y = size_i_big_y; int outer_x = size_o_big_x; int outer_y = size_o_big_y; for(int j = 1; j <= j_max; j++) { inner_H_x = floor_div2(inner_L_x); inner_H_y = floor_div2(inner_L_y); inner_L_x = ceil_div2 (inner_L_x); inner_L_y = ceil_div2 (inner_L_y); outer_x = ceil_div2 (outer_x); outer_y = ceil_div2 (outer_y); } switch(band) { case DWT_LL: *dst_ptr = addr2(ptr, 0, 0, stride_x, stride_y); *dst_size_x = inner_L_x; *dst_size_y = inner_L_y; break; case DWT_HL: *dst_ptr = addr2(ptr, 0, outer_x, stride_x, stride_y); *dst_size_x = inner_H_x; *dst_size_y = inner_L_y; break; case DWT_LH: *dst_ptr = addr2(ptr, outer_y, 0, stride_x, stride_y); *dst_size_x = inner_L_x; *dst_size_y = inner_H_y; break; case DWT_HH: *dst_ptr = addr2(ptr, outer_y, outer_x, stride_x, stride_y); *dst_size_x = inner_H_x; *dst_size_y = inner_H_y; break; } } void dwt_util_subband_const( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, enum dwt_subbands band, const void **dst_ptr, int *dst_size_x, int *dst_size_y) { assert( ptr != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 && size_o_big_x >= 0 && size_o_big_y >= 0 ); int inner_H_x = 0; int inner_H_y = 0; int inner_L_x = size_i_big_x; int inner_L_y = size_i_big_y; int outer_x = size_o_big_x; int outer_y = size_o_big_y; for(int j = 1; j <= j_max; j++) { inner_H_x = floor_div2(inner_L_x); inner_H_y = floor_div2(inner_L_y); inner_L_x = ceil_div2 (inner_L_x); inner_L_y = ceil_div2 (inner_L_y); outer_x = ceil_div2 (outer_x); outer_y = ceil_div2 (outer_y); } switch(band) { case DWT_LL: *dst_ptr = addr2_const(ptr, 0, 0, stride_x, stride_y); *dst_size_x = inner_L_x; *dst_size_y = inner_L_y; break; case DWT_HL: *dst_ptr = addr2_const(ptr, 0, outer_x, stride_x, stride_y); *dst_size_x = inner_H_x; *dst_size_y = inner_L_y; break; case DWT_LH: *dst_ptr = addr2_const(ptr, outer_y, 0, stride_x, stride_y); *dst_size_x = inner_L_x; *dst_size_y = inner_H_y; break; case DWT_HH: *dst_ptr = addr2_const(ptr, outer_y, outer_x, stride_x, stride_y); *dst_size_x = inner_H_x; *dst_size_y = inner_H_y; break; } } void dwt_util_subband_i( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, enum dwt_subbands band, void **dst_ptr, int *dst_size_x, int *dst_size_y) { dwt_util_subband( ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, band, dst_ptr, 
dst_size_x, dst_size_y); } void dwt_util_subband_s( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, enum dwt_subbands band, void **dst_ptr, int *dst_size_x, int *dst_size_y) { dwt_util_subband( ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, band, dst_ptr, dst_size_x, dst_size_y); } void dwt_util_subband_const_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, enum dwt_subbands band, const void **dst_ptr, int *dst_size_x, int *dst_size_y) { dwt_util_subband_const( ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, band, dst_ptr, dst_size_x, dst_size_y); } void dwt_util_subband_d( void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, enum dwt_subbands band, void **dst_ptr, int *dst_size_x, int *dst_size_y) { dwt_util_subband( ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, band, dst_ptr, dst_size_x, dst_size_y); } void dwt_util_diff_i( const void *src0, const void *src1, void *dst, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y) { FUNC_BEGIN; assert( src0 && src1 && dst && size_i_big_x >= 0 && size_i_big_y >= 0 ); for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const int c0 = *addr2_const_i(src0, y, x, stride_x, stride_y); const int c1 = *addr2_const_i(src1, y, x, stride_x, stride_y); int *c = addr2_i(dst, y, x, stride_x, stride_y); *c = c1 - c0; } } FUNC_END; } /** * @brief Natural logarithm of @e x, i.e. ln(x) or log_{e}(x). */ void log_i_s(float *result, float x) { *result = log(x); } void log_i_d(double *result, double x) { *result = log(x); } void dwt_util_conv_show_i( const void *src, void *dst, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y) { FUNC_BEGIN; assert( src != NULL && dst != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 ); for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const int coeff = *addr2_const_i(src, y, x, stride_x, stride_y); int *log_coeff = addr2_i(dst, y, x, stride_x, stride_y); // FIXME: *log_coeff = ceil_log2(1+abs(coeff)<<a)<<b; *log_coeff = abs(coeff); } } FUNC_END; } void dwt_util_conv_show_i16( const void *src, void *dst, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y ) { FUNC_BEGIN; assert( src != NULL && dst != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 ); for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const int16_t coeff = *addr2_const_i16(src, y, x, stride_x, stride_y); int16_t *log_coeff = addr2_i16(dst, y, x, stride_x, stride_y); // FIXME: *log_coeff = ceil_log2(1+abs(coeff)<<a)<<b; *log_coeff = (int16_t)abs(coeff); } } FUNC_END; } void dwt_util_conv_show_s( const void *src, void *dst, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y) { FUNC_BEGIN; assert( src != NULL && dst != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 ); // magic constants const float a = 100.f; const float b = 10.f; int err = 0; for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const float coeff = *addr2_const_s(src, y, x, stride_x, stride_y); float *log_coeff = addr2_s(dst, y, x, stride_x, stride_y); float temp; log_i_s(&temp, 1.f+fabsf(coeff)*a); temp /= b; if( !isfinite(temp) ) { if(!err) dwt_util_log(LOG_ERR, "either NaN or INFINITY; this error will 
be reported only once\n"); err++; temp = 0.f; } *log_coeff = temp; } } FUNC_END; } void dwt_util_conv_show_d( const void *src, void *dst, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y) { FUNC_BEGIN; assert( src != NULL && dst != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 ); // magic constants const double a = 100.; const double b = 10.; for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const double coeff = *addr2_const_d(src, y, x, stride_x, stride_y); double *log_coeff = addr2_d(dst, y, x, stride_x, stride_y); double temp; log_i_d(&temp, 1.+fabs(coeff)*a); temp /= b; *log_coeff = temp; } } FUNC_END; } void dwt_util_copy_s( const void *src, void *dst, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y) { FUNC_BEGIN; assert( src != NULL && dst != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 ); for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const float src_coeff = *addr2_const_s(src, y, x, stride_x, stride_y); float *dst_coeff = addr2_s(dst, y, x, stride_x, stride_y); *dst_coeff = src_coeff; } } FUNC_END; } void dwt_util_copy3_s( const void *src, void *dst, int src_stride_x, int src_stride_y, int dst_stride_x, int dst_stride_y, int size_x, int size_y ) { FUNC_BEGIN; assert( src != NULL && dst != NULL && size_x >= 0 && size_y >= 0 ); for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { const float src_coeff = *addr2_const_s(src, y, x, src_stride_x, src_stride_y); float *dst_coeff = addr2_s(dst, y, x, dst_stride_x, dst_stride_y); *dst_coeff = src_coeff; } } FUNC_END; } void dwt_util_copy_d( const void *src, void *dst, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y) { FUNC_BEGIN; assert( src != NULL && dst != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 ); for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const double src_coeff = *addr2_const_d(src, y, x, stride_x, stride_y); double *dst_coeff = addr2_d(dst, y, x, stride_x, stride_y); *dst_coeff = src_coeff; } } FUNC_END; } void dwt_util_copy_i( const void *src, void *dst, int stride_x, int stride_y, int size_i_big_x, int size_i_big_y) { FUNC_BEGIN; assert( src != NULL && dst != NULL && size_i_big_x >= 0 && size_i_big_y >= 0 ); for(int y = 0; y < size_i_big_y; y++) { for(int x = 0; x < size_i_big_x; x++) { const int src_coeff = *addr2_const_i(src, y, x, stride_x, stride_y); int *dst_coeff = addr2_i(dst, y, x, stride_x, stride_y); *dst_coeff = src_coeff; } } FUNC_END; } // TODO: propagate "flush" void dwt_util_perf_cdf53_2_i( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, float *fwd_secs, float *inv_secs) { FUNC_BEGIN; assert( M > 0 && N > 0 && fwd_secs && inv_secs ); assert( size_o_big_x > 0 && size_o_big_y > 0 && size_i_big_x > 0 && size_i_big_y > 0 ); // pointer to M pointers to image data void *ptr[M]; int j[M]; // allocate M images for(int m = 0; m < M; m++) { // copy j_max to j[] j[m] = j_max; // allocate dwt_util_alloc_image( &ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y); // fill with test pattern dwt_util_test_image_fill_i( ptr[m], stride_x, stride_y, size_i_big_x, size_i_big_y, 0); } *fwd_secs = +INFINITY; *inv_secs = +INFINITY; // perform N test loops, select minimum for(int n = 0; n < N; n++) { #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif 
// start timer const dwt_clock_t time_fwd_start = dwt_util_get_clock(clock_type); // perform M fwd transforms for(int m = 0; m < M; m++) { dwt_cdf53_2f_i( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, &j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_fwd_stop = dwt_util_get_clock(clock_type); // calc avg const float time_fwd_secs = (float)(time_fwd_stop - time_fwd_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_fwd_secs < *fwd_secs ) *fwd_secs = time_fwd_secs; #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_inv_start = dwt_util_get_clock(clock_type); // perform M inv transforms for(int m = 0; m < M; m++) { dwt_cdf53_2i_i( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_inv_stop = dwt_util_get_clock(clock_type); // calc avg const float time_inv_secs = (float)(time_inv_stop - time_inv_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_inv_secs < *inv_secs ) *inv_secs = time_inv_secs; } // free M images for(int m = 0; m < M; m++) { dwt_util_free_image(&ptr[m]); } FUNC_END; } // TODO: propagate "flush" void dwt_util_perf_cdf97_2_s( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, float *fwd_secs, float *inv_secs) { FUNC_BEGIN; assert( M > 0 && N > 0 && fwd_secs && inv_secs ); assert( size_o_big_x > 0 && size_o_big_y > 0 && size_i_big_x > 0 && size_i_big_y > 0 ); // pointer to M pointers to image data void *ptr[M]; int j[M]; // allocate M images for(int m = 0; m < M; m++) { // copy j_max to j[] j[m] = j_max; // allocate dwt_util_alloc_image( &ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y); // fill with test pattern dwt_util_test_image_fill_s( ptr[m], stride_x, stride_y, size_i_big_x, size_i_big_y, 0); } *fwd_secs = +INFINITY; *inv_secs = +INFINITY; // perform N test loops, select minimum for(int n = 0; n < N; n++) { #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_fwd_start = dwt_util_get_clock(clock_type); // perform M fwd transforms for(int m = 0; m < M; m++) { dwt_cdf97_2f_s( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, &j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_fwd_stop = dwt_util_get_clock(clock_type); // calc avg const float time_fwd_secs = (float)(time_fwd_stop - time_fwd_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_fwd_secs < *fwd_secs ) *fwd_secs = time_fwd_secs; #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_inv_start = dwt_util_get_clock(clock_type); // perform M inv transforms for(int m = 0; m < M; m++) { dwt_cdf97_2i_s( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_inv_stop = dwt_util_get_clock(clock_type); // calc avg const float time_inv_secs = (float)(time_inv_stop - 
time_inv_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_inv_secs < *inv_secs ) *inv_secs = time_inv_secs; } // free M images for(int m = 0; m < M; m++) { dwt_util_free_image(&ptr[m]); } FUNC_END; } // TODO: propagate "flush" void dwt_util_perf_cdf97_2_inplace_s( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, float *fwd_secs, float *inv_secs) { FUNC_BEGIN; assert( M > 0 && N > 0 && fwd_secs && inv_secs ); assert( size_o_big_x > 0 && size_o_big_y > 0 && size_i_big_x > 0 && size_i_big_y > 0 ); void *template; dwt_util_alloc_image( &template, stride_x, stride_y, size_o_big_x, size_o_big_y); dwt_util_test_image_fill_s( template, stride_x, stride_y, size_i_big_x, size_i_big_y, 0); // pointer to M pointers to image data void *ptr[M]; int j[M]; // allocate M images for(int m = 0; m < M; m++) { // copy j_max to j[] j[m] = j_max; // allocate dwt_util_alloc_image( &ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y); } *fwd_secs = +INFINITY; *inv_secs = +INFINITY; // perform N test loops, select minimum for(int n = 0; n < N; n++) { for(int m = 0; m < M; m++) { // fill with test pattern dwt_util_test_image_fill_s( ptr[m], stride_x, stride_y, size_i_big_x, size_i_big_y, 0); } #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_fwd_start = dwt_util_get_clock(clock_type); // perform M fwd transforms for(int m = 0; m < M; m++) { dwt_cdf97_2f_inplace_s( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, &j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_fwd_stop = dwt_util_get_clock(clock_type); // calc avg const float time_fwd_secs = (float)(time_fwd_stop - time_fwd_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_fwd_secs < *fwd_secs ) *fwd_secs = time_fwd_secs; #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_inv_start = dwt_util_get_clock(clock_type); // perform M inv transforms for(int m = 0; m < M; m++) { dwt_cdf97_2i_inplace_s( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_inv_stop = dwt_util_get_clock(clock_type); // calc avg const float time_inv_secs = (float)(time_inv_stop - time_inv_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_inv_secs < *inv_secs ) *inv_secs = time_inv_secs; // compare for(int m = 0; m < M; m++) { if( dwt_util_compare_s(ptr[m], template, stride_x, stride_y, size_i_big_x, size_i_big_y) ) { dwt_util_log(LOG_ERR, "images differ!\n"); } } } // free M images for(int m = 0; m < M; m++) { dwt_util_free_image(&ptr[m]); } dwt_util_free_image(&template); FUNC_END; } // TODO: propagate "flush" void dwt_util_perf_cdf97_2_inplace_sep_s( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, float *fwd_secs, float *inv_secs) { FUNC_BEGIN; assert( M > 0 && N > 0 && fwd_secs && inv_secs ); assert( size_o_big_x > 0 && size_o_big_y > 0 && size_i_big_x > 0 && size_i_big_y > 0 ); void 
*template; dwt_util_alloc_image( &template, stride_x, stride_y, size_o_big_x, size_o_big_y); dwt_util_test_image_fill_s( template, stride_x, stride_y, size_i_big_x, size_i_big_y, 0); // pointer to M pointers to image data void *ptr[M]; int j[M]; // allocate M images for(int m = 0; m < M; m++) { // copy j_max to j[] j[m] = j_max; // allocate dwt_util_alloc_image( &ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y ); } *fwd_secs = +INFINITY; *inv_secs = +INFINITY; // perform N test loops, select minimum for(int n = 0; n < N; n++) { for(int m = 0; m < M; m++) { // fill with test pattern dwt_util_test_image_fill_s( ptr[m], stride_x, stride_y, size_i_big_x, size_i_big_y, 0); } #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_fwd_start = dwt_util_get_clock(clock_type); // perform M fwd transforms for(int m = 0; m < M; m++) { dwt_cdf97_2f_inplace_sep_s( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, &j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_fwd_stop = dwt_util_get_clock(clock_type); // calc avg const float time_fwd_secs = (float)(time_fwd_stop - time_fwd_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_fwd_secs < *fwd_secs ) *fwd_secs = time_fwd_secs; #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_inv_start = dwt_util_get_clock(clock_type); // perform M inv transforms for(int m = 0; m < M; m++) { dwt_cdf97_2i_inplace_s( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_inv_stop = dwt_util_get_clock(clock_type); // calc avg const float time_inv_secs = (float)(time_inv_stop - time_inv_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_inv_secs < *inv_secs ) *inv_secs = time_inv_secs; // compare for(int m = 0; m < M; m++) { if( dwt_util_compare_s(ptr[m], template, stride_x, stride_y, size_i_big_x, size_i_big_y) ) { dwt_util_log(LOG_ERR, "images differ!\n"); } } } // free M images for(int m = 0; m < M; m++) { dwt_util_free_image(&ptr[m]); } dwt_util_free_image(&template); FUNC_END; } // TODO: propagate "flush" void dwt_util_perf_cdf97_2_inplace_sep_sdl_s( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, float *fwd_secs, float *inv_secs) { FUNC_BEGIN; assert( M > 0 && N > 0 && fwd_secs && inv_secs ); assert( size_o_big_x > 0 && size_o_big_y > 0 && size_i_big_x > 0 && size_i_big_y > 0 ); void *template; dwt_util_alloc_image( &template, stride_x, stride_y, size_o_big_x, size_o_big_y); dwt_util_test_image_fill_s( template, stride_x, stride_y, size_i_big_x, size_i_big_y, 0); // pointer to M pointers to image data void *ptr[M]; int j[M]; // allocate M images for(int m = 0; m < M; m++) { // copy j_max to j[] j[m] = j_max; // allocate dwt_util_alloc_image( &ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y); } *fwd_secs = +INFINITY; *inv_secs = +INFINITY; // perform N test loops, select minimum for(int n = 0; n < N; n++) { for(int m = 0; m < M; m++) { // fill with test pattern dwt_util_test_image_fill_s( ptr[m], stride_x, stride_y, size_i_big_x, 
size_i_big_y, 0); } #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_fwd_start = dwt_util_get_clock(clock_type); // perform M fwd transforms for(int m = 0; m < M; m++) { dwt_cdf97_2f_inplace_sep_sdl_s( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, &j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_fwd_stop = dwt_util_get_clock(clock_type); // calc avg const float time_fwd_secs = (float)(time_fwd_stop - time_fwd_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_fwd_secs < *fwd_secs ) *fwd_secs = time_fwd_secs; #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_inv_start = dwt_util_get_clock(clock_type); // perform M inv transforms for(int m = 0; m < M; m++) { dwt_cdf97_2i_inplace_s( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_inv_stop = dwt_util_get_clock(clock_type); // calc avg const float time_inv_secs = (float)(time_inv_stop - time_inv_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_inv_secs < *inv_secs ) *inv_secs = time_inv_secs; // compare for(int m = 0; m < M; m++) { if( dwt_util_compare_s(ptr[m], template, stride_x, stride_y, size_i_big_x, size_i_big_y) ) { dwt_util_log(LOG_ERR, "images differ!\n"); } } } // free M images for(int m = 0; m < M; m++) { dwt_util_free_image(&ptr[m]); } dwt_util_free_image(&template); FUNC_END; } // TODO: propagate "flush" void dwt_util_perf_cdf97_2_inplace_sdl_s( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, float *fwd_secs, float *inv_secs) { FUNC_BEGIN; assert( M > 0 && N > 0 && fwd_secs && inv_secs ); assert( size_o_big_x > 0 && size_o_big_y > 0 && size_i_big_x > 0 && size_i_big_y > 0 ); // pointer to M pointers to image data void *ptr[M]; int j[M]; // allocate M images for(int m = 0; m < M; m++) { // copy j_max to j[] j[m] = j_max; // allocate dwt_util_alloc_image( &ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y); // fill with test pattern dwt_util_test_image_fill_s( ptr[m], stride_x, stride_y, size_i_big_x, size_i_big_y, 0); } *fwd_secs = +INFINITY; *inv_secs = +INFINITY; // perform N test loops, select minimum for(int n = 0; n < N; n++) { #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_fwd_start = dwt_util_get_clock(clock_type); // perform M fwd transforms for(int m = 0; m < M; m++) { dwt_cdf97_2f_inplace_sdl_s( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, &j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_fwd_stop = dwt_util_get_clock(clock_type); // calc avg const float time_fwd_secs = (float)(time_fwd_stop - time_fwd_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_fwd_secs < *fwd_secs ) *fwd_secs = time_fwd_secs; #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif 
// start timer const dwt_clock_t time_inv_start = dwt_util_get_clock(clock_type); // perform M inv transforms for(int m = 0; m < M; m++) { // FIXME: use SDL version of inverse transform dwt_cdf97_2i_inplace_s( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_inv_stop = dwt_util_get_clock(clock_type); // calc avg const float time_inv_secs = (float)(time_inv_stop - time_inv_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_inv_secs < *inv_secs ) *inv_secs = time_inv_secs; } // free M images for(int m = 0; m < M; m++) { dwt_util_free_image(&ptr[m]); } FUNC_END; } // TODO: propagate "flush" void dwt_util_perf_cdf97_2_d( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, double *fwd_secs, double *inv_secs) { FUNC_BEGIN; assert( M > 0 && N > 0 && fwd_secs && inv_secs ); assert( size_o_big_x > 0 && size_o_big_y > 0 && size_i_big_x > 0 && size_i_big_y > 0 ); // pointer to M pointers to image data void *ptr[M]; int j[M]; // allocate M images for(int m = 0; m < M; m++) { // copy j_max to j[] j[m] = j_max; // allocate dwt_util_alloc_image( &ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y); // fill with test pattern dwt_util_test_image_fill_d( ptr[m], stride_x, stride_y, size_i_big_x, size_i_big_y, 0); } *fwd_secs = +INFINITY; *inv_secs = +INFINITY; // perform N test loops, select minimum for(int n = 0; n < N; n++) { #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_fwd_start = dwt_util_get_clock(clock_type); // perform M fwd transforms for(int m = 0; m < M; m++) { dwt_cdf97_2f_d( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, &j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_fwd_stop = dwt_util_get_clock(clock_type); // calc avg const double time_fwd_secs = (double)(time_fwd_stop - time_fwd_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_fwd_secs < *fwd_secs ) *fwd_secs = time_fwd_secs; #if 1 // FIXME: flush memory for(int m = 0; m < M; m++) flush_cache(ptr[m], image_size(stride_x, stride_y, size_o_big_x, size_o_big_y) ); #endif // start timer const dwt_clock_t time_inv_start = dwt_util_get_clock(clock_type); // perform M inv transforms for(int m = 0; m < M; m++) { dwt_cdf97_2i_d( ptr[m], stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j[m], decompose_one, zero_padding); } // stop timer const dwt_clock_t time_inv_stop = dwt_util_get_clock(clock_type); // calc avg const double time_inv_secs = (double)(time_inv_stop - time_inv_start) / M * MEASURE_FACTOR / dwt_util_get_frequency(clock_type); // select min if( time_inv_secs < *inv_secs ) *inv_secs = time_inv_secs; } // free M images for(int m = 0; m < M; m++) { dwt_util_free_image(&ptr[m]); } FUNC_END; } void dwt_util_get_sizes_i( enum dwt_array array_type, int size_x, int size_y, int opt_stride, int *stride_x, int *stride_y, int *size_o_big_x, int *size_o_big_y, int *size_i_big_x, int *size_i_big_y ) { FUNC_BEGIN; assert( size_x > 0 && size_y > 0 ); assert( stride_x && stride_y && size_o_big_x && size_o_big_y && size_i_big_x && size_i_big_y ); *stride_y = sizeof(int); *stride_x = dwt_util_get_stride( (*stride_y) * 
		dwt_util_pow2_ceil_log2(size_x), opt_stride);

	*size_o_big_x = size_x;
	*size_o_big_y = size_y;
	*size_i_big_x = size_x;
	*size_i_big_y = size_y;

	if( DWT_ARR_SPARSE == array_type || DWT_ARR_SIMPLE == array_type )
	{
		*size_o_big_x = dwt_util_pow2_ceil_log2(*size_o_big_x);
		*size_o_big_y = dwt_util_pow2_ceil_log2(*size_o_big_y);
	}

	if( DWT_ARR_SIMPLE == array_type )
	{
		// the simple layout transforms the whole padded (power-of-two) area
		*size_i_big_x = *size_o_big_x;
		*size_i_big_y = *size_o_big_y;
	}

	FUNC_END;
}

void dwt_util_get_sizes_s(
	enum dwt_array array_type,
	int size_x,
	int size_y,
	int opt_stride,
	int *stride_x,
	int *stride_y,
	int *size_o_big_x,
	int *size_o_big_y,
	int *size_i_big_x,
	int *size_i_big_y
)
{
	FUNC_BEGIN;

	assert( size_x > 0 && size_y > 0 );

	assert( stride_x && stride_y && size_o_big_x && size_o_big_y && size_i_big_x && size_i_big_y );

	*stride_y = sizeof(float);
	*stride_x = dwt_util_get_stride(
		(*stride_y) * dwt_util_pow2_ceil_log2(size_x), opt_stride);

	*size_o_big_x = size_x;
	*size_o_big_y = size_y;
	*size_i_big_x = size_x;
	*size_i_big_y = size_y;

	if( DWT_ARR_SPARSE == array_type || DWT_ARR_SIMPLE == array_type )
	{
		*size_o_big_x = dwt_util_pow2_ceil_log2(*size_o_big_x);
		*size_o_big_y = dwt_util_pow2_ceil_log2(*size_o_big_y);
	}

	if( DWT_ARR_SIMPLE == array_type )
	{
		// the simple layout transforms the whole padded (power-of-two) area
		*size_i_big_x = *size_o_big_x;
		*size_i_big_y = *size_o_big_y;
	}

	FUNC_END;
}

void dwt_util_get_sizes_d(
	enum dwt_array array_type,
	int size_x,
	int size_y,
	int opt_stride,
	int *stride_x,
	int *stride_y,
	int *size_o_big_x,
	int *size_o_big_y,
	int *size_i_big_x,
	int *size_i_big_y
)
{
	FUNC_BEGIN;

	assert( size_x > 0 && size_y > 0 );

	assert( stride_x && stride_y && size_o_big_x && size_o_big_y && size_i_big_x && size_i_big_y );

	*stride_y = sizeof(double);
	*stride_x = dwt_util_get_stride(
		(*stride_y) * dwt_util_pow2_ceil_log2(size_x), opt_stride);

	*size_o_big_x = size_x;
	*size_o_big_y = size_y;
	*size_i_big_x = size_x;
	*size_i_big_y = size_y;

	if( DWT_ARR_SPARSE == array_type || DWT_ARR_SIMPLE == array_type )
	{
		*size_o_big_x = dwt_util_pow2_ceil_log2(*size_o_big_x);
		*size_o_big_y = dwt_util_pow2_ceil_log2(*size_o_big_y);
	}

	if( DWT_ARR_SIMPLE == array_type )
	{
		// the simple layout transforms the whole padded (power-of-two) area
		*size_i_big_x = *size_o_big_x;
		*size_i_big_y = *size_o_big_y;
	}

	FUNC_END;
}

// 1.618, 1.333, 1.28, 1.13, 1.06, 1.02
// float g_growth_factor_s = 1.28f;
// float g_growth_factor_d = 1.28;
float g_growth_factor_s = 1.13f;
float g_growth_factor_d = 1.13;

void dwt_util_measure_perf_cdf97_1_s(
	enum dwt_array array_type,
	int min_x,
	int max_x,
	int opt_stride,
	int j_max,
	int decompose_one,
	int zero_padding,
	int M,
	int N,
	int clock_type,
	FILE *fwd_plot_data,
	FILE *inv_plot_data
)
{
	FUNC_BEGIN;

	assert( min_x > 0 && min_x < max_x );

	assert( M > 0 && N > 0 );

	assert( fwd_plot_data && inv_plot_data );

	const float growth_factor = g_growth_factor_s;

	// for x = min_x to max_x
	for(int x = min_x; x <= max_x; x = ceilf(x * growth_factor))
	{
		// fixed y
		const int y = 1;

		int stride_x;
		int stride_y;
		int size_o_big_x;
		int size_o_big_y;
		int size_i_big_x;
		int size_i_big_y;

		// get sizes
		dwt_util_get_sizes_s(
			array_type,
			x, y,
			opt_stride,
			&stride_x, &stride_y,
			&size_o_big_x, &size_o_big_y,
			&size_i_big_x, &size_i_big_y
		);

		dwt_util_log(LOG_DBG, "performance test for [%ix%i] in [%ix%i] with strides (%i, %i)...\n",
			size_i_big_x, size_i_big_y, size_o_big_x, size_o_big_y, stride_x, stride_y);

		float fwd_secs;
		float inv_secs;

		// call perf()
		dwt_util_perf_cdf97_2_s(
			stride_x, stride_y,
			size_o_big_x, size_o_big_y,
			size_i_big_x, size_i_big_y,
			j_max,
			decompose_one,
			zero_padding,
			M, N,
			clock_type,
			&fwd_secs,
			&inv_secs
		);

#ifdef MEASURE_PER_PIXEL
		const int denominator = x*y;
#else
		const int
denominator = 1; #endif // printf into file fprintf(fwd_plot_data, "%i\t%.10f\n", x*y, fwd_secs/denominator); fprintf(inv_plot_data, "%i\t%.10f\n", x*y, inv_secs/denominator); } FUNC_END; } void dwt_util_measure_perf_cdf97_1_d( enum dwt_array array_type, int min_x, int max_x, int opt_stride, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, FILE *fwd_plot_data, FILE *inv_plot_data ) { FUNC_BEGIN; assert( min_x > 0 && min_x < max_x ); assert( M > 0 && N > 0 ); assert( fwd_plot_data && inv_plot_data ); const double growth_factor = g_growth_factor_d; // for x = min_x to max_x for(int x = min_x; x <= max_x; x = ceil(x * growth_factor)) { // fixed y const int y = 1; int stride_x; int stride_y; int size_o_big_x; int size_o_big_y; int size_i_big_x; int size_i_big_y; // get sizes dwt_util_get_sizes_d( array_type, x, y, opt_stride, &stride_x, &stride_y, &size_o_big_x, &size_o_big_y, &size_i_big_x, &size_i_big_y ); dwt_util_log(LOG_DBG, "performance test for [%ix%i] in [%ix%i] with strides (%i, %i)...\n", size_i_big_x, size_i_big_y, size_o_big_x, size_o_big_y, stride_x, stride_y); double fwd_secs; double inv_secs; // call perf() dwt_util_perf_cdf97_2_d( stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, decompose_one, zero_padding, M, N, clock_type, &fwd_secs, &inv_secs ); // printf into file fprintf(fwd_plot_data, "%i\t%.10f\n", x*y, fwd_secs); fprintf(inv_plot_data, "%i\t%.10f\n", x*y, inv_secs); } FUNC_END; } void dwt_util_measure_perf_cdf97_2_s( enum dwt_array array_type, int min_x, int max_x, int opt_stride, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, FILE *fwd_plot_data, FILE *inv_plot_data ) { FUNC_BEGIN; assert( min_x > 0 && min_x < max_x ); assert( M > 0 && N > 0 ); assert( fwd_plot_data && inv_plot_data ); const float growth_factor = g_growth_factor_s; // for x = min_x to max_x for(int x = min_x; x <= max_x; x = ceilf(x * growth_factor)) { // y is equal to x const int y = x; int stride_x; int stride_y; int size_o_big_x; int size_o_big_y; int size_i_big_x; int size_i_big_y; // get sizes dwt_util_get_sizes_s( array_type, x, y, opt_stride, &stride_x, &stride_y, &size_o_big_x, &size_o_big_y, &size_i_big_x, &size_i_big_y ); dwt_util_log(LOG_DBG, "performance test for [%ix%i] in [%ix%i] with strides (%i, %i)...\n", size_i_big_x, size_i_big_y, size_o_big_x, size_o_big_y, stride_x, stride_y); float fwd_secs; float inv_secs; // call perf() dwt_util_perf_cdf97_2_s( stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, decompose_one, zero_padding, M, N, clock_type, &fwd_secs, &inv_secs ); #ifdef MEASURE_PER_PIXEL const int denominator = x*y; #else const int denominator = 1; #endif // printf into file fprintf(fwd_plot_data, "%i\t%.10f\n", x*y, fwd_secs/denominator); fprintf(inv_plot_data, "%i\t%.10f\n", x*y, inv_secs/denominator); } FUNC_END; } void dwt_util_measure_perf_cdf97_2_inplace_s( enum dwt_array array_type, int min_x, int max_x, int opt_stride, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, FILE *fwd_plot_data, FILE *inv_plot_data ) { FUNC_BEGIN; assert( min_x > 0 && min_x < max_x ); assert( M > 0 && N > 0 ); assert( fwd_plot_data && inv_plot_data ); const float growth_factor = g_growth_factor_s; // for x = min_x to max_x for(int x = min_x; x <= max_x; x = ceilf(x * growth_factor)) { // y is equal to x const int y = x; int stride_x; int stride_y; int size_o_big_x; int size_o_big_y; int size_i_big_x; int size_i_big_y; // get sizes 
dwt_util_get_sizes_s( array_type, x, y, opt_stride, &stride_x, &stride_y, &size_o_big_x, &size_o_big_y, &size_i_big_x, &size_i_big_y ); dwt_util_log(LOG_DBG, "performance test for [%ix%i] in [%ix%i] with strides (%i, %i)...\n", size_i_big_x, size_i_big_y, size_o_big_x, size_o_big_y, stride_x, stride_y); float fwd_secs; float inv_secs; // call perf() dwt_util_perf_cdf97_2_inplace_s( stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, decompose_one, zero_padding, M, N, clock_type, &fwd_secs, &inv_secs ); #ifdef MEASURE_PER_PIXEL const int denominator = x*y; #else const int denominator = 1; #endif // printf into file fprintf(fwd_plot_data, "%i\t%.10f\n", x*y, fwd_secs/denominator); fprintf(inv_plot_data, "%i\t%.10f\n", x*y, inv_secs/denominator); } FUNC_END; } void dwt_util_measure_perf_cdf97_2_inplace_sep_s( enum dwt_array array_type, int min_x, int max_x, int opt_stride, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, FILE *fwd_plot_data, FILE *inv_plot_data ) { FUNC_BEGIN; assert( min_x > 0 && min_x < max_x ); assert( M > 0 && N > 0 ); assert( fwd_plot_data && inv_plot_data ); const float growth_factor = g_growth_factor_s; // for x = min_x to max_x for(int x = min_x; x <= max_x; x = ceilf(x * growth_factor)) { // y is equal to x const int y = x; int stride_x; int stride_y; int size_o_big_x; int size_o_big_y; int size_i_big_x; int size_i_big_y; // get sizes dwt_util_get_sizes_s( array_type, x, y, opt_stride, &stride_x, &stride_y, &size_o_big_x, &size_o_big_y, &size_i_big_x, &size_i_big_y ); dwt_util_log(LOG_DBG, "performance test for [%ix%i] in [%ix%i] with strides (%i, %i)...\n", size_i_big_x, size_i_big_y, size_o_big_x, size_o_big_y, stride_x, stride_y); float fwd_secs; float inv_secs; // call perf() dwt_util_perf_cdf97_2_inplace_sep_s( stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, decompose_one, zero_padding, M, N, clock_type, &fwd_secs, &inv_secs ); #ifdef MEASURE_PER_PIXEL const int denominator = x*y; #else const int denominator = 1; #endif // printf into file fprintf(fwd_plot_data, "%i\t%.10f\n", x*y, fwd_secs/denominator); fprintf(inv_plot_data, "%i\t%.10f\n", x*y, inv_secs/denominator); } FUNC_END; } void dwt_util_measure_perf_cdf97_2_inplace_sep_sdl_s( enum dwt_array array_type, int min_x, int max_x, int opt_stride, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, FILE *fwd_plot_data, FILE *inv_plot_data ) { FUNC_BEGIN; assert( min_x > 0 && min_x < max_x ); assert( M > 0 && N > 0 ); assert( fwd_plot_data && inv_plot_data ); const float growth_factor = g_growth_factor_s; // for x = min_x to max_x for(int x = min_x; x <= max_x; x = ceilf(x * growth_factor)) { // y is equal to x const int y = x; int stride_x; int stride_y; int size_o_big_x; int size_o_big_y; int size_i_big_x; int size_i_big_y; // get sizes dwt_util_get_sizes_s( array_type, x, y, opt_stride, &stride_x, &stride_y, &size_o_big_x, &size_o_big_y, &size_i_big_x, &size_i_big_y ); dwt_util_log(LOG_DBG, "performance test for [%ix%i] in [%ix%i] with strides (%i, %i)...\n", size_i_big_x, size_i_big_y, size_o_big_x, size_o_big_y, stride_x, stride_y); float fwd_secs; float inv_secs; // call perf() dwt_util_perf_cdf97_2_inplace_sep_sdl_s( stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, decompose_one, zero_padding, M, N, clock_type, &fwd_secs, &inv_secs ); #ifdef MEASURE_PER_PIXEL const int denominator = x*y; #else const int denominator = 1; #endif // printf into file 
fprintf(fwd_plot_data, "%i\t%.10f\n", x*y, fwd_secs/denominator); fprintf(inv_plot_data, "%i\t%.10f\n", x*y, inv_secs/denominator); } FUNC_END; } void dwt_util_measure_perf_cdf97_2_inplace_sdl_s( enum dwt_array array_type, int min_x, int max_x, int opt_stride, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, FILE *fwd_plot_data, FILE *inv_plot_data ) { FUNC_BEGIN; assert( min_x > 0 && min_x < max_x ); assert( M > 0 && N > 0 ); assert( fwd_plot_data && inv_plot_data ); const float growth_factor = g_growth_factor_s; // for x = min_x to max_x for(int x = min_x; x <= max_x; x = ceilf(x * growth_factor)) { // y is equal to x const int y = x; int stride_x; int stride_y; int size_o_big_x; int size_o_big_y; int size_i_big_x; int size_i_big_y; // get sizes dwt_util_get_sizes_s( array_type, x, y, opt_stride, &stride_x, &stride_y, &size_o_big_x, &size_o_big_y, &size_i_big_x, &size_i_big_y ); dwt_util_log(LOG_DBG, "performance test for [%ix%i] in [%ix%i] with strides (%i, %i)...\n", size_i_big_x, size_i_big_y, size_o_big_x, size_o_big_y, stride_x, stride_y); float fwd_secs; float inv_secs; // call perf() dwt_util_perf_cdf97_2_inplace_sdl_s( stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, decompose_one, zero_padding, M, N, clock_type, &fwd_secs, &inv_secs ); #ifdef MEASURE_PER_PIXEL const int denominator = x*y; #else const int denominator = 1; #endif // printf into file fprintf(fwd_plot_data, "%i\t%.10f\n", x*y, fwd_secs/denominator); fprintf(inv_plot_data, "%i\t%.10f\n", x*y, inv_secs/denominator); } FUNC_END; } void dwt_util_measure_perf_cdf97_2_d( enum dwt_array array_type, int min_x, int max_x, int opt_stride, int j_max, int decompose_one, int zero_padding, int M, int N, int clock_type, FILE *fwd_plot_data, FILE *inv_plot_data ) { FUNC_BEGIN; assert( min_x > 0 && min_x < max_x ); assert( M > 0 && N > 0 ); assert( fwd_plot_data && inv_plot_data ); const double growth_factor = g_growth_factor_d; // for x = min_x to max_x for(int x = min_x; x <= max_x; x = ceil(x * growth_factor)) { // y is equal to x const int y = x; int stride_x; int stride_y; int size_o_big_x; int size_o_big_y; int size_i_big_x; int size_i_big_y; // get sizes dwt_util_get_sizes_d( array_type, x, y, opt_stride, &stride_x, &stride_y, &size_o_big_x, &size_o_big_y, &size_i_big_x, &size_i_big_y ); dwt_util_log(LOG_DBG, "performance test for [%ix%i] in [%ix%i] with strides (%i, %i)...\n", size_i_big_x, size_i_big_y, size_o_big_x, size_o_big_y, stride_x, stride_y); double fwd_secs; double inv_secs; // call perf() dwt_util_perf_cdf97_2_d( stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, decompose_one, zero_padding, M, N, clock_type, &fwd_secs, &inv_secs ); // printf into file fprintf(fwd_plot_data, "%i\t%.10f\n", x*y, fwd_secs); fprintf(inv_plot_data, "%i\t%.10f\n", x*y, inv_secs); } FUNC_END; } float dwt_util_band_wps_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y, int j ) { float sum = 0.0f; for(int y = 0; y < size_y; y++) for(int x = 0; x < size_x; x++) { const float *coeff = dwt_util_addr_coeff_const_s(ptr, y, x, stride_x, stride_y); // ^2 sum += *coeff * *coeff; } // rectification // Liu, Y., X.S. Liang, and R.H. Weisberg, 2007: Rectification of the bias in the wavelet power spectrum. Journal of Atmospheric and Oceanic Technology, 24(12), 2093-2102. 
// http://ocgweb.marine.usf.edu/~liu/wavelet.html // http://ocgweb.marine.usf.edu/~liu/Papers/Liu_etal_2007_JAOT_wavelet.pdf sum /= 1<<j; return sum; } static int cmp_s( const void *p1, const void *p2 ) { if( *(const float *)p1 > *(const float *)p2 ) return +1; if( *(const float *)p1 < *(const float *)p2 ) return -1; return 0; } float dwt_util_band_med_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { const int size = size_x * size_y; //dwt_util_log(LOG_DBG, "size=%i size_x=%i size_y=%i\n", size, size_x, size_y); float *arr = dwt_util_allocate_vec_s(size); #ifdef FV_ON_MAGNITUDES for(int y = 0; y < size_y; y++) for(int x = 0; x < size_x; x++) arr[y*size_x+x] = fabsf( *dwt_util_addr_coeff_const_s(ptr, y, x, stride_x, stride_y) ); #else for(int y = 0; y < size_y; y++) dwt_util_memcpy_stride_s( &arr[y*size_x], sizeof(float), dwt_util_addr_coeff_const_s(ptr, y, 0, stride_x, stride_y), stride_y, size_x); #endif qsort(arr, size, sizeof(float), cmp_s); const float med = arr[size/2]; free(arr); return med; } int dwt_util_count_subbands_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max ) { int count = 0; for(int j = 1; j < j_max; j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) count++; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) count++; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) count++; } return count; } void dwt_util_wps_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, float *fv ) { int count = 0; for(int j = 1; j < j_max; j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_wps_s(band_ptr, stride_x, stride_y, band_x, band_y, j); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_wps_s(band_ptr, stride_x, stride_y, band_x, band_y, j); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_wps_s(band_ptr, stride_x, stride_y, band_x, band_y, j); } } void dwt_util_med_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, float *fv ) { int count = 0; for(int j = 1; j < j_max; j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_med_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) 
fv[count++] = dwt_util_band_med_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_med_s(band_ptr, stride_x, stride_y, band_x, band_y); } } float dwt_util_band_maxidx_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { int idx = -1; float val; for(int y = 0; y < size_y; y++) for(int x = 0; x < size_x; x++) { float coeff = fabsf(*dwt_util_addr_coeff_const_s(ptr, y, x, stride_x, stride_y)); if( -1 == idx || coeff > val ) { val = coeff; idx = y * size_x + x; } } return (float)idx; } float dwt_util_band_mean_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { float sum = 0.0f; for(int y = 0; y < size_y; y++) for(int x = 0; x < size_x; x++) { #ifdef FV_ON_MAGNITUDES float coeff = fabsf(*dwt_util_addr_coeff_const_s(ptr, y, x, stride_x, stride_y)); #else float coeff = *dwt_util_addr_coeff_const_s(ptr, y, x, stride_x, stride_y); #endif sum += coeff; } sum /= size_x * size_y; return sum; } float dwt_util_band_moment_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y, int n, float c ) { const int size = size_x * size_y; float sum = 0.0f; for(int y = 0; y < size_y; y++) for(int x = 0; x < size_x; x++) { #ifdef FV_ON_MAGNITUDES float coeff = fabsf(*dwt_util_addr_coeff_const_s(ptr, y, x, stride_x, stride_y)); #else float coeff = *dwt_util_addr_coeff_const_s(ptr, y, x, stride_x, stride_y); #endif sum += powf(coeff - c, n); } return sum/size; } float dwt_util_band_cmoment_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y, int n ) { const float mean = dwt_util_band_mean_s(ptr, stride_x, stride_y, size_x, size_y); return dwt_util_band_moment_s(ptr, stride_x, stride_y, size_x, size_y, n, mean); } float dwt_util_band_var_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { return dwt_util_band_cmoment_s(ptr, stride_x, stride_y, size_x, size_y, 2); } float dwt_util_band_stdev_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { const float var = dwt_util_band_var_s(ptr, stride_x, stride_y, size_x, size_y); return sqrtf(var); } float dwt_util_band_smoment_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y, int n ) { const float stdev = dwt_util_band_stdev_s(ptr, stride_x, stride_y, size_x, size_y); return dwt_util_band_cmoment_s(ptr, stride_x, stride_y, size_x, size_y, n) / powf(stdev, n); } float dwt_util_band_skew_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { return dwt_util_band_smoment_s(ptr, stride_x, stride_y, size_x, size_y, 3); } float dwt_util_band_kurt_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { return dwt_util_band_smoment_s(ptr, stride_x, stride_y, size_x, size_y, 4) - 3; } float dwt_util_band_maxnorm_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { float max = 0.0f; for(int y = 0; y < size_y; y++) for(int x = 0; x < size_x; x++) { const float c = fabsf(*dwt_util_addr_coeff_const_s(ptr, y, x, stride_x, stride_y)); if( c > max ) max = c; } return max; } float dwt_util_band_lpnorm_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y, float p ) { float sum = 0.0f; if( +INFINITY == p ) return dwt_util_band_maxnorm_s(ptr, stride_x, stride_y, size_x, size_y); for(int y = 0; y < size_y; y++) for(int x = 0; x < size_x; x++) { const float c = 
*dwt_util_addr_coeff_const_s(ptr, y, x, stride_x, stride_y); sum += powf(fabsf(c), p); } return powf(sum, 1/p); } float dwt_util_band_norm_s( const void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { return dwt_util_band_lpnorm_s(ptr, stride_x, stride_y, size_x, size_y, 2); } void dwt_util_maxidx_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, float *fv ) { int count = 0; for(int j = 1; j < j_max; j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_maxidx_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_maxidx_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_maxidx_s(band_ptr, stride_x, stride_y, band_x, band_y); } } void dwt_util_mean_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, float *fv ) { int count = 0; for(int j = 1; j < j_max; j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_mean_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_mean_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_mean_s(band_ptr, stride_x, stride_y, band_x, band_y); } } void dwt_util_var_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, float *fv ) { int count = 0; for(int j = 1; j < j_max; j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_var_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_var_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_var_s(band_ptr, stride_x, stride_y, band_x, band_y); } } void dwt_util_stdev_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, float *fv ) { int count = 0; for(int j = 1; j < j_max; 
j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_stdev_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_stdev_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_stdev_s(band_ptr, stride_x, stride_y, band_x, band_y); } } void dwt_util_skew_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, float *fv ) { int count = 0; for(int j = 1; j < j_max; j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_skew_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_skew_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_skew_s(band_ptr, stride_x, stride_y, band_x, band_y); } } void dwt_util_kurt_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, float *fv ) { int count = 0; for(int j = 1; j < j_max; j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_kurt_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_kurt_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_kurt_s(band_ptr, stride_x, stride_y, band_x, band_y); } } void dwt_util_maxnorm_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, float *fv ) { int count = 0; for(int j = 1; j < j_max; j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_maxnorm_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = 
dwt_util_band_maxnorm_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_maxnorm_s(band_ptr, stride_x, stride_y, band_x, band_y); } } void dwt_util_lpnorm_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, float *fv, float p ) { int count = 0; for(int j = 1; j < j_max; j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_lpnorm_s(band_ptr, stride_x, stride_y, band_x, band_y, p); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_lpnorm_s(band_ptr, stride_x, stride_y, band_x, band_y, p); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_lpnorm_s(band_ptr, stride_x, stride_y, band_x, band_y, p); } } void dwt_util_norm_s( const void *ptr, int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, float *fv ) { int count = 0; for(int j = 1; j < j_max; j++) { const void *band_ptr; int band_x; int band_y; dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HL, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_norm_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_LH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_norm_s(band_ptr, stride_x, stride_y, band_x, band_y); dwt_util_subband_const_s(ptr, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, DWT_HH, &band_ptr, &band_x, &band_y); if( band_x && band_y ) fv[count++] = dwt_util_band_norm_s(band_ptr, stride_x, stride_y, band_x, band_y); } } int dwt_util_test_cdf97_2_s( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding ) { int j = j_max; void *data, *copy; // allocate image dwt_util_alloc_image( &data, stride_x, stride_y, size_o_big_x, size_o_big_y); // allocate copy dwt_util_alloc_image( &copy, stride_x, stride_y, size_o_big_x, size_o_big_y); // fill with test pattern dwt_util_test_image_fill_s( data, stride_x, stride_y, size_i_big_x, size_i_big_y, 0); // copy test the image into the copy dwt_util_copy_s( data, copy, stride_x, stride_y, size_i_big_x, size_i_big_y); // forward dwt_cdf97_2f_s( data, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, &j, decompose_one, zero_padding); // inverse dwt_cdf97_2i_s( data, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, decompose_one, zero_padding); int ret; // compare if( dwt_util_compare_s(data, copy, stride_x, stride_y, size_i_big_x, size_i_big_y) ) ret = 1; else ret = 0; dwt_util_free_image(&data); dwt_util_free_image(&copy); return ret; } int dwt_util_test_cdf97_2_s2( int stride_x, int 
stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding ) { int j = j_max; void *data1, *data2, *data3, *copy; // allocate image dwt_util_alloc_image( &data1, stride_x, stride_y, size_o_big_x, size_o_big_y); dwt_util_alloc_image( &data2, stride_x, stride_y, size_o_big_x, size_o_big_y); dwt_util_alloc_image( &data3, stride_x, stride_y, size_o_big_x, size_o_big_y); // allocate copy dwt_util_alloc_image( &copy, stride_x, stride_y, size_o_big_x, size_o_big_y); // fill with test pattern dwt_util_test_image_fill_s( data1, stride_x, stride_y, size_i_big_x, size_i_big_y, 0); // copy test the image into the copy dwt_util_copy_s( data1, copy, stride_x, stride_y, size_i_big_x, size_i_big_y); // forward dwt_cdf97_2f_s2( data1, data2, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, &j, decompose_one, zero_padding); // inverse dwt_cdf97_2i_s2( data2, data3, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, decompose_one, zero_padding); int ret; // compare if( dwt_util_compare_s(data3, copy, stride_x, stride_y, size_i_big_x, size_i_big_y) ) ret = 1; else ret = 0; dwt_util_free_image(&data1); dwt_util_free_image(&data2); dwt_util_free_image(&data3); dwt_util_free_image(&copy); return ret; } int dwt_util_test_cdf97_2_d( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding ) { int j = j_max; void *data, *copy; // allocate image dwt_util_alloc_image( &data, stride_x, stride_y, size_o_big_x, size_o_big_y); // allocate copy dwt_util_alloc_image( &copy, stride_x, stride_y, size_o_big_x, size_o_big_y); // fill with test pattern dwt_util_test_image_fill_d( data, stride_x, stride_y, size_i_big_x, size_i_big_y, 0); // copy test the image into the copy dwt_util_copy_d( data, copy, stride_x, stride_y, size_i_big_x, size_i_big_y); // forward dwt_cdf97_2f_d( data, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, &j, decompose_one, zero_padding); // inverse dwt_cdf97_2i_d( data, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, decompose_one, zero_padding); int ret; // compare if( dwt_util_compare_d(data, copy, stride_x, stride_y, size_i_big_x, size_i_big_y) ) ret = 1; else ret = 0; dwt_util_free_image(&data); dwt_util_free_image(&copy); return ret; } int dwt_util_test_cdf97_2_i( int stride_x, int stride_y, int size_o_big_x, int size_o_big_y, int size_i_big_x, int size_i_big_y, int j_max, int decompose_one, int zero_padding ) { int j = j_max; void *data, *copy; // allocate image dwt_util_alloc_image( &data, stride_x, stride_y, size_o_big_x, size_o_big_y); // allocate copy dwt_util_alloc_image( &copy, stride_x, stride_y, size_o_big_x, size_o_big_y); // fill with test pattern dwt_util_test_image_fill_i( data, stride_x, stride_y, size_i_big_x, size_i_big_y, 0); // copy test the image into the copy dwt_util_copy_i( data, copy, stride_x, stride_y, size_i_big_x, size_i_big_y); // forward dwt_cdf97_2f_i( data, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, &j, decompose_one, zero_padding); // inverse dwt_cdf97_2i_i( data, stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j, decompose_one, zero_padding); int ret; // compare if( dwt_util_compare_i(data, copy, stride_x, stride_y, size_i_big_x, size_i_big_y) ) ret = 1; else ret = 0; dwt_util_free_image(&data); dwt_util_free_image(&copy); return ret; } 
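/*
 * Usage sketch (illustrative only, therefore compiled out): exercise the
 * CDF 9/7 round-trip self-tests defined in this file for a few image sizes.
 * The array type, the stride option (1 = next-prime stride, see
 * dwt_util_get_stride above) and the number of decomposition levels are
 * arbitrary example values, not library defaults.  The self-test returns
 * non-zero when the reconstructed image differs from the original.
 */
#if 0
static void example_selftest_cdf97_2_s(void)
{
	const int sizes[] = { 64, 65, 127, 128 };

	for(unsigned int i = 0; i < sizeof(sizes)/sizeof(*sizes); i++)
	{
		// dwt_util_test2_cdf97_2_s() derives strides and padded sizes via
		// dwt_util_get_sizes_s() and then runs the forward + inverse transform
		const int differs = dwt_util_test2_cdf97_2_s(
			DWT_ARR_SIMPLE,
			sizes[i], sizes[i],
			/* opt_stride */ 1,
			/* j_max: three decomposition levels, example value */ 3,
			/* decompose_one */ 0
		);

		if( differs )
			dwt_util_log(LOG_ERR, "round-trip failed for %ix%i\n", sizes[i], sizes[i]);
		else
			dwt_util_log(LOG_DBG, "round-trip ok for %ix%i\n", sizes[i], sizes[i]);
	}
}
#endif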
int dwt_util_test2_cdf97_2_s( enum dwt_array array_type, int size_x, int size_y, int opt_stride, int j_max, int decompose_one ) { int stride_x; int stride_y; int size_o_big_x; int size_o_big_y; int size_i_big_x; int size_i_big_y; // get sizes dwt_util_get_sizes_s( array_type, size_x, size_y, opt_stride, &stride_x, &stride_y, &size_o_big_x, &size_o_big_y, &size_i_big_x, &size_i_big_y ); return dwt_util_test_cdf97_2_s( stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, decompose_one, 0 ); } int dwt_util_test2_cdf97_2_s2( enum dwt_array array_type, int size_x, int size_y, int opt_stride, int j_max, int decompose_one ) { int stride_x; int stride_y; int size_o_big_x; int size_o_big_y; int size_i_big_x; int size_i_big_y; // get sizes dwt_util_get_sizes_s( array_type, size_x, size_y, opt_stride, &stride_x, &stride_y, &size_o_big_x, &size_o_big_y, &size_i_big_x, &size_i_big_y ); return dwt_util_test_cdf97_2_s2( stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, decompose_one, 0 ); } int dwt_util_test2_cdf97_2_d( enum dwt_array array_type, int size_x, int size_y, int opt_stride, int j_max, int decompose_one ) { int stride_x; int stride_y; int size_o_big_x; int size_o_big_y; int size_i_big_x; int size_i_big_y; // get sizes dwt_util_get_sizes_d( array_type, size_x, size_y, opt_stride, &stride_x, &stride_y, &size_o_big_x, &size_o_big_y, &size_i_big_x, &size_i_big_y ); return dwt_util_test_cdf97_2_d( stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, decompose_one, 0 ); } int dwt_util_test2_cdf97_2_i( enum dwt_array array_type, int size_x, int size_y, int opt_stride, int j_max, int decompose_one ) { int stride_x; int stride_y; int size_o_big_x; int size_o_big_y; int size_i_big_x; int size_i_big_y; // get sizes dwt_util_get_sizes_i( array_type, size_x, size_y, opt_stride, &stride_x, &stride_y, &size_o_big_x, &size_o_big_y, &size_i_big_x, &size_i_big_y ); return dwt_util_test_cdf97_2_i( stride_x, stride_y, size_o_big_x, size_o_big_y, size_i_big_x, size_i_big_y, j_max, decompose_one, 0 ); } void dwt_util_abs_s( void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { assert( ptr ); for(int y = 0; y < size_y; y++) for(int x = 0; x < size_x; x++) { float *c = dwt_util_addr_coeff_s(ptr, y, x, stride_x, stride_y); *c = fabsf(*c); } } struct delta { int curr; int *symb; int symb_no; int next; void (*func)(void *, int, int *, int); }; static int fsm_symb_found(int symb, int *symb_group, int symb_no) { int found = 0; for(int s = 0; s < symb_no; s++) { if( symb == symb_group[s] ) found = 1; } return found; } /** * @brief Finite state machine. * @return Returns final or error state. 
*/ static int fsm( void *ctx, int (*get_symb)(void *), struct delta *delta, int count, int s_init, int s_final, int s_error ) { int state = s_init; while(1) { int symb = get_symb(ctx); int d; for(d = 0; d < count; d++) { struct delta *row = &delta[d]; if( state == row->curr ) { if( fsm_symb_found(symb, row->symb, row->symb_no) ) { if( row->func ) row->func(ctx, symb, row->symb, row->symb_no); state = row->next; break; } } } if( count == d ) state = s_error; if( s_error == state ) { dwt_util_log(LOG_DBG, "FSM in error state: symb=%i(%c)\n", symb, (char)symb); } if( s_final == state || s_error == state ) break; } return state; } int dwt_util_save_to_mat_s( const char *path, const void *ptr, int size_x, int size_y, int stride_x, int stride_y ) { //dwt_util_log(LOG_DBG, "size_x=%i size_y=%i\n", size_x, size_y); FILE *file = fopen(path, "w"); if( NULL == file ) return 1; int symb_delim[] = { ',', ';', '\t', ' ' }; int symb_newline[] = { '\n', '\r' }; for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { float coeff = *dwt_util_addr_coeff_const_s( ptr, y, x, stride_x, stride_y ); fprintf(file, "%f", coeff); if( x+1 != size_x ) fprintf(file, "%c", (char)symb_delim[0]); } fprintf(file, "%c", (char)symb_newline[0]); } fclose(file); return 0; } int dwt_util_save_to_mat_i16( const char *path, const void *ptr, int size_x, int size_y, int stride_x, int stride_y ) { //dwt_util_log(LOG_DBG, "size_x=%i size_y=%i\n", size_x, size_y); FILE *file = fopen(path, "w"); if( NULL == file ) return 1; int symb_delim[] = { ',', ';', '\t', ' ' }; int symb_newline[] = { '\n', '\r' }; for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { int16_t coeff = *dwt_util_addr_coeff_const_i16( ptr, y, x, stride_x, stride_y ); fprintf(file, "%hi", coeff); if( x+1 != size_x ) fprintf(file, "%c", (char)symb_delim[0]); } fprintf(file, "%c", (char)symb_newline[0]); } fclose(file); return 0; } struct mat_context { FILE *file; int curr_cols; int min_cols; int rows; void **ptr; int *size_x; int *size_y; int *stride_x; int *stride_y; }; static void mat_context_init(struct mat_context *ctx, FILE *file, void **ptr, int *size_x, int *size_y, int *stride_x, int *stride_y) { ctx->file = file; ctx->ptr = ptr; ctx->size_x = size_x; ctx->size_y = size_y; ctx->stride_x = stride_x; ctx->stride_y = stride_y; *ptr = NULL; } static void mat_context_reset(struct mat_context *ctx) { ctx->curr_cols = 0; ctx->min_cols = 0; ctx->rows = 0; } int mat_get_symb(void *ctx) { struct mat_context *c = ctx; return fgetc(c->file); } int mat_unget_symb(void *ctx, int symb) { struct mat_context *c = ctx; return ungetc(symb, c->file); } void mat_end_line(void *ctx, int symb, int *symb_group, int symb_no) { struct mat_context *c = ctx; UNUSED(symb); UNUSED(symb_group); UNUSED(symb_no); //dwt_util_log(LOG_DBG, "end line: curr_cols=%i min_cols=%i\n", c->curr_cols, c->min_cols); c->min_cols = min( c->curr_cols?c->curr_cols:c->min_cols, c->min_cols?c->min_cols:c->curr_cols); c->curr_cols = 0; } static int str_val_s(const char *buff, float *val) { return 1 != sscanf(buff, "%f", val); } static int str_val_i(const char *buff, int *val) { return 1 != sscanf(buff, "%i", val); } static int str_val_i16(const char *buff, int16_t *val) { return 1 != sscanf(buff, "%hi", val); } #define CELL_MAX 256 void mat_cell_read_s(void *ctx, int symb, int *symb_group, int symb_no) { struct mat_context *c = ctx; char buff[CELL_MAX]; int cnt = 0; buff[cnt++] = (char)symb; do{ int symb_new = mat_get_symb(ctx); if( fsm_symb_found(symb_new, symb_group, symb_no) ) { 
buff[cnt++] = (char)symb_new; } else { mat_unget_symb(ctx, symb_new); break; } } while( cnt+1 < CELL_MAX ); buff[cnt] = 0; int pos_x = c->curr_cols-1; int pos_y = c->rows; float val; if( str_val_s(buff, &val) ) { dwt_util_log(LOG_WARN, "invalid cell content\n"); } else { //dwt_util_log(LOG_DBG, "store %f at (y=%i,x=%i)\n", val, pos_y, pos_x); if( pos_x+1 > *c->size_x ) { dwt_util_log(LOG_WARN, "x-coordinate is over limit\n"); } else { float *coeff = dwt_util_addr_coeff_s( *c->ptr, pos_y, pos_x, *c->stride_x, *c->stride_y ); *coeff = val; } } } #undef CELL_MAX #define CELL_MAX 256 void mat_cell_read_i(void *ctx, int symb, int *symb_group, int symb_no) { struct mat_context *c = ctx; char buff[CELL_MAX]; int cnt = 0; buff[cnt++] = (char)symb; do{ int symb_new = mat_get_symb(ctx); if( fsm_symb_found(symb_new, symb_group, symb_no) ) { buff[cnt++] = (char)symb_new; } else { mat_unget_symb(ctx, symb_new); break; } } while( cnt+1 < CELL_MAX ); buff[cnt] = 0; int pos_x = c->curr_cols-1; int pos_y = c->rows; int val; if( str_val_i(buff, &val) ) { dwt_util_log(LOG_WARN, "invalid cell content\n"); } else { //dwt_util_log(LOG_DBG, "store %f at (y=%i,x=%i)\n", val, pos_y, pos_x); if( pos_x+1 > *c->size_x ) { dwt_util_log(LOG_WARN, "x-coordinate is over limit\n"); } else { int *coeff = dwt_util_addr_coeff_i( *c->ptr, pos_y, pos_x, *c->stride_x, *c->stride_y ); *coeff = val; } } } #undef CELL_MAX #define CELL_MAX 256 void mat_cell_read_i16(void *ctx, int symb, int *symb_group, int symb_no) { struct mat_context *c = ctx; char buff[CELL_MAX]; int cnt = 0; buff[cnt++] = (char)symb; do { int symb_new = mat_get_symb(ctx); if( fsm_symb_found(symb_new, symb_group, symb_no) ) { buff[cnt++] = (char)symb_new; } else { mat_unget_symb(ctx, symb_new); break; } } while( cnt+1 < CELL_MAX ); buff[cnt] = 0; int pos_x = c->curr_cols-1; int pos_y = c->rows; int val; if( str_val_i16(buff, &val) ) { dwt_util_log(LOG_WARN, "invalid cell content\n"); } else { //dwt_util_log(LOG_DBG, "store %f at (y=%i,x=%i)\n", val, pos_y, pos_x); if( pos_x+1 > *c->size_x ) { dwt_util_log(LOG_WARN, "x-coordinate is over limit\n"); } else { int *coeff = dwt_util_addr_coeff_i16( *c->ptr, pos_y, pos_x, *c->stride_x, *c->stride_y ); *coeff = val; } } } #undef CELL_MAX void mat_new_cell_s(void *ctx, int symb, int *symb_group, int symb_no) { struct mat_context *c = ctx; c->curr_cols++; // read a cell content when the matrix is allocated if( *c->ptr ) mat_cell_read_s(ctx, symb, symb_group, symb_no); } void mat_new_cell_i(void *ctx, int symb, int *symb_group, int symb_no) { struct mat_context *c = ctx; c->curr_cols++; // read a cell content when the matrix is allocated if( *c->ptr ) mat_cell_read_i(ctx, symb, symb_group, symb_no); } void mat_new_cell_i16(void *ctx, int symb, int *symb_group, int symb_no) { struct mat_context *c = ctx; c->curr_cols++; // read a cell content when the matrix is allocated if( *c->ptr ) mat_cell_read_i16(ctx, symb, symb_group, symb_no); } void mat_new_line(void *ctx, int symb, int *symb_group, int symb_no) { struct mat_context *c = ctx; //dwt_util_log(LOG_DBG, "new line on row %i\n", c->rows+1); mat_end_line(ctx, symb, symb_group, symb_no); c->rows++; } int dwt_util_load_from_mat_s( const char *path, void **ptr, int *size_x, int *size_y, int *stride_x, int *stride_y ) { FILE *file = fopen(path, "r"); if( NULL == file ) { *ptr = NULL; return 1; } enum state { S_START, S_DELIM, S_CELL, S_FINAL, S_ERROR }; int symb_delim[] = { ',', ';', '\t', ' ' }; int symb_newline[] = { '\n', '\r' }; int symb_number[] = { '0', '1', '2', '3', 
'4', '5', '6', '7', '8', '9', '.', '-', 'e', '+' }; int symb_eof[] = { EOF }; struct delta delta[] = { { S_START, symb_delim, sizeof_arr(symb_delim), S_DELIM, 0 }, { S_START, symb_newline, sizeof_arr(symb_newline), S_START, mat_end_line }, { S_START, symb_number, sizeof_arr(symb_number), S_CELL, mat_new_cell_s }, { S_START, symb_eof, sizeof_arr(symb_eof), S_FINAL, mat_end_line }, { S_DELIM, symb_delim, sizeof_arr(symb_delim), S_DELIM, 0 }, { S_DELIM, symb_newline, sizeof_arr(symb_newline), S_START, mat_new_line }, { S_DELIM, symb_number, sizeof_arr(symb_number), S_CELL, mat_new_cell_s }, { S_DELIM, symb_eof, sizeof_arr(symb_eof), S_FINAL, mat_end_line }, { S_CELL, symb_number, sizeof_arr(symb_number), S_CELL, 0 }, { S_CELL, symb_newline, sizeof_arr(symb_newline), S_START, mat_new_line }, { S_CELL, symb_delim, sizeof_arr(symb_delim), S_DELIM, 0 }, { S_CELL, symb_eof, sizeof_arr(symb_eof), S_FINAL, mat_end_line }, }; struct mat_context ctx; mat_context_init(&ctx, file, ptr, size_x, size_y, stride_x, stride_y); mat_context_reset(&ctx); if( S_ERROR == fsm(&ctx, mat_get_symb, delta, sizeof_arr(delta), S_START, S_FINAL, S_ERROR) ) { fclose(file); *ptr = NULL; return 2; } //dwt_util_log(LOG_DBG, "y=%i x=%i\n", ctx.rows, ctx.min_cols); *size_x = ctx.min_cols; *size_y = ctx.rows; *stride_y = sizeof(float); *stride_x = dwt_util_get_opt_stride(*stride_y * *size_x); dwt_util_alloc_image(ctx.ptr, *ctx.stride_x, *ctx.stride_y, *ctx.size_x, *ctx.size_y); mat_context_reset(&ctx); rewind(file); if( S_ERROR == fsm(&ctx, mat_get_symb, delta, sizeof_arr(delta), S_START, S_FINAL, S_ERROR) ) { fclose(file); // free allocated image dwt_util_free_image(ptr); *ptr = NULL; return 3; } fclose(file); return 0; } int dwt_util_load_from_mat_i( const char *path, void **ptr, int *size_x, int *size_y, int *stride_x, int *stride_y ) { FILE *file = fopen(path, "r"); if( NULL == file ) { *ptr = NULL; return 1; } enum state { S_START, S_DELIM, S_CELL, S_FINAL, S_ERROR }; int symb_delim[] = { ',', ';', '\t', ' ' }; int symb_newline[] = { '\n', '\r' }; int symb_number[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '-' }; int symb_eof[] = { EOF }; struct delta delta[] = { { S_START, symb_delim, sizeof_arr(symb_delim), S_DELIM, 0 }, { S_START, symb_newline, sizeof_arr(symb_newline), S_START, mat_end_line }, { S_START, symb_number, sizeof_arr(symb_number), S_CELL, mat_new_cell_i }, { S_START, symb_eof, sizeof_arr(symb_eof), S_FINAL, mat_end_line }, { S_DELIM, symb_delim, sizeof_arr(symb_delim), S_DELIM, 0 }, { S_DELIM, symb_newline, sizeof_arr(symb_newline), S_START, mat_new_line }, { S_DELIM, symb_number, sizeof_arr(symb_number), S_CELL, mat_new_cell_i }, { S_DELIM, symb_eof, sizeof_arr(symb_eof), S_FINAL, mat_end_line }, { S_CELL, symb_number, sizeof_arr(symb_number), S_CELL, 0 }, { S_CELL, symb_newline, sizeof_arr(symb_newline), S_START, mat_new_line }, { S_CELL, symb_delim, sizeof_arr(symb_delim), S_DELIM, 0 }, { S_CELL, symb_eof, sizeof_arr(symb_eof), S_FINAL, mat_end_line }, }; struct mat_context ctx; mat_context_init(&ctx, file, ptr, size_x, size_y, stride_x, stride_y); mat_context_reset(&ctx); if( S_ERROR == fsm(&ctx, mat_get_symb, delta, sizeof_arr(delta), S_START, S_FINAL, S_ERROR) ) { fclose(file); *ptr = NULL; return 2; } //dwt_util_log(LOG_DBG, "y=%i x=%i\n", ctx.rows, ctx.min_cols); *size_x = ctx.min_cols; *size_y = ctx.rows; *stride_y = sizeof(int); *stride_x = dwt_util_get_opt_stride(*stride_y * *size_x); dwt_util_alloc_image(ctx.ptr, *ctx.stride_x, *ctx.stride_y, *ctx.size_x, *ctx.size_y); 
mat_context_reset(&ctx); rewind(file); if( S_ERROR == fsm(&ctx, mat_get_symb, delta, sizeof_arr(delta), S_START, S_FINAL, S_ERROR) ) { fclose(file); // free allocated image dwt_util_free_image(ptr); *ptr = NULL; return 3; } fclose(file); return 0; } int dwt_util_load_from_mat_i16( const char *path, void **ptr, int *size_x, int *size_y, int *stride_x, int *stride_y ) { FILE *file = fopen(path, "r"); if( NULL == file ) { *ptr = NULL; return 1; } enum state { S_START, S_DELIM, S_CELL, S_FINAL, S_ERROR }; int symb_delim[] = { ',', ';', '\t', ' ' }; int symb_newline[] = { '\n', '\r' }; int symb_number[] = { '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '.', '-' }; int symb_eof[] = { EOF }; struct delta delta[] = { { S_START, symb_delim, sizeof_arr(symb_delim), S_DELIM, 0 }, { S_START, symb_newline, sizeof_arr(symb_newline), S_START, mat_end_line }, { S_START, symb_number, sizeof_arr(symb_number), S_CELL, mat_new_cell_i16 }, { S_START, symb_eof, sizeof_arr(symb_eof), S_FINAL, mat_end_line }, { S_DELIM, symb_delim, sizeof_arr(symb_delim), S_DELIM, 0 }, { S_DELIM, symb_newline, sizeof_arr(symb_newline), S_START, mat_new_line }, { S_DELIM, symb_number, sizeof_arr(symb_number), S_CELL, mat_new_cell_i16 }, { S_DELIM, symb_eof, sizeof_arr(symb_eof), S_FINAL, mat_end_line }, { S_CELL, symb_number, sizeof_arr(symb_number), S_CELL, 0 }, { S_CELL, symb_newline, sizeof_arr(symb_newline), S_START, mat_new_line }, { S_CELL, symb_delim, sizeof_arr(symb_delim), S_DELIM, 0 }, { S_CELL, symb_eof, sizeof_arr(symb_eof), S_FINAL, mat_end_line }, }; struct mat_context ctx; mat_context_init(&ctx, file, ptr, size_x, size_y, stride_x, stride_y); mat_context_reset(&ctx); if( S_ERROR == fsm(&ctx, mat_get_symb, delta, sizeof_arr(delta), S_START, S_FINAL, S_ERROR) ) { fclose(file); *ptr = NULL; return 2; } //dwt_util_log(LOG_DBG, "y=%i x=%i\n", ctx.rows, ctx.min_cols); *size_x = ctx.min_cols; *size_y = ctx.rows; *stride_y = sizeof(int16_t); *stride_x = dwt_util_get_opt_stride(*stride_y * *size_x); dwt_util_alloc_image(ctx.ptr, *ctx.stride_x, *ctx.stride_y, *ctx.size_x, *ctx.size_y); mat_context_reset(&ctx); rewind(file); if( S_ERROR == fsm(&ctx, mat_get_symb, delta, sizeof_arr(delta), S_START, S_FINAL, S_ERROR) ) { fclose(file); // free allocated image dwt_util_free_image(ptr); *ptr = NULL; return 3; } fclose(file); return 0; } /** * @brief Saturation arithmetic. 
*/ static int saturate_i(int val, int lo, int hi) { if( val < lo ) return lo; if( val > hi ) return hi; return val; } static float dot_s( const void *ptr1, int size1_x, int size1_y, int stride1_x, int stride1_y, int displ_x, int displ_y, const void *ptr2, int size2_x, int size2_y, int stride2_x, int stride2_y ) { float sum = 0.0f; for(int y1 = 0; y1 < size1_y; y1++) for(int x1 = 0; x1 < size1_x; x1++) { float val1 = *dwt_util_addr_coeff_const_s( ptr1, y1, x1, stride1_x, stride1_y ); // saturation arithmetic int y2 = saturate_i(y1 + displ_y, 0, size2_y-1); int x2 = saturate_i(x1 + displ_x, 0, size2_x-1); float val2 = *dwt_util_addr_coeff_const_s( ptr2, y2, x2, stride2_x, stride2_y ); sum += val1 * val2; } return sum; } float dwt_util_dot_s( const void *ptr1, int size1_x, int size1_y, int stride1_x, int stride1_y, int displ_x, int displ_y, const void *ptr2, int size2_x, int size2_y, int stride2_x, int stride2_y ) { return dot_s( ptr1, size1_x, size1_y, stride1_x, stride1_y, displ_x, displ_y, ptr2, size2_x, size2_y, stride2_x, stride2_y ); } static void normalize_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y, float p ) { float norm = dwt_util_band_lpnorm_s( ptr, stride_x, stride_y, size_x, size_y, p ); for(int y = 0; y < size_y; y++) for(int x = 0; x < size_x; x++) { float *coeff = dwt_util_addr_coeff_s( ptr, y, x, stride_x, stride_y ); *coeff /= norm; } dwt_util_log(LOG_DBG, "normalize: p=%f, norm %f => %f\n", p, norm, dwt_util_band_lpnorm_s( ptr, stride_x, stride_y, size_x, size_y, p ) ); } void dwt_util_normalize_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y, float p ) { normalize_s( ptr, size_x, size_y, stride_x, stride_y, p ); } static void add_s( void *ptr1, int size1_x, int size1_y, int stride1_x, int stride1_y, int displ_x, int displ_y, const void *ptr2, int size2_x, int size2_y, int stride2_x, int stride2_y ) { for(int y1 = 0; y1 < size1_y; y1++) for(int x1 = 0; x1 < size1_x; x1++) { float *pdst = dwt_util_addr_coeff_s( ptr1, y1, x1, stride1_x, stride1_y ); // saturation arithmetic int y2 = saturate_i(y1 + displ_y, 0, size2_y-1); int x2 = saturate_i(x1 + displ_x, 0, size2_x-1); float src = *dwt_util_addr_coeff_const_s( ptr2, y2, x2, stride2_x, stride2_y ); *pdst += src; } } static void mul_s( void *ptr1, int size1_x, int size1_y, int stride1_x, int stride1_y, int displ_x, int displ_y, const void *ptr2, int size2_x, int size2_y, int stride2_x, int stride2_y ) { for(int y1 = 0; y1 < size1_y; y1++) for(int x1 = 0; x1 < size1_x; x1++) { float *pdst = dwt_util_addr_coeff_s( ptr1, y1, x1, stride1_x, stride1_y ); // saturation arithmetic int y2 = saturate_i(y1 + displ_y, 0, size2_y-1); int x2 = saturate_i(x1 + displ_x, 0, size2_x-1); float src = *dwt_util_addr_coeff_const_s( ptr2, y2, x2, stride2_x, stride2_y ); *pdst *= src; } } void dwt_util_add_s( void *ptr1, int size1_x, int size1_y, int stride1_x, int stride1_y, int displ_x, int displ_y, const void *ptr2, int size2_x, int size2_y, int stride2_x, int stride2_y ) { add_s( ptr1, size1_x, size1_y, stride1_x, stride1_y, displ_x, displ_y, ptr2, size2_x, size2_y, stride2_x, stride2_y ); } void dwt_util_mul_s( void *ptr1, int size1_x, int size1_y, int stride1_x, int stride1_y, int displ_x, int displ_y, const void *ptr2, int size2_x, int size2_y, int stride2_x, int stride2_y ) { mul_s( ptr1, size1_x, size1_y, stride1_x, stride1_y, displ_x, displ_y, ptr2, size2_x, size2_y, stride2_x, stride2_y ); } int dwt_util_save_to_svm_s( const char *path, const void *ptr, int size_x, int size_y, int stride_x, int 
stride_y, const void *cls_ptr, int cls_size_x, int cls_size_y, int cls_stride_x, int cls_stride_y ) { // assert assert( path && ptr && cls_ptr ); assert( size_x > 0 && size_y > 0 && cls_size_x == 1 && cls_size_y == size_y ); // fopen FILE *file = fopen(path, "w"); if( NULL == file ) return 1; // for each y: for(int y = 0; y < size_y; y++) { // get label(y) int label = *dwt_util_addr_coeff_const_i( cls_ptr, y, // y 0, // x cls_stride_x, cls_stride_y ); // put label(y) fprintf(file, "%i", label); // for each x: for(int x = 0; x < size_x; x++) { // get value(y,x) float coeff = *dwt_util_addr_coeff_const_s( ptr, y, x, stride_x, stride_y ); // put value(y,x) fprintf(file, " %i:%f", x+1, coeff); } // line end fprintf(file, "\n"); } // fclose fclose(file); return 0; } int dwt_util_find_min_max_s( const void *ptr, int size_x, int size_y, int stride_x, int stride_y, float *min, float *max ) { assert( ptr ); assert( size_x > 0 && size_y > 0 ); *min = *dwt_util_addr_coeff_const_s( ptr, 0, 0, stride_x, stride_y ); *max = *dwt_util_addr_coeff_const_s( ptr, 0, 0, stride_x, stride_y ); for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { float coeff = *dwt_util_addr_coeff_const_s( ptr, y, x, stride_x, stride_y ); if( coeff > *max ) *max = coeff; if( coeff < *min ) *min = coeff; } } return 0; } int dwt_util_find_min_max_i( const void *ptr, int size_x, int size_y, int stride_x, int stride_y, int *min, int *max ) { assert( ptr ); assert( size_x > 0 && size_y > 0 ); *min = *dwt_util_addr_coeff_const_i( ptr, 0, 0, stride_x, stride_y ); *max = *dwt_util_addr_coeff_const_i( ptr, 0, 0, stride_x, stride_y ); for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { float coeff = *dwt_util_addr_coeff_const_i( ptr, y, x, stride_x, stride_y ); if( coeff > *max ) *max = coeff; if( coeff < *min ) *min = coeff; } } return 0; } int dwt_util_shift_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y, float a ) { assert( ptr ); for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { *dwt_util_addr_coeff_s( ptr, y, x, stride_x, stride_y ) += a; } } return 0; } int dwt_util_scale_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y, float a ) { assert( ptr ); for(int y = 0; y < size_y; y++) { for(int x = 0; x < size_x; x++) { *dwt_util_addr_coeff_s( ptr, y, x, stride_x, stride_y ) *= a; } } return 0; } int dwt_util_scale21_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y, float lo, float hi ) { assert( ptr ); assert( hi > lo ); float target_diff = hi - lo; // for each y: for(int y = 0; y < size_y; y++) { float min, max; // find min, max (on row) dwt_util_find_min_max_s( dwt_util_addr_coeff_const_s( ptr, y, 0, stride_x, stride_y ), // (y,0) size_x, // size_x 1, // 1 stride_x, stride_y, &min, &max ); float diff = max - min; if( max == min ) { dwt_util_log(LOG_WARN, "Cannot scale row y=%i (min=max=%f)\n", y, min); continue; } //dwt_util_log(LOG_DBG, "scale(y=%i) <%f..%f> => <%f..%f>\n", y, min, max, lo, hi); // shift min => lo (on row) dwt_util_shift_s( dwt_util_addr_coeff_s( ptr, y, 0, stride_x, stride_y ), // (y,0) size_x, // size_x 1, // 1 stride_x, stride_y, (lo-min) ); // scale to hi (on row) dwt_util_scale_s( dwt_util_addr_coeff_s( ptr, y, 0, stride_x, stride_y ), // (y,0) size_x, // size_x 1, // 1 stride_x, stride_y, (target_diff/diff) ); // check dwt_util_find_min_max_s( dwt_util_addr_coeff_const_s( ptr, y, 0, stride_x, stride_y ), // (y,0) size_x, // size_x 1, // 1 stride_x, stride_y, &min, &max ); //dwt_util_log(LOG_DBG, "scale(y=%i) 
<%f..%f>\n", y, min, max); } return 0; } int dwt_util_displace1_s( void *ptr, int size_x, int stride_y, int displ_x ) { // assert( ptr ); // not needed if( !displ_x ) return 0; if( displ_x > 0 ) { for(int x = 0; x < size_x; x++) { int src_x = saturate_i(x + displ_x, 0, size_x-1); *dwt_util_addr_coeff_s( ptr, 0, // y x, // x 0, // stride x stride_y // stride y ) = *dwt_util_addr_coeff_const_s( ptr, 0, // y src_x, // x 0, // stride x stride_y // stride y ); } } else // < 0 { for(int x = size_x-1; x >= 0; x--) { int src_x = saturate_i(x + displ_x, 0, size_x-1); *dwt_util_addr_coeff_s( ptr, 0, // y x, // x 0, // stride x stride_y // stride y ) = *dwt_util_addr_coeff_const_s( ptr, 0, // y src_x, // x 0, // stride x stride_y // stride y ); } } return 0; } int dwt_util_displace1_zero_s( void *ptr, int size_x, int stride_y, int displ_x ) { // assert( ptr ); // not needed if( !displ_x ) return 0; if( displ_x > 0 ) { for(int x = 0; x < size_x; x++) { int src_x = saturate_i(x + displ_x, 0, size_x-1); *dwt_util_addr_coeff_s( ptr, 0, // y x, // x 0, // stride x stride_y // stride y ) = ( x + displ_x != src_x ) ? 0.0f : *dwt_util_addr_coeff_const_s( ptr, 0, // y src_x, // x 0, // stride x stride_y // stride y ); } } else // < 0 { for(int x = size_x-1; x >= 0; x--) { int src_x = saturate_i(x + displ_x, 0, size_x-1); *dwt_util_addr_coeff_s( ptr, 0, // y x, // x 0, // stride x stride_y // stride y ) = ( x + displ_x != src_x ) ? 0.0f : *dwt_util_addr_coeff_const_s( ptr, 0, // y src_x, // x 0, // stride x stride_y // stride y ); } } return 0; } int dwt_util_get_center1_s( const void *ptr, int size_x, int stride_y ) { // assert( ptr ); // not needed assert( size_x > 0 ); // TODO: as an argument const int p = 10; // total "norm" float norm = dwt_util_band_lpnorm_s( ptr, 0, // stride x stride_y, // stride y size_x, // size x 1, // size y p // p ); if( 0.0f == norm ) { dwt_util_log(LOG_WARN, "Cannot get a center of signal due to its zero norm!\n"); return size_x/2; } // the value of p-norm raised to the power of p norm = powf(norm, p); float half = norm / 2; // indexes of center borders int lidx = -1; int ridx = -1; // "norm" accumulator float sum; sum = 0.0f; for(int x = 0; x < size_x; x++) { // get coeff float coeff = *dwt_util_addr_coeff_const_s( ptr, 0, // y x, // x 0, // stride x stride_y // stride y ); // accumulate "norm" sum += powf(fabsf(coeff), p); if( sum > half ) { ridx = x - 1; break; } } sum = 0.0f; for(int x = size_x-1; x >= 0; x--) { // get coeff float coeff = *dwt_util_addr_coeff_const_s( ptr, 0, // y x, // x 0, // stride x stride_y // stride y ); // accumulate "norm" sum += powf(fabsf(coeff), p); if( sum > half ) { lidx = x + 1; break; } } if( -1 == lidx || -1 == ridx ) { dwt_util_log(LOG_WARN, "Cannot found center indexes! 
lidx=%i ridx=%i norm=%f half=%f size_x=%i\n", lidx, ridx, norm, half, size_x); if( -1 == lidx && -1 == ridx ) return size_x / 2; if( -1 == lidx ) lidx = ridx; else ridx = lidx; } int center = (lidx + ridx) / 2; // dwt_util_log(LOG_DBG, "center at %i (%i %i)\n", center, lidx, ridx); return center; } int dwt_util_center1_s( void *ptr, int size_x, int stride_y, int max_iters ) { // iterations for(int i = 0; i < max_iters; i++) { int center = dwt_util_get_center1_s( ptr, size_x, stride_y ); int exp_center = size_x / 2; int displ = exp_center - center; #if 0 dwt_util_log(LOG_DBG, "i=%i: real_center=%i expected_center=%i displacement=%i\n", i, center, exp_center, displ); #endif if( !displ ) break; dwt_util_displace1_zero_s( ptr, size_x, stride_y, -displ ); } return 0; } int dwt_util_center21_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y, int max_iters ) { for(int y = 0; y < size_y; y++) { #if 0 dwt_util_log(LOG_DBG, "Centering vector y=%i\n", y); #endif dwt_util_center1_s( dwt_util_addr_coeff_s( ptr, y, // y 0, // x stride_x, stride_y ), size_x, stride_y, max_iters ); } return 0; } void dwt_util_shift21_med_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y ) { for(int y = 0; y < size_y; y++) { // single transformed vector void *src = dwt_util_addr_coeff_s(ptr, y, 0, stride_x, stride_y); int src_x = size_x; int src_y = 1; float med = dwt_util_band_med_s( src, stride_x, stride_y, src_x, src_y ); //dwt_util_log(LOG_DBG, "shift21_med: y=%i med=%f\n", y, med); dwt_util_shift_s( src, src_x, src_y, stride_x, stride_y, -med ); } } void *dwt_util_viewport( void *ptr, int size_x, int size_y, int stride_x, int stride_y, int offset_x, int offset_y ) { assert( offset_x < size_x && offset_y < size_y ); return dwt_util_addr_coeff_s(ptr, offset_y, offset_x, stride_x, stride_y); } void *dwt_util_crop21( void *ptr, int size_x, int size_y, int stride_x, int stride_y, int len_x ) { UNUSED(size_y); assert( len_x > 0 ); assert( len_x < size_x ); int center_x = size_x / 2; int offset_x = center_x - len_x/2; assert( offset_x + len_x <= size_x ); //dwt_util_log(LOG_DBG, "crop: offset_x=%i len_x=%i\n", offset_x, len_x); return dwt_util_addr_coeff_s(ptr, 0, offset_x, stride_x, stride_y); } void dwt_util_unit_vec_s( float *addr, int size, int offset ) { dwt_util_zero_vec_s(addr, size); addr[size/2+offset] = 1.f; } struct cbuff { char *ptr; size_t size; char *pos; }; static struct cbuff *cbuff_create() { struct cbuff *cbuff = (struct cbuff *)malloc(sizeof(struct cbuff)); cbuff->size = 1; cbuff->ptr = malloc(1); if(!cbuff->ptr) dwt_util_error("unable to allocate memory!\n"); cbuff->pos = cbuff->ptr; return cbuff; } static void cbuff_destroy(struct cbuff *cbuff) { free(cbuff->ptr); free(cbuff); } static void cbuff_reset(struct cbuff *cbuff) { cbuff->pos = cbuff->ptr; } static int cbuff_offset(struct cbuff *cbuff) { return cbuff->pos - cbuff->ptr; } static int cbuff_avail(struct cbuff *cbuff) { return cbuff->size - cbuff_offset(cbuff); } static void cbuff_realloc(struct cbuff *cbuff) { cbuff->size <<= 1; if(!cbuff->size) dwt_util_error("size_t is too small!\n"); int offset = cbuff_offset(cbuff); cbuff->ptr = realloc(cbuff->ptr, cbuff->size); if(!cbuff->ptr) dwt_util_error("unable to allocate %i bytes!\n", cbuff->size); cbuff->pos = cbuff->ptr + offset; } static char *cbuff_cstr(struct cbuff *cbuff) { return cbuff->ptr; } static void cbuff_sprintf(struct cbuff *cbuff, const char *format, ...) 
{ va_list ap; while(1) { va_start(ap, format); int n = vsnprintf(cbuff->pos, cbuff_avail(cbuff), format, ap); va_end(ap); if( n < 0 ) dwt_util_error("vsnprintf returned negative value!\n"); if( n >= cbuff_avail(cbuff) ) cbuff_realloc(cbuff); else { cbuff->pos += n; break; } } } const char *dwt_util_str_vec_s( const float *vec, int size ) { static struct cbuff *cbuff = NULL; if(!cbuff) cbuff = cbuff_create(); cbuff_reset(cbuff); cbuff_sprintf(cbuff, "[ "); for(int i = 0; i < size; i++) cbuff_sprintf(cbuff, "%+11.8f ", vec[i]); cbuff_sprintf(cbuff, "] "); cbuff_sprintf(cbuff, "(%i)", size); return cbuff_cstr(cbuff); } int dwt_util_save_sym_to_pgm_s( const char *path, float max_value, const void *ptr, int stride_x, int stride_y, int size_x, int size_y ) { assert( max_value > 0.f ); // alloc "clone" void *clone = dwt_util_alloc_image2(stride_x, stride_y, size_x, size_y); // copy "ptr" into "clone" dwt_util_copy_s(ptr, clone, stride_x, stride_y, size_x, size_y); // shift "clone" dwt_util_shift_s( clone, size_x, size_y, stride_x, stride_y, +max_value ); // save "clone" dwt_util_save_to_pgm_s( path, 2.f*max_value, clone, stride_x, stride_y, size_x, size_y ); // free "clone" dwt_util_free_image(&clone); return 0; }
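The text-matrix I/O above (dwt_util_save_to_mat_s together with the FSM-driven dwt_util_load_from_mat_s) can be exercised with a short round trip. A minimal sketch follows, assuming the public header is called libdwt.h (the actual header name is not shown in this excerpt) and using the error-code convention visible above: 0 = success, 1 = cannot open the file, 2 or 3 = parse failure.

/* Illustrative round trip for the text "mat" format. */
#include <stdio.h>
#include "libdwt.h" /* assumed header exposing the dwt_util_* API */

int demo_mat_roundtrip(void)
{
	const int size_x = 3, size_y = 2;
	const int stride_y = (int)sizeof(float);   /* step between columns */
	const int stride_x = stride_y * size_x;    /* packed row pitch */
	float data[2][3] = { { 1.f, 2.f, 3.f }, { 4.f, 5.f, 6.f } };

	if( dwt_util_save_to_mat_s("demo.mat", data, size_x, size_y, stride_x, stride_y) )
		return 1; /* could not open the file for writing */

	void *loaded;
	int lx, ly, lsx, lsy;
	if( dwt_util_load_from_mat_s("demo.mat", &loaded, &lx, &ly, &lsx, &lsy) )
		return 2; /* open or parse failure */

	printf("loaded a %i x %i matrix\n", lx, ly);

	dwt_util_free_image(&loaded);
	return 0;
}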
library.h
#ifndef __INCLUDED_LIBRARY_H__
#define __INCLUDED_LIBRARY_H__

#pragma omp declare simd
double BlackBoxFunction(const double x);

double InverseDerivative(const double x);

#endif
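The omp declare simd pragma asks an OpenMP 4.0+ compiler to generate vector variants of BlackBoxFunction, so that calls to it from a SIMD loop can be vectorized across iterations instead of forcing scalar calls. A purely illustrative definition and caller follow; the function body is a made-up placeholder, since the real implementation is not part of this excerpt.

/* Illustrative only: a hypothetical definition plus a SIMD caller loop. */
#include <math.h>
#include "library.h"

#pragma omp declare simd
double BlackBoxFunction(const double x)
{
	return exp(-x * x);   /* placeholder body, not the real function */
}

void apply_blackbox(const double *in, double *out, int n)
{
	/* The declare-simd attribute lets the compiler call a vector variant
	   of BlackBoxFunction for several lanes of x per iteration. */
	#pragma omp simd
	for (int i = 0; i < n; i++)
		out[i] = BlackBoxFunction(in[i]);
}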
mpifft.c
/* -*- mode: C; tab-width: 2; indent-tabs-mode: nil; fill-column: 79; coding: iso-latin-1-unix -*- */ /* mpifft.c */ #include <hpcc.h> #include "hpccfft.h" #include "wrapmpifftw.h" double *HPCC_fft_timings_forward, *HPCC_fft_timings_backward; static void MPIFFT0(HPCC_Params *params, int doIO, FILE *outFile, MPI_Comm comm, int locN, double *UGflops, s64Int_t *Un, double *UmaxErr, int *Ufailure) { int commRank, commSize, failure, flags; s64Int_t i, n; s64Int_t locn, loc0, alocn, aloc0, tls; double maxErr, tmp1, tmp2, tmp3, t0, t1, t2, t3, Gflops; double deps; fftw_complex *inout, *work; fftw_mpi_plan p; hpcc_fftw_mpi_plan ip; int sAbort, rAbort; #ifdef USING_FFTW int ilocn, iloc0, ialocn, ialoc0, itls; #endif failure = 1; Gflops = -1.0; deps = HPL_dlamch( HPL_MACH_EPS ); maxErr = 1.0 / deps; MPI_Comm_size( comm, &commSize ); MPI_Comm_rank( comm, &commRank ); n = locN; /* number of processes have been factored out - need to put it back in */ n *= commSize; n *= commSize; /* global vector size */ #ifdef USING_FFTW /* FFTW ver. 2 only supports vector sizes that fit in 'int' */ if (n > (1<<30)-1+(1<<30)) { #ifdef HPCC_FFTW_CHECK32 goto no_plan; #else if (doIO) { fprintf( outFile, "Warning: problem size too large: %ld*%d*%d\n", (long)(n / commSize / commSize), commSize, commSize ); } #endif } #endif #ifdef HPCC_FFTW_ESTIMATE flags = FFTW_ESTIMATE; #else flags = FFTW_MEASURE; #endif t1 = -MPI_Wtime(); p = fftw_mpi_create_plan( comm, n, FFTW_FORWARD, flags ); t1 += MPI_Wtime(); if (! p) goto no_plan; #ifdef USING_FFTW fftw_mpi_local_sizes( p, &ilocn, &iloc0, &ialocn, &ialoc0, &itls ); locn = ilocn; loc0 = iloc0; alocn = ialocn; aloc0 = ialoc0; tls = itls; #else fftw_mpi_local_sizes( p, &locn, &loc0, &alocn, &aloc0, &tls ); #endif inout = (fftw_complex *)HPCC_fftw_malloc( tls * (sizeof *inout) ); work = (fftw_complex *)HPCC_fftw_malloc( tls * (sizeof *work) ); sAbort = 0; if (! inout || ! work) sAbort = 1; MPI_Allreduce( &sAbort, &rAbort, 1, MPI_INT, MPI_SUM, comm ); if (rAbort > 0) { fftw_mpi_destroy_plan( p ); goto comp_end; } /* Make sure that `inout' and `work' are initialized in parallel if using Open MP: this will ensure better placement of pages if first-touch policy is used by a distrubuted shared memory machine. */ #ifdef _OPENMP #pragma omp parallel for for (i = 0; i < tls; ++i) { c_re( inout[i] ) = c_re( work[i] ) = 0.0; c_re( inout[i] ) = c_im( work[i] ) = 0.0; } #endif t0 = -MPI_Wtime(); HPCC_bcnrand( 2 * tls, 53 * commRank * 2 * tls, inout ); t0 += MPI_Wtime(); /* *void fftw_mpi(fftw_mpi_plan p, int n_fields, fftw_complex *local_data, fftw_complex *work); */ t2 = -MPI_Wtime(); fftw_mpi( p, 1, inout, work ); t2 += MPI_Wtime(); fftw_mpi_destroy_plan( p ); ip = HPCC_fftw_mpi_create_plan( comm, n, FFTW_BACKWARD, FFTW_ESTIMATE ); if (ip) { t3 = -MPI_Wtime(); HPCC_fftw_mpi( ip, 1, inout, work ); t3 += MPI_Wtime(); HPCC_fftw_mpi_destroy_plan( ip ); } HPCC_bcnrand( 2 * tls, 53 * commRank * 2 * tls, work ); /* regenerate data */ maxErr = 0.0; for (i = 0; i < locn; ++i) { tmp1 = c_re( inout[i] ) - c_re( work[i] ); tmp2 = c_im( inout[i] ) - c_im( work[i] ); tmp3 = sqrt( tmp1*tmp1 + tmp2*tmp2 ); maxErr = maxErr >= tmp3 ? 
maxErr : tmp3; } MPI_Allreduce( &maxErr, UmaxErr, 1, MPI_DOUBLE, MPI_MAX, comm ); maxErr = *UmaxErr; if (maxErr / log(n) / deps < params->test.thrsh) failure = 0; if (t2 > 0.0) Gflops = 1e-9 * (5.0 * n * log(n) / log(2.0)) / t2; if (doIO) { fprintf( outFile, "Number of nodes: %d\n", commSize ); fprintf( outFile, "Vector size: %20.0f\n", tmp1 = (double)n ); fprintf( outFile, "Generation time: %9.3f\n", t0 ); fprintf( outFile, "Tuning: %9.3f\n", t1 ); fprintf( outFile, "Computing: %9.3f\n", t2 ); fprintf( outFile, "Inverse FFT: %9.3f\n", t3 ); fprintf( outFile, "max(|x-x0|): %9.3e\n", maxErr ); fprintf( outFile, "Gflop/s: %9.3f\n", Gflops ); } comp_end: if (work) HPCC_fftw_free( work ); if (inout) HPCC_fftw_free( inout ); no_plan: *UGflops = Gflops; *Un = n; *UmaxErr = maxErr; *Ufailure = failure; } int HPCC_MPIFFT(HPCC_Params *params) { int commRank, commSize; int locN, procCnt, isComputing, doIO, failure = 0; s64Int_t n; double Gflops = -1.0, maxErr = -1.0; MPI_Comm comm; FILE *outFile; MPI_Comm_size( MPI_COMM_WORLD, &commSize ); MPI_Comm_rank( MPI_COMM_WORLD, &commRank ); doIO = commRank == 0 ? 1 : 0; if (doIO) { outFile = fopen( params->outFname, "a" ); if (! outFile) outFile = stderr; } /* There are two vectors of size 'n'/'commSize': inout, work, and internal work: 2*'n'/'commSize'; it's 4 vectors then. FFTE requires that the global vector size 'n' has to be at least as big as square of number of processes. The square is calculated in each factor independently. In other words, 'n' has to have at least twice as many 2 factors as the process count, twice as many 3 factors and twice as many 5 factors. */ #ifdef HPCC_FFT_235 locN = 0; procCnt = commSize + 1; do { int f[3]; procCnt--; for ( ; procCnt > 1 && HPCC_factor235( procCnt, f ); procCnt--) ; /* EMPTY */ /* Make sure the local vector size is greater than 0 */ locN = HPCC_LocalVectorSize( params, 4*procCnt, sizeof(fftw_complex), 0 ); for ( ; locN >= 1 && HPCC_factor235( locN, f ); locN--) ; /* EMPTY */ } while (locN < 1); #else /* Find power of two that is smaller or equal to number of processes */ for (procCnt = 1; procCnt <= (commSize >> 1); procCnt <<= 1) ; /* EMPTY */ /* Make sure the local vector size is greater than 0 */ while (1) { locN = HPCC_LocalVectorSize( params, 4*procCnt, sizeof(fftw_complex), 1 ); if (locN) break; procCnt >>= 1; } #endif isComputing = commRank < procCnt ? 1 : 0; HPCC_fft_timings_forward = params->MPIFFTtimingsForward; HPCC_fft_timings_backward = params->MPIFFTtimingsBackward; if (commSize == procCnt) comm = MPI_COMM_WORLD; else MPI_Comm_split( MPI_COMM_WORLD, isComputing ? 0 : MPI_UNDEFINED, commRank, &comm ); if (isComputing) MPIFFT0( params, doIO, outFile, comm, locN, &Gflops, &n, &maxErr, &failure ); if (commSize != procCnt && isComputing && comm != MPI_COMM_NULL) MPI_Comm_free( &comm ); params->MPIFFT_N = n; params->MPIFFT_Procs = procCnt; params->MPIFFT_maxErr = maxErr; MPI_Bcast( &Gflops, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD ); params->MPIFFTGflops = Gflops; params->FFTEnblk = FFTE_NBLK; params->FFTEnp = FFTE_NP; params->FFTEl2size = FFTE_L2SIZE; if (failure) params->Failure = 1; if (doIO) if (outFile != stderr) fclose( outFile ); return 0; }
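For reference, the Gflop/s figure printed by MPIFFT0 uses the conventional 5 n log2(n) operation count for a complex FFT of length n, divided by the forward-transform time t2. A small standalone check with made-up numbers:

/* Worked example of the reported metric; the vector length and timing
   below are illustrative, not measured values. */
#include <math.h>
#include <stdio.h>

int main(void)
{
	double n  = 1048576.0;  /* n = 2^20 */
	double t2 = 0.050;      /* hypothetical forward FFT time in seconds */

	double flops  = 5.0 * n * log(n) / log(2.0);  /* 5 * 2^20 * 20, about 1.05e8 */
	double gflops = 1e-9 * flops / t2;            /* about 2.1 Gflop/s */

	printf("%.3f Gflop/s\n", gflops);
	return 0;
}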
mapped_max_pool.h
#ifndef MAPPED_MAX_POOL_H_ #define MAPPED_MAX_POOL_H_ #include <math.h> #include <omp.h> #include <torch/extension.h> #include <limits> #include "common/mapped_max_pool.h" #include "core/resample.h" namespace spherical { namespace cpu { template <typename T> void MappedMaxPool2D(const int num_kernels, torch::Tensor in_data, torch::Tensor sample_map, // OH x OW x K x 2 const int channels, const int in_height, const int in_width, const int out_height, const int out_width, const int kernel_size, const InterpolationType interpolation, torch::Tensor out_data, torch::Tensor out_idx) // Indices of kernel sample in map { const T *in_data_ptr = in_data.data_ptr<T>(); const T *sample_map_ptr = sample_map.data_ptr<T>(); T *out_data_ptr = out_data.data_ptr<T>(); int64_t *out_idx_ptr = out_idx.data_ptr<int64_t>(); int index; #pragma omp parallel for shared(in_data_ptr, sample_map_ptr, out_data_ptr, \ out_idx_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::MappedMaxPool2D(index, in_data_ptr, sample_map_ptr, channels, in_height, in_width, out_height, out_width, kernel_size, interpolation, out_data_ptr, out_idx_ptr); } } template <typename T> void MappedMaxUnpool2D(const int num_kernels, torch::Tensor grad_output, torch::Tensor idx_mask, torch::Tensor sample_map, const int channels, const int orig_height, const int orig_width, const int pooled_height, const int pooled_width, const int kernel_size, const InterpolationType interpolation, torch::Tensor grad_input) { const T *grad_output_ptr = grad_output.data_ptr<T>(); const int64_t *idx_mask_ptr = idx_mask.data_ptr<int64_t>(); const T *sample_map_ptr = sample_map.data_ptr<T>(); T *grad_input_ptr = grad_input.data_ptr<T>(); int index; #pragma omp parallel for shared( \ grad_output_ptr, idx_mask_ptr, sample_map_ptr, \ grad_input_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::MappedMaxUnpool2D(index, grad_output_ptr, idx_mask_ptr, sample_map_ptr, channels, orig_height, orig_width, pooled_height, pooled_width, kernel_size, interpolation, grad_input_ptr); } } // ------------------------------------------------- // ------------------------------------------------- template <typename T> void MappedMaxPool2DWeighted( const int num_kernels, torch::Tensor in_data, torch::Tensor sample_map, // OH x OW x K x P x 2 torch::Tensor interp_weights, // OH x OW x K x P const int channels, const int in_height, const int in_width, const int out_height, const int out_width, const int kernel_size, const InterpolationType interpolation, const int num_interp_pts, torch::Tensor out_data, torch::Tensor out_idx) // Indices of kernel sample in map { const T *in_data_ptr = in_data.data_ptr<T>(); const T *sample_map_ptr = sample_map.data_ptr<T>(); const T *interp_weights_ptr = interp_weights.data_ptr<T>(); T *out_data_ptr = out_data.data_ptr<T>(); int64_t *out_idx_ptr = out_idx.data_ptr<int64_t>(); int index; #pragma omp parallel for shared(in_data_ptr, sample_map_ptr, \ interp_weights_ptr, out_data_ptr, \ out_idx_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::MappedMaxPool2DWeighted( index, in_data_ptr, sample_map_ptr, interp_weights_ptr, channels, in_height, in_width, out_height, out_width, kernel_size, interpolation, num_interp_pts, out_data_ptr, out_idx_ptr); } } template <typename T> void MappedMaxUnpool2DWeighted( const int num_kernels, torch::Tensor grad_output, torch::Tensor idx_mask, torch::Tensor sample_map, torch::Tensor interp_weights, const 
int channels, const int orig_height, const int orig_width, const int pooled_height, const int pooled_width, const int kernel_size, const InterpolationType interpolation, const int num_interp_pts, torch::Tensor grad_input) { const T *grad_output_ptr = grad_output.data_ptr<T>(); const int64_t *idx_mask_ptr = idx_mask.data_ptr<int64_t>(); const T *sample_map_ptr = sample_map.data_ptr<T>(); const T *interp_weights_ptr = interp_weights.data_ptr<T>(); T *grad_input_ptr = grad_input.data_ptr<T>(); int index; #pragma omp parallel for shared( \ grad_output_ptr, idx_mask_ptr, sample_map_ptr, interp_weights_ptr, \ grad_input_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::MappedMaxUnpool2DWeighted( index, grad_output_ptr, idx_mask_ptr, sample_map_ptr, interp_weights_ptr, channels, orig_height, orig_width, pooled_height, pooled_width, kernel_size, interpolation, num_interp_pts, grad_input_ptr); } } } // namespace cpu } // namespace spherical #endif
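All of the pooling kernels above share one dispatch shape: the output positions are flattened into num_kernels independent items, distributed with a statically scheduled OpenMP parallel for, and each iteration hands one index to a per-position worker in the common namespace. Below is a dependency-free sketch of that pattern, with raw float pointers standing in for torch::Tensor and a placeholder worker body.

/* Sketch of the dispatch pattern only; the worker below does not implement
   the actual mapped max-pooling logic. */
#include <omp.h>

static void worker(int index, const float *in, float *out)
{
	out[index] = in[index];   /* placeholder for the per-position pooling work */
}

void dispatch(int num_kernels, const float *in, float *out)
{
	int index;
	#pragma omp parallel for shared(in, out) private(index) schedule(static)
	for (index = 0; index < num_kernels; index++)
	{
		worker(index, in, out);
	}
}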
module_bl_mynn_bl_init_driver_impl.h
#ifndef __MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_H__ #define __MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_H__ // File granularity version. #ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_VERSION_MAJOR #define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_VERSION_MAJOR 1 #endif #ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_VERSION_MINOR #define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_VERSION_MINOR 0 #endif #ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_PATCH_VERSION #define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_PATCH_VERSION 0 #endif #ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_CREATE_DATE #define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_CREATE_DATE "Date: 30-10-2016 , Time: 14:39 PM GMT+2" #endif // Set this value to successful build date/time. #ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_BUILD_DATE #define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_BUILD_DATE "" #endif #ifndef MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_AUTHOR #define MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_AUTHOR "Name: Bernard Gingold , e-mail: [email protected]" #endif #include "module_bl_mynn_F90_iface.h" #include "PhysLib_Config.h" #include "std_headers.h" namespace phys_lib_wrappers { namespace module_bl_mynn { template<typename R32 = float, typename I32 = int > struct Wrap_Mynn_Bl_Init_Driver { /******************************************* Constructors and Destructor. ********************************************/ /* @Purpose: Default Constructor - explicitly default. */ Wrap_Mynn_Bl_Init_Driver() = default; /* @Purpose: 1st 'main' Constructor which purpose is to allocate and initialize scalar and array members. Array members are zero-filled. Caller must later initialize input arrays to correct physical state. */ Wrap_Mynn_Bl_Init_Driver(_In_ const I32 IDS, _In_ const I32 IDE, _In_ const I32 JDS, _In_ const I32 JDE, _In_ const I32 KDS, _In_ const I32 KDE, _In_ const I32 IMS, _In_ const I32 IME, _In_ const I32 JMS, _In_ const I32 JME, _In_ const I32 KMS, _In_ const I32 KME, _In_ const I32 ITS, _In_ const I32 ITE, _In_ const I32 JTS, _In_ const I32 JTE, _In_ const I32 KTS, _In_ const I32 KTE, _In_ const I32 ALLOWED_TO_READ, _In_ const I32 RESTART, _In_ const I32 LEVEL) : m_IDS{ IDS }, m_IDE{ IDE }, m_JDS{ JDS }, m_JDE{ JDE }, m_KDS{ KDS }, m_KDE{ KDE }, m_IMS{ IMS }, m_IME{ IME }, m_JMS{ JMS }, m_JME{ JME }, m_KMS{ KMS }, m_KME{ KME }, m_ITS{ ITS }, m_ITE{ ITE }, m_JTS{ JTS }, m_JTE{ JTE }, m_KTS{ KTS }, m_KTE{ KTE }, m_ALLOWED_TO_READ{ ALLOWED_TO_READ }, m_RESTART{ RESTART }, m_LEVEL{ LEVEL }, m_RUBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RTHBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RQVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RQCBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RQIBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_QKE{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_TKE_PBL{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_EXCH_H{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) } { // Check for memory allocation errors i.e. (malloc failures). 
for (int i{ 0 }; i != this->m_nArrays3D; ++i) { if ((&this->m_RUBLTEN)[i] == NULL) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 1st Ctor: 'Wrap_Mynn_Bl_Init_Driver'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_RUBLTEN)[i] << "\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } } // Zero-initialize arrays. // Using OpenMP and vectorization for // innermost stride. #if defined (USE_ICL_OPENMP) && \ OPENMP_CURR_VER >= 40 #pragma omp parallel for if((m_IME * m_KME * m_JME) >= (1 << 20)) for (int i = m_IMS; i != m_IME; ++i) { for(int k = m_KMS; k != m_KME; ++k) { #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for (int j = m_JMS; j != m_JME; ++j) { this->m_RUBLTEN[i + m_IME * k + m_KME * j] = 0.0; this->m_RVBLTEN[i + m_IME * k + m_KME * j] = 0.0; this->m_RTHBLTEN[i + m_IME * k + m_KME * j] = 0.0; this->m_RQVBLTEN[i + m_IME * k + m_KME * j] = 0.0; this->m_RQCBLTEN[i + m_IME * k + m_KME * j] = 0.0; this->m_RQIBLTEN[i + m_IME * k + m_KME * j] = 0.0; this->m_QKE[i + m_IME * k + m_KME * j] = 0.0; this->m_TKE_PBL[i + m_IME * k + m_KME * j] = 0.0; this->m_EXCH_H[i + m_IME * k + m_KME * j] = 0.0; } } } #else // Use loop blocking. // Warning: You must not #undef 'USE_LOOP_BLOCKING' macro!! for (int i = m_IMS; i != m_IME; i += DEFAULT_BLOCK_SIZE) { for (int k = m_KMS; k != m_KME; k += DEFAULT_BLOCK_SIZE) { for (int j = m_JMS; j != m_JME; j += DEFAULT_BLOCK_SIZE) { for (int ii = i; ii < DEFAULT_BLOCK_SIZE; ++ii) { for (int kk = k; kk < DEFAULT_BLOCK_SIZE; ++kk) { #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #endif for (int jj = j; jj < DEFAULT_BLOCK_SIZE; ++jj) { this->m_RUBLTEN[ii + m_IME * kk + m_KME * jj] = 0.0; this->m_RVBLTEN[ii + m_IME * kk + m_KME * jj] = 0.0; this->m_RTHBLTEN[ii + m_IME * kk + m_KME * jj] = 0.0; this->m_RQVBLTEN[ii + m_IME * kk + m_KME * jj] = 0.0; this->m_RQCBLTEN[ii + m_IME * kk + m_KME * jj] = 0.0; this->m_RQIBLTEN[ii + m_IME * kk + m_KME * jj] = 0.0; this->m_QKE[ii + m_IME * kk + m_KME * jj] = 0.0; this->m_TKE_PBL[ii + m_IME * kk + m_KME * jj] = 0.0; this->m_EXCH_H[ii + m_IME * kk + m_KME * jj] = 0.0; } } } } } } #endif } /* @Purpose: 2nd 'main' Constructor which purpose is to allocate and initialize scalar and array members. Array output members are zero-filled. Caller must pass initialized input arrays to correct physical state. 
*/ Wrap_Mynn_Bl_Init_Driver(_In_ const I32 IDS, _In_ const I32 IDE, _In_ const I32 JDS, _In_ const I32 JDE, _In_ const I32 KDS, _In_ const I32 KDE, _In_ const I32 IMS, _In_ const I32 IME, _In_ const I32 JMS, _In_ const I32 JME, _In_ const I32 KMS, _In_ const I32 KME, _In_ const I32 ITS, _In_ const I32 ITE, _In_ const I32 JTS, _In_ const I32 JTE, _In_ const I32 KTS, _In_ const I32 KTE, _In_ const I32 ALLOWED_TO_READ, _In_ const I32 RESTART, _In_ const I32 LEVEL, _In_ R32* __restrict const RUBLTEN, _In_ R32* __restrict const RVBLTEN, _In_ R32* __restrict const RTHBLTEN, _In_ R32* __restrict const RQVBLTEN, _In_ R32* __restrict const RQCBLTEN, _In_ R32* __restrict const RQIBLTEN, _In_ R32* __restrict const QKE, _In_ R32* __restrict const TKE_PBL, _In_ R32* __restrict const EXCH_H) : m_IDS{ IDS }, m_IDE{ IDE }, m_JDS{ JDS }, m_JDE{ JDE }, m_KDS{ KDS }, m_KDE{ KDE }, m_IMS{ IMS }, m_IME{ IME }, m_JMS{ JMS }, m_JME{ JME }, m_KMS{ KMS }, m_KME{ KME }, m_ITS{ ITS }, m_ITE{ ITE }, m_JTS{ JTS }, m_JTE{ JTE }, m_KTS{ KTS }, m_KTE{ KTE }, m_ALLOWED_TO_READ{ ALLOWED_TO_READ }, m_RESTART{ RESTART }, m_LEVEL{ LEVEL }, m_RUBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RTHBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RQVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RQCBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RQIBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_QKE{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_TKE_PBL{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_EXCH_H{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) } { // Check for occurrence of memory allocation errors i.e. (malloc failures). for (int i{ 0 }; i != this->m_nArrays3D; ++i) { if ((&this->m_RUBLTEN)[i] == NULL) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mynn_Bl_Init_Driver'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_RUBLTEN)[i] << "\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } } // Check for null pointers in call input arrays. if (RUBLTEN == NULL || RVBLTEN == NULL || RTHBLTEN == NULL || RQVBLTEN == NULL || RQCBLTEN == NULL || RQIBLTEN == NULL || QKE == NULL || TKE_PBL == NULL || EXCH_H == NULL ) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in 2nd Ctor: 'Wrap_Mynn_Bl_Init_Driver'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "One or more caller's arrays contains invalid pointer!!\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } // Copy caller's input arrays. // Using OpenMP for outermost loop // and vectorization for innermost loop. 
#if defined (USE_ICL_OPENMP) && \ OPENMP_CURR_VER >= 40 #pragma omp parallel for if((m_IME * m_KME * m_JME) >= (1 << 20)) for (int i = m_IMS; i != m_IME; ++i) { for(int k = m_KMS; k != m_KME; ++k) { #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for(int j = m_JMS; j != m_JME; ++j) { this->m_RUBLTEN[i + m_IME * k + m_KME * j] = RUBLTEN[i + m_IME * k + m_KME * j]; this->m_RVBLTEN[i + m_IME * k + m_KME * j] = RVBLTEN[i + m_IME * k + m_KME * j]; this->m_RTHBLTEN[i + m_IME * k + m_KME * j] = RTHBLTEN[i + m_IME * k + m_KME * j]; this->m_RQVBLTEN[i + m_IME * k + m_KME * j] = RQVBLTEN[i + m_IME * k + m_KME * j]; this->m_RQCBLTEN[i + m_IME * k + m_KME * j] = RQCBLTEN[i + m_IME * k + m_KME * j]; this->m_RQIBLTEN[i + m_IME * k + m_KME * j] = RQIBLTEN[i + m_IME * k + m_KME * j]; this->m_QKE[i + m_IME * k + m_KME * j] = QKE[i + m_IME * k + m_KME * j]; this->m_TKE_PBL[i + m_IME * k + m_KME * j] = TKE_PBL[i + m_IME * k + m_KME * j]; this->m_EXCH_H[i + m_IME * k + m_KME * j] = EXCH_H[i + m_IME * k + m_KME * j]; } } } #else // Use loop blocking. // Warning: You must not #undef 'USE_LOOP_BLOCKING' macro!! for (int i = m_IMS; i != m_IME; i += DEFAULT_BLOCK_SIZE) { for (int k = m_KMS; k != m_KME; k += DEFAULT_BLOCK_SIZE) { for (int j = m_JMS; j != m_JME; j += DEFAULT_BLOCK_SIZE) { for (int ii = i; ii < DEFAULT_BLOCK_SIZE; ++ii) { for (int kk = k; kk < DEFAULT_BLOCK_SIZE; ++kk) { #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #endif for (int jj = j; jj < DEFAULT_BLOCK_SIZE; ++jj) { this->m_RUBLTEN[ii + m_IME * kk + m_KME * jj] = RUBLTEN[ii + m_IME * kk + m_KME * jj]; this->m_RVBLTEN[ii + m_IME * kk + m_KME * jj] = RVBLTEN[ii + m_IME * kk + m_KME * jj]; this->m_RTHBLTEN[ii + m_IME * kk + m_KME * jj] = RTHBLTEN[ii + m_IME * kk + m_KME * jj]; this->m_RQVBLTEN[ii + m_IME * kk + m_KME * jj] = RQVBLTEN[ii + m_IME * kk + m_KME * jj]; this->m_RQCBLTEN[ii + m_IME * kk + m_KME * jj] = RQCBLTEN[ii + m_IME * kk + m_KME * jj]; this->m_RQIBLTEN[ii + m_IME * kk + m_KME * jj] = RQIBLTEN[ii + m_IME * kk + m_KME * jj]; this->m_QKE[ii + m_IME * kk + m_KME * jj] = QKE[ii + m_IME * kk + m_KME * jj]; this->m_TKE_PBL[ii + m_IME * kk + m_KME * jj] = TKE_PBL[ii + m_IME * kk + m_KME * jj]; this->m_EXCH_H[ii + m_IME * kk + m_KME * jj] = EXCH_H[ii + m_IME * kk + m_KME * jj]; } } } } } } #endif } /* @Purpose: Copy Constructor implements deep copy semantics. 
*/ Wrap_Mynn_Bl_Init_Driver(_In_ const Wrap_Mynn_Bl_Init_Driver &x) : m_IDS{ x.m_IDS }, m_IDE{ x.m_IDE }, m_JDS{ x.m_JDS }, m_JDE{ x.m_JDE }, m_KDS{ x.m_KDS }, m_KDE{ x.m_KDE }, m_IMS{ x.m_IMS }, m_IME{ x.m_IME }, m_JMS{ x.m_JMS }, m_JME{ x.m_JME }, m_KMS{ x.m_KMS }, m_KME{ x.m_KME }, m_ITS{ x.m_ITS }, m_ITE{ x.m_ITE }, m_JTS{ x.m_JTS }, m_JTE{ x.m_JTE }, m_KTS{ x.m_KTS }, m_KTE{ x.m_KTE }, m_ALLOWED_TO_READ{ x.m_ALLOWED_TO_READ }, m_RESTART{ x.m_RESTART }, m_LEVEL{ x.m_LEVEL }, m_RUBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RTHBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RQVBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RQCBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_RQIBLTEN{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_QKE{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_TKE_PBL{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) }, m_EXCH_H{ reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)), align32B)) } { // Check for memory allocation errors i.e. (malloc failures). for (int i{ 0 }; i != this->m_nArrays3D; ++i) { if ((&this->m_RUBLTEN)[i] == NULL) { std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]" << "FATAL ERROR: Memory allocation failure in Copy Ctor: 'Wrap_Mynn_Bl_Init_Driver'!!\n"; std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n"; std::cerr << "***** ERROR-DETAILS ***** \n"; std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << (&this->m_RUBLTEN)[i] << "\n"; std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n"; std::exit(-1); } } // Copy caller's input arrays. // Using OpenMP for outermost loop // and vectorization for innermost loop. #if defined (USE_ICL_OPENMP) && \ OPENMP_CURR_VER >= 40 #pragma omp parallel for if((m_IME * m_KME * m_JME) >= (1 << 20)) for (int i = m_IMS; i != m_IME; ++i) { for (int k = m_KMS; k != m_KME; ++k) { #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #pragma unroll(UNROLL_4X) #endif for(int j = m_JMS; j != m_JME; ++j) { this->m_RUBLTEN[i + m_IME * k + m_KME * j] = x.m_RUBLTEN[i + x.m_IME * k + x.m_KME * j]; this->m_RVBLTEN[i + m_IME * k + m_KME * j] = x.m_RVBLTEN[i + x.m_IME * k + x.m_KME * j]; this->m_RTHBLTEN[i + m_IME * k + m_KME * j] = x.m_RTHBLTEN[i + x.m_IME * k + x.m_KME * j]; this->m_RQVBLTEN[i + m_IME * k + m_KME * j] = x.m_RQVBLTEN[i + x.m_IME * k + x.m_KME * j]; this->m_RQCBLTEN[i + m_IME * k + m_KME * j] = x.m_RQCBLTEN[i + x.m_IME * k + x.m_KME * j]; this->m_RQIBLTEN[i + m_IME * k + m_KME * j] = x.m_RQIBLTEN[i + x.m_IME * k + x.m_KME * j]; this->m_QKE[i + m_IME * k + m_KME * j] = x.m_QKE[i + x.m_IME * k + x.m_KME * j]; this->m_TKE_PBL[i + m_IME * k + m_KME * j] = x.m_TKE_PBL[i + x.m_IME * k + x.m_KME * j]; this->m_EXCH_H[i + m_IME * k + m_KME * j] = x.m_EXCH_H[i + x.m_IME * k + x.m_KME * j]; } } } #else // Use loop blocking. // Warning: You must not #undef 'USE_LOOP_BLOCKING' macro!! 
for (int i = m_IMS; i != m_IME; i += DEFAULT_BLOCK_SIZE) { for (int k = m_KMS; k != m_KME; k += DEFAULT_BLOCK_SIZE) { for (int j = m_JMS; j != m_JME; j += DEFAULT_BLOCK_SIZE) { for (int ii = i; ii < DEFAULT_BLOCK_SIZE; ++ii) { for (int kk = k; kk < DEFAULT_BLOCK_SIZE; ++kk) { #if defined (USE_AUTO_VECTORIZATION) #pragma ivdep #pragma simd #endif for (int jj = j; jj < DEFAULT_BLOCK_SIZE; ++jj) { this->m_RUBLTEN[ii + m_IME * kk + m_KME * jj] = x.m_RUBLTEN[ii + x.m_IME * kk + x.m_KME * jj]; this->m_RVBLTEN[ii + m_IME * kk + m_KME * jj] = x.m_RVBLTEN[ii + x.m_IME * kk + x.m_KME * jj]; this->m_RTHBLTEN[ii + m_IME * kk + m_KME * jj] = x.m_RTHBLTEN[ii + x.m_IME * kk + x.m_KME * jj]; this->m_RQVBLTEN[ii + m_IME * kk + m_KME * jj] = x.m_RQVBLTEN[ii + x.m_IME * kk + x.m_KME * jj]; this->m_RQCBLTEN[ii + m_IME * kk + m_KME * jj] = x.m_RQCBLTEN[ii + x.m_IME * kk + x.m_KME * jj]; this->m_RQIBLTEN[ii + m_IME * kk + m_KME * jj] = x.m_RQIBLTEN[ii + x.m_IME * kk + x.m_KME * jj]; this->m_QKE[ii + m_IME * kk + m_KME * jj] = x.m_QKE[ii + x.m_IME * kk + x.m_KME * jj]; this->m_TKE_PBL[ii + m_IME * kk + m_KME * jj] = x.m_TKE_PBL[ii + x.m_IME * kk + x.m_KME * jj]; this->m_EXCH_H[ii + m_IME * kk + m_KME * jj] = x.m_EXCH_H[ii + x.m_IME * kk + x.m_KME * jj]; } } } } } } #endif } /* @Purpose: Move Constructor implements shallow copy semantics. */ Wrap_Mynn_Bl_Init_Driver(_In_ Wrap_Mynn_Bl_Init_Driver &&x) : m_IDS{ x.m_IDS }, m_IDE{ x.m_IDE }, m_JDS{ x.m_JDS }, m_JDE{ x.m_JDE }, m_KDS{ x.m_KDS }, m_KDE{ x.m_KDE }, m_IMS{ x.m_IMS }, m_IME{ x.m_IME }, m_JMS{ x.m_JMS }, m_JME{ x.m_JME }, m_KMS{ x.m_KMS }, m_KME{ x.m_KME }, m_ITS{ x.m_ITS }, m_ITE{ x.m_ITE }, m_JTS{ x.m_JTS }, m_JTE{ x.m_JTE }, m_KTS{ x.m_KTS }, m_KTE{ x.m_KTE }, m_ALLOWED_TO_READ{ x.m_ALLOWED_TO_READ }, m_RESTART{ x.m_RESTART }, m_LEVEL{ x.m_LEVEL } { // Reassign x's pointers to *this. for (int i{ 0 }; i != this->m_nArrays3D; ++i) { if ((&x.m_RUBLTEN)[i]) { (&this->m_RUBLTEN)[i] = (&x.m_RUBLTEN)[i]; } } // Nullify x's pointers. for (int i{ 0 }; i != this->m_nArrays3D; ++i) { (&x.m_RUBLTEN)[i] = NULL; } x.m_IMS = 0; x.m_IME = 0; x.m_KMS = 0; x.m_KME = 0; x.m_JMS = 0; x.m_JME = 0; } /* @Purpose: Class Destructor. */ ~Wrap_Mynn_Bl_Init_Driver() { for (int i{ 0 }; i != this->m_nArrays3D; ++i) { if ((&this->m_RUBLTEN)[i]) { _mm_free((&this->m_RUBLTEN)[i]); } } for (int i{ 0 }; i != this->m_nArrays3D; ++i) { (&this->m_RUBLTEN)[i] = NULL; } this->m_IMS = 0; this->m_IME = 0; this->m_JMS = 0; this->m_JME = 0; this->m_KMS = 0; this->m_KME = 0; } /* @Purpose: Copy-assign Operator implements deep copy semantics. */ Wrap_Mynn_Bl_Init_Driver & operator=(_In_ const Wrap_Mynn_Bl_Init_Driver &x) { if (this == &x) return (*this); this->m_IDS = x.m_IDS; this->m_IDE = x.m_IDE; this->m_JDS = x.m_JDS; this->m_JDE = x.m_JDE; this->m_KDS = x.m_KDS; this->m_KDE = x.m_KDE; this->m_IMS = x.m_IMS; this->m_IME = x.m_IME; this->m_JMS = x.m_JMS; this->m_JME = x.m_JME; this->m_KMS = x.m_KMS; this->m_KME = x.m_KME; this->m_ITS = x.m_ITS; this->m_ITE = x.m_ITE; this->m_JTS = x.m_JTS; this->m_JTE = x.m_JTE; this->m_KTS = x.m_KTS; this->m_KTE = x.m_KTE; this->m_ALLOWED_TO_READ = x.m_ALLOWED_TO_READ; this->m_RESTART = x.m_RESTART; this->m_LEVEL = x.m_LEVEL; constexpr int ntPtrs3D = 9; R32 *tPtrs3D[ntPtrs3D] = {}; for (int i{ 0 }; i != ntPtrs3D; ++i) { tPtrs3D[i] = reinterpret_cast<R32*>(_mm_malloc((m_IME * m_KME * m_JME * sizeof(R32)),align32B)); } // Check for memory allocation errors. 
            for (int i{ 0 }; i != m_nArrays3D; ++i) {
                if (tPtrs3D[i] == NULL) {
                    std::cerr << "[" << __DATE__ << ":" << __TIME__ << "]"
                              << "FATAL ERROR: Memory allocation failure in Copy Operator: 'Wrap_Mynn_Bl_Init_Driver'!!\n";
                    std::cerr << "at " << __FILE__ << ":" << __LINE__ << "(" << std::hex << "0x" << __FUNCTIONW__ << ")" << "\n";
                    std::cerr << "***** ERROR-DETAILS ***** \n";
                    std::cerr << "Checking allocation of temporary 3D arrays.\n";
                    std::cerr << "Failure detected at index: " << i << " heap address: " << std::hex << "0x" << tPtrs3D[i] << "\n";
                    std::cerr << "Cannot recover, hence on first failure occurrence --> calling exit(-1)!!\n";
                    std::exit(-1);
                }
            }
            // Copy caller's input arrays.
            // Using OpenMP for outermost loop
            // and vectorization for innermost loop.
#if defined (USE_ICL_OPENMP) && \
    OPENMP_CURR_VER >= 40
#pragma omp parallel for if((m_IME * m_KME * m_JME) >= (1 << 20))
            for (int i = m_IMS; i != m_IME; ++i) {
                for (int k = m_KMS; k != m_KME; ++k) {
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#pragma unroll(UNROLL_4X)
#endif
                    for (int j = m_JMS; j != m_JME; ++j) {
                        tPtrs3D[0][i + m_IME * k + m_KME * j] = x.m_RUBLTEN[i + m_IME * k + m_KME * j];
                        tPtrs3D[1][i + m_IME * k + m_KME * j] = x.m_RVBLTEN[i + m_IME * k + m_KME * j];
                        tPtrs3D[2][i + m_IME * k + m_KME * j] = x.m_RTHBLTEN[i + m_IME * k + m_KME * j];
                        tPtrs3D[3][i + m_IME * k + m_KME * j] = x.m_RQVBLTEN[i + m_IME * k + m_KME * j];
                        tPtrs3D[4][i + m_IME * k + m_KME * j] = x.m_RQCBLTEN[i + m_IME * k + m_KME * j];
                        tPtrs3D[5][i + m_IME * k + m_KME * j] = x.m_RQIBLTEN[i + m_IME * k + m_KME * j];
                        tPtrs3D[6][i + m_IME * k + m_KME * j] = x.m_QKE[i + m_IME * k + m_KME * j];
                        tPtrs3D[7][i + m_IME * k + m_KME * j] = x.m_TKE_PBL[i + m_IME * k + m_KME * j];
                        tPtrs3D[8][i + m_IME * k + m_KME * j] = x.m_EXCH_H[i + m_IME * k + m_KME * j];
                    }
                }
            }
            // Deallocate current context of *this.
            for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
                _mm_free((&this->m_RUBLTEN)[i]);
            }
            // Reassign temporary pointers to member pointers.
            this->m_RUBLTEN  = tPtrs3D[0];
            this->m_RVBLTEN  = tPtrs3D[1];
            this->m_RTHBLTEN = tPtrs3D[2];
            this->m_RQVBLTEN = tPtrs3D[3];
            this->m_RQCBLTEN = tPtrs3D[4];
            this->m_RQIBLTEN = tPtrs3D[5];
            this->m_QKE      = tPtrs3D[6];
            this->m_TKE_PBL  = tPtrs3D[7];
            this->m_EXCH_H   = tPtrs3D[8];
            return (*this);
#else
            // Use loop blocking.
            // Warning: You must not #undef 'USE_LOOP_BLOCKING' macro!!
            for (int i = m_IMS; i < m_IME; i += DEFAULT_BLOCK_SIZE) {
                for (int k = m_KMS; k < m_KME; k += DEFAULT_BLOCK_SIZE) {
                    for (int j = m_JMS; j < m_JME; j += DEFAULT_BLOCK_SIZE) {
                        // Block bounds clamped to the array extents (see the Copy Ctor).
                        for (int ii = i; ii < i + DEFAULT_BLOCK_SIZE && ii < m_IME; ++ii) {
                            for (int kk = k; kk < k + DEFAULT_BLOCK_SIZE && kk < m_KME; ++kk) {
#if defined (USE_AUTO_VECTORIZATION)
#pragma ivdep
#pragma simd
#endif
                                for (int jj = j; jj < j + DEFAULT_BLOCK_SIZE && jj < m_JME; ++jj) {
                                    tPtrs3D[0][ii + m_IME * kk + m_KME * jj] = x.m_RUBLTEN[ii + m_IME * kk + m_KME * jj];
                                    tPtrs3D[1][ii + m_IME * kk + m_KME * jj] = x.m_RVBLTEN[ii + m_IME * kk + m_KME * jj];
                                    tPtrs3D[2][ii + m_IME * kk + m_KME * jj] = x.m_RTHBLTEN[ii + m_IME * kk + m_KME * jj];
                                    tPtrs3D[3][ii + m_IME * kk + m_KME * jj] = x.m_RQVBLTEN[ii + m_IME * kk + m_KME * jj];
                                    tPtrs3D[4][ii + m_IME * kk + m_KME * jj] = x.m_RQCBLTEN[ii + m_IME * kk + m_KME * jj];
                                    tPtrs3D[5][ii + m_IME * kk + m_KME * jj] = x.m_RQIBLTEN[ii + m_IME * kk + m_KME * jj];
                                    tPtrs3D[6][ii + m_IME * kk + m_KME * jj] = x.m_QKE[ii + m_IME * kk + m_KME * jj];
                                    tPtrs3D[7][ii + m_IME * kk + m_KME * jj] = x.m_TKE_PBL[ii + m_IME * kk + m_KME * jj];
                                    tPtrs3D[8][ii + m_IME * kk + m_KME * jj] = x.m_EXCH_H[ii + m_IME * kk + m_KME * jj];
                                }
                            }
                        }
                    }
                }
            }
            // Deallocate current context of *this.
            for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
                _mm_free((&this->m_RUBLTEN)[i]);
            }
            // Reassign temporary pointers to member pointers.
            this->m_RUBLTEN  = tPtrs3D[0];
            this->m_RVBLTEN  = tPtrs3D[1];
            this->m_RTHBLTEN = tPtrs3D[2];
            this->m_RQVBLTEN = tPtrs3D[3];
            this->m_RQCBLTEN = tPtrs3D[4];
            this->m_RQIBLTEN = tPtrs3D[5];
            this->m_QKE      = tPtrs3D[6];
            this->m_TKE_PBL  = tPtrs3D[7];
            this->m_EXCH_H   = tPtrs3D[8];
            return (*this);
#endif
        }

        /* @Purpose: Move-assign Operator implements shallow copy semantics. */
        Wrap_Mynn_Bl_Init_Driver & operator=(_In_ Wrap_Mynn_Bl_Init_Driver &&x) {
            if (this == &x) return (*this);
            this->m_IDS = x.m_IDS; this->m_IDE = x.m_IDE;
            this->m_JDS = x.m_JDS; this->m_JDE = x.m_JDE;
            this->m_KDS = x.m_KDS; this->m_KDE = x.m_KDE;
            this->m_IMS = x.m_IMS; this->m_IME = x.m_IME;
            this->m_JMS = x.m_JMS; this->m_JME = x.m_JME;
            this->m_KMS = x.m_KMS; this->m_KME = x.m_KME;
            this->m_ITS = x.m_ITS; this->m_ITE = x.m_ITE;
            this->m_JTS = x.m_JTS; this->m_JTE = x.m_JTE;
            this->m_KTS = x.m_KTS; this->m_KTE = x.m_KTE;
            this->m_ALLOWED_TO_READ = x.m_ALLOWED_TO_READ;
            this->m_RESTART = x.m_RESTART;
            this->m_LEVEL = x.m_LEVEL;
            // Deallocate current context.
            for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
                if ((&this->m_RUBLTEN)[i]) {
                    _mm_free((&this->m_RUBLTEN)[i]);
                }
            }
            // Reassign x's pointers to *this's pointers.
            // Note: the member pointers are addressed as a contiguous array of
            // pointers, so '&x.m_RUBLTEN' (not 'x.m_RUBLTEN') must be indexed here.
            for (int i{ 0 }; i != this->m_nArrays3D; ++i) {
                (&this->m_RUBLTEN)[i] = (&x.m_RUBLTEN)[i];
            }
            // Nullify x's pointers.
            for (int i{ 0 }; i != x.m_nArrays3D; ++i) {
                (&x.m_RUBLTEN)[i] = NULL;
            }
            x.m_IMS = 0; x.m_IME = 0;
            x.m_KMS = 0; x.m_KME = 0;
            x.m_JMS = 0; x.m_JME = 0;
            return (*this);
        }

        /* @Purpose: Call Fortran 90 'MYNN_BL_INIT_DRIVER' subroutine. */
        void Call_Mynn_Bl_Init_Driver() {
            MODULE_BL_MYNN_mp_MYNN_BL_INIT_DRIVER(&this->m_RUBLTEN[0], &this->m_RVBLTEN[0], &this->m_RTHBLTEN[0],
                                                  &this->m_RQVBLTEN[0], &this->m_RQCBLTEN[0], &this->m_RQIBLTEN[0],
                                                  &this->m_QKE[0], &this->m_TKE_PBL[0], &this->m_EXCH_H[0],
                                                  &this->m_RESTART, &this->m_ALLOWED_TO_READ, &this->m_LEVEL,
                                                  &this->m_IDS, &this->m_IDE, &this->m_JDS, &this->m_JDE, &this->m_KDS, &this->m_KDE,
                                                  &this->m_IMS, &this->m_IME, &this->m_JMS, &this->m_JME, &this->m_KMS, &this->m_KME,
                                                  &this->m_ITS, &this->m_ITE, &this->m_JTS, &this->m_JTE, &this->m_KTS, &this->m_KTE);
        }

        /* @Purpose: Member variables. */
        // Memory and patch dimension variables.
I32 m_IDS; I32 m_IDE; I32 m_JDS; I32 m_JDE; I32 m_KDS; I32 m_KDE; I32 m_IMS; I32 m_IME; I32 m_JMS; I32 m_JME; I32 m_KMS; I32 m_KME; I32 m_ITS; I32 m_ITE; I32 m_JTS; I32 m_JTE; I32 m_KTS; I32 m_KTE; I32 m_ALLOWED_TO_READ; I32 m_RESTART; I32 m_LEVEL; // Array variables. _Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RUBLTEN; _Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RVBLTEN; _Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RTHBLTEN; _Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RQVBLTEN; _Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RQCBLTEN; _Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_RQIBLTEN; _Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_QKE; _Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_TKE_PBL; _Field_size_(m_IME * m_KME * m_JME) R32* __restrict m_EXCH_H; static const int m_nArrays3D = 9; }; } } #endif /*__MODULE_BL_MYNN_BL_INIT_DRIVER_IMPL_H__*/
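// --- Added indexing note and sketch (not part of the original wrapper header) ----
// The copy/assignment routines above flatten the (IME x KME x JME) fields with the
// linear index 'i + m_IME * k + m_KME * j'.  Because each field is allocated with
// m_IME * m_KME * m_JME elements, a flattened index that maps every (i,k,j) triple
// to a unique element would normally be 'i + m_IME * (k + m_KME * j)'.  The helper
// below is a minimal, hypothetical sketch of such an indexer; the name 'flat_idx3'
// and its use are assumptions and do not appear in the original code.
#include <cstddef>
static inline std::size_t flat_idx3(int i, int k, int j, int ime, int kme)
{
    // i varies fastest, then k, then j, matching the IME/KME/JME allocation above.
    return static_cast<std::size_t>(i)
         + static_cast<std::size_t>(ime) * (static_cast<std::size_t>(k)
         + static_cast<std::size_t>(kme) * static_cast<std::size_t>(j));
}
// Worked example with hypothetical extents IME = 10, KME = 5:
//   flat_idx3(2, 1, 0, 10, 5) == 2 + 10 * (1 + 5 * 0) == 12.
// ----------------------------------------------------------------------------------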
convolution_sgemm_int8.h
// BUG1989 is pleased to support the open source community by supporting ncnn available. // // Copyright (C) 2019 BUG1989. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void conv_im2col_sgemm_int8_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char* kernel = _kernel; // im2row Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator); { signed char* ret = (signed char*)bottom_im2row; int retID = 0; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { for (int p = 0; p < inch; p++) { const signed char* input = bottom_blob.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // int M = outch; // outch int N = outw * outh; // outsize or out stride int K = kernel_w * kernel_h * inch; // ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char* img0 = bottom_im2row.row<signed char>(i); const signed char* img1 = bottom_im2row.row<signed char>(i + 1); const signed char* img2 = bottom_im2row.row<signed char>(i + 2); const signed char* img3 = bottom_im2row.row<signed char>(i + 3); signed char* tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < out_size; i++) { const signed char* img0 = bottom_im2row.row<signed char>(i); signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } // kernel memory packed 4 x 4 Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for 
num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char* k0 = kernel + (p + 0) * inch * kernel_size; const signed char* k1 = kernel + (p + 1) * inch * kernel_size; const signed char* k2 = kernel + (p + 2) * inch * kernel_size; const signed char* k3 = kernel + (p + 3) * inch * kernel_size; signed char* ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch * kernel_size; q += 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { const signed char* k0 = kernel + (p + 0) * inch * kernel_size; signed char* ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } // 4x4 // sgemm(int M, int N, int K, float* A, float* B, float* C) { // int M = outch; // outch // int N = outw * outh; // outsize or out stride // int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; int* output0 = top_blob.channel(i); int* output1 = top_blob.channel(i + 1); int* output2 = top_blob.channel(i + 2); int* output3 = top_blob.channel(i + 3); int j = 0; for (; j + 3 < N; j = j + 4) { signed char* vb = bottom_tm.channel(j / 4); signed char* va = kernel_tm.channel(i / 4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[2 * n]; // k0 sum0[n] += (int)va[1] * vb[2 * n + 1]; sum1[n] += (int)va[2] * vb[2 * n]; // k1 sum1[n] += (int)va[3] * vb[2 * n + 1]; sum2[n] += (int)va[4] * vb[2 * n]; // k2 sum2[n] += (int)va[5] * vb[2 * n + 1]; sum3[n] += (int)va[6] * vb[2 * n]; // k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = sum0[n]; output1[n] = sum1[n]; output2[n] = sum2[n]; output3[n] = sum3[n]; } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = sum0; output1[0] = sum1; output2[0] = sum2; output3[0] = sum3; output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for 
(int i = remain_outch_start; i < outch; i++) { int* output = top_blob.channel(i); int j = 0; for (; j + 3 < N; j = j + 4) { signed char* vb = bottom_tm.channel(j / 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = sum[n]; } output += 4; } for (; j < N; j++) { int sum = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); for (int k = 0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = sum; output++; } } } // // sgemm(int M, int N, int K, float* A, float* B, float* C) // { // for (int i=0; i<M; i++) // { // int* output = top_blob.channel(i); // for (int j=0; j<N; j++) // { // int sum = 0; // signed char* vb = (signed char*)bottom_im2row + K * j; // const signed char* va = kernel + K * i; // for (int k=0; k<K; k++) // { // sum += (int)va[0] * vb[0]; // va += 1; // vb += 1; // } // output[0] = sum; // output++; // } // } // } } static void conv_im2col_sgemm_int8_dequant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat& _bias, std::vector<float> scale_dequant, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char* kernel = _kernel; const float* bias = _bias; // im2row Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator); { signed char* ret = (signed char*)bottom_im2row; int retID = 0; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { for (int p = 0; p < inch; p++) { const signed char* input = bottom_blob.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // int M = outch; // outch int N = outw * outh; // outsize or out stride int K = kernel_w * kernel_h * inch; // ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char* img0 = bottom_im2row.row<signed char>(i); const signed char* img1 = bottom_im2row.row<signed char>(i + 1); const signed char* img2 = bottom_im2row.row<signed char>(i + 2); const signed char* img3 = bottom_im2row.row<signed char>(i + 3); signed char* tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for 
(int i = remain_size_start; i < out_size; i++) { const signed char* img0 = bottom_im2row.row<signed char>(i); signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } // kernel memory packed 4 x 4 Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char* k0 = kernel + (p + 0) * inch * kernel_size; const signed char* k1 = kernel + (p + 1) * inch * kernel_size; const signed char* k2 = kernel + (p + 2) * inch * kernel_size; const signed char* k3 = kernel + (p + 3) * inch * kernel_size; signed char* ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch * kernel_size; q += 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { const signed char* k0 = kernel + (p + 0) * inch * kernel_size; signed char* ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } // 4x4 // sgemm(int M, int N, int K, float* A, float* B, float* C) { // int M = outch; // outch // int N = outw * outh; // outsize or out stride // int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; const float bias0 = bias ? bias[i] : 0.f; const float bias1 = bias ? bias[i + 1] : 0.f; const float bias2 = bias ? bias[i + 2] : 0.f; const float bias3 = bias ? 
bias[i + 3] : 0.f; const float scale_dequant0 = scale_dequant[i]; const float scale_dequant1 = scale_dequant[i + 1]; const float scale_dequant2 = scale_dequant[i + 2]; const float scale_dequant3 = scale_dequant[i + 3]; float* output0 = top_blob.channel(i); float* output1 = top_blob.channel(i + 1); float* output2 = top_blob.channel(i + 2); float* output3 = top_blob.channel(i + 3); int j = 0; for (; j + 3 < N; j = j + 4) { signed char* vb = bottom_tm.channel(j / 4); signed char* va = kernel_tm.channel(i / 4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[2 * n]; // k0 sum0[n] += (int)va[1] * vb[2 * n + 1]; sum1[n] += (int)va[2] * vb[2 * n]; // k1 sum1[n] += (int)va[3] * vb[2 * n + 1]; sum2[n] += (int)va[4] * vb[2 * n]; // k2 sum2[n] += (int)va[5] * vb[2 * n + 1]; sum3[n] += (int)va[6] * vb[2 * n]; // k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = (float)sum0[n] * scale_dequant0 + bias0; output1[n] = (float)sum1[n] * scale_dequant1 + bias1; output2[n] = (float)sum2[n] * scale_dequant2 + bias2; output3[n] = (float)sum3[n] * scale_dequant3 + bias3; } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = (float)sum0 * scale_dequant0 + bias0; output1[0] = (float)sum1 * scale_dequant1 + bias1; output2[0] = (float)sum2 * scale_dequant2 + bias2; output3[0] = (float)sum3 * scale_dequant3 + bias3; output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_outch_start; i < outch; i++) { float* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; const float scale_dequant0 = scale_dequant[i]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char* vb = bottom_tm.channel(j / 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = (float)sum[n] * scale_dequant0 + bias0; } output += 4; } for (; j < N; j++) { int sum = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); for (int k = 0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = (float)sum * scale_dequant0 + bias0; output++; } } } // // sgemm(int M, int N, int K, float* A, float* B, float* C) // { // for (int i=0; i<M; i++) // { // int* output = top_blob.channel(i); // for (int j=0; j<N; j++) // { // int sum = 0; // signed char* vb = (signed char*)bottom_im2row + K * j; // const signed char* va = kernel + K * i; // for (int k=0; k<K; k++) // { // sum += (int)va[0] * vb[0]; // va += 1; // vb += 1; // } // output[0] = sum; // output++; // } // } // } } static void conv_im2col_sgemm_int8_requant_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& _kernel, const int kernel_w, const int kernel_h, const int stride_w, const int stride_h, const Mat& _bias, std::vector<float> scale_requant, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const signed char* kernel = _kernel; const float* bias = _bias; // im2row Mat bottom_im2row(kernel_h * kernel_w * inch, outw * outh, 1UL, opt.workspace_allocator); { signed char* ret = (signed char*)bottom_im2row; int retID = 0; for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { for (int p = 0; p < inch; p++) { const signed char* input = bottom_blob.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { int row = u + i * stride_h; int col = v + j * stride_w; int index = row * w + col; ret[retID] = input[index]; retID++; } } } } } } int kernel_size = kernel_w * kernel_h; int out_size = outw * outh; // int M = outch; // outch int N = outw * outh; // outsize or out stride int K = kernel_w * kernel_h * inch; // ksize * inch // bottom_im2row memory packed 4 x 4 Mat bottom_tm(4 * kernel_size, inch, out_size / 4 + out_size % 4, (size_t)1u, opt.workspace_allocator); { int nn_size = out_size >> 2; int remain_size_start = nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = ii * 4; const signed char* img0 = bottom_im2row.row<signed char>(i); const signed char* img1 = bottom_im2row.row<signed char>(i + 1); const signed char* img2 = bottom_im2row.row<signed char>(i + 2); const signed char* img3 = bottom_im2row.row<signed char>(i + 3); signed char* tmpptr = bottom_tm.channel(i / 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr[2] = img1[0]; tmpptr[3] = img1[1]; tmpptr[4] = img2[0]; tmpptr[5] = img2[1]; tmpptr[6] = img3[0]; tmpptr[7] = img3[1]; tmpptr += 8; img0 += 2; img1 += 2; img2 += 2; img3 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr[1] = img1[0]; tmpptr[2] = img2[0]; tmpptr[3] = img3[0]; tmpptr += 4; img0 += 1; img1 += 1; img2 += 1; img3 += 1; } } #pragma omp 
parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < out_size; i++) { const signed char* img0 = bottom_im2row.row<signed char>(i); signed char* tmpptr = bottom_tm.channel(i / 4 + i % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { tmpptr[0] = img0[0]; tmpptr[1] = img0[1]; tmpptr += 2; img0 += 2; } for (; q < inch * kernel_size; q++) { tmpptr[0] = img0[0]; tmpptr += 1; img0 += 1; } } } // kernel memory packed 4 x 4 Mat kernel_tm(4 * kernel_size, inch, outch / 4 + outch % 4, (size_t)1u, opt.workspace_allocator); { int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 4; const signed char* k0 = kernel + (p + 0) * inch * kernel_size; const signed char* k1 = kernel + (p + 1) * inch * kernel_size; const signed char* k2 = kernel + (p + 2) * inch * kernel_size; const signed char* k3 = kernel + (p + 3) * inch * kernel_size; signed char* ktmp = kernel_tm.channel(p / 4); int q = 0; for (; q + 1 < inch * kernel_size; q += 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp[2] = k1[0]; ktmp[3] = k1[1]; ktmp[4] = k2[0]; ktmp[5] = k2[1]; ktmp[6] = k3[0]; ktmp[7] = k3[1]; ktmp += 8; k0 += 2; k1 += 2; k2 += 2; k3 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp[1] = k1[0]; ktmp[2] = k2[0]; ktmp[3] = k3[0]; ktmp += 4; k0 += 1; k1 += 1; k2 += 1; k3 += 1; } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { const signed char* k0 = kernel + (p + 0) * inch * kernel_size; signed char* ktmp = kernel_tm.channel(p / 4 + p % 4); int q = 0; for (; q + 1 < inch * kernel_size; q = q + 2) { ktmp[0] = k0[0]; ktmp[1] = k0[1]; ktmp += 2; k0 += 2; } for (; q < inch * kernel_size; q++) { ktmp[0] = k0[0]; ktmp++; k0++; } } } // 4x4 // sgemm(int M, int N, int K, float* A, float* B, float* C) { // int M = outch; // outch // int N = outw * outh; // outsize or out stride // int L = kernel_w * kernel_h * inch; // ksize * inch int nn_outch = 0; int remain_outch_start = 0; nn_outch = outch >> 2; remain_outch_start = nn_outch << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int i = pp * 4; signed char* output0 = top_blob.channel(i); signed char* output1 = top_blob.channel(i + 1); signed char* output2 = top_blob.channel(i + 2); signed char* output3 = top_blob.channel(i + 3); const float bias0 = bias ? bias[i] : 0.f; const float bias1 = bias ? bias[i + 1] : 0.f; const float bias2 = bias ? bias[i + 2] : 0.f; const float bias3 = bias ? 
bias[i + 3] : 0.f; const float scale_requant_in0 = scale_requant[2 * i]; const float scale_requant_out0 = scale_requant[2 * i + 1]; const float scale_requant_in1 = scale_requant[2 * (i + 1)]; const float scale_requant_out1 = scale_requant[2 * (i + 1) + 1]; const float scale_requant_in2 = scale_requant[2 * (i + 2)]; const float scale_requant_out2 = scale_requant[2 * (i + 2) + 1]; const float scale_requant_in3 = scale_requant[2 * (i + 3)]; const float scale_requant_out3 = scale_requant[2 * (i + 3) + 1]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char* vb = bottom_tm.channel(j / 4); signed char* va = kernel_tm.channel(i / 4); int sum0[4] = {0}; int sum1[4] = {0}; int sum2[4] = {0}; int sum3[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[2 * n]; // k0 sum0[n] += (int)va[1] * vb[2 * n + 1]; sum1[n] += (int)va[2] * vb[2 * n]; // k1 sum1[n] += (int)va[3] * vb[2 * n + 1]; sum2[n] += (int)va[4] * vb[2 * n]; // k2 sum2[n] += (int)va[5] * vb[2 * n + 1]; sum3[n] += (int)va[6] * vb[2 * n]; // k3 sum3[n] += (int)va[7] * vb[2 * n + 1]; } va += 8; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum0[n] += (int)va[0] * vb[n]; sum1[n] += (int)va[1] * vb[n]; sum2[n] += (int)va[2] * vb[n]; sum3[n] += (int)va[3] * vb[n]; } va += 4; vb += 4; } for (int n = 0; n < 4; n++) { output0[n] = float2int8(((float)sum0[n] * scale_requant_in0 + bias0) * scale_requant_out0); output1[n] = float2int8(((float)sum1[n] * scale_requant_in1 + bias1) * scale_requant_out1); output2[n] = float2int8(((float)sum2[n] * scale_requant_in2 + bias2) * scale_requant_out2); output3[n] = float2int8(((float)sum3[n] * scale_requant_in3 + bias3) * scale_requant_out3); } output0 += 4; output1 += 4; output2 += 4; output3 += 4; } for (; j < N; j++) { int sum0 = 0; int sum1 = 0; int sum2 = 0; int sum3 = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4); int k = 0; for (; k + 1 < K; k = k + 2) { sum0 += (int)va[0] * vb[0]; sum0 += (int)va[1] * vb[1]; sum1 += (int)va[2] * vb[0]; sum1 += (int)va[3] * vb[1]; sum2 += (int)va[4] * vb[0]; sum2 += (int)va[5] * vb[1]; sum3 += (int)va[6] * vb[0]; sum3 += (int)va[7] * vb[1]; va += 8; vb += 2; } for (; k < K; k++) { sum0 += (int)va[0] * vb[0]; sum1 += (int)va[1] * vb[0]; sum2 += (int)va[2] * vb[0]; sum3 += (int)va[3] * vb[0]; va += 4; vb += 1; } output0[0] = float2int8(((float)sum0 * scale_requant_in0 + bias0) * scale_requant_out0); output1[0] = float2int8(((float)sum1 * scale_requant_in1 + bias1) * scale_requant_out1); output2[0] = float2int8(((float)sum2 * scale_requant_in2 + bias2) * scale_requant_out2); output3[0] = float2int8(((float)sum3 * scale_requant_in3 + bias3) * scale_requant_out3); output0++; output1++; output2++; output3++; } } #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_outch_start; i < outch; i++) { signed char* output = top_blob.channel(i); const float bias0 = bias ? 
bias[i] : 0.f; const float scale_requant_in0 = scale_requant[2 * i]; const float scale_requant_out0 = scale_requant[2 * i + 1]; int j = 0; for (; j + 3 < N; j = j + 4) { signed char* vb = bottom_tm.channel(j / 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); int sum[4] = {0}; int k = 0; for (; k + 1 < K; k = k + 2) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[2 * n]; sum[n] += (int)va[1] * vb[2 * n + 1]; } va += 2; vb += 8; } for (; k < K; k++) { for (int n = 0; n < 4; n++) { sum[n] += (int)va[0] * vb[n]; } va += 1; vb += 4; } for (int n = 0; n < 4; n++) { output[n] = float2int8(((float)sum[n] * scale_requant_in0 + bias0) * scale_requant_out0); } output += 4; } for (; j < N; j++) { int sum = 0; signed char* vb = bottom_tm.channel(j / 4 + j % 4); signed char* va = kernel_tm.channel(i / 4 + i % 4); for (int k = 0; k < K; k++) { sum += (int)va[0] * vb[0]; va += 1; vb += 1; } output[0] = float2int8(((float)sum * scale_requant_in0 + bias0) * scale_requant_out0); output++; } } } // // sgemm(int M, int N, int K, float* A, float* B, float* C) // { // for (int i=0; i<M; i++) // { // int* output = top_blob.channel(i); // for (int j=0; j<N; j++) // { // int sum = 0; // signed char* vb = (signed char*)bottom_im2row + K * j; // const signed char* va = kernel + K * i; // for (int k=0; k<K; k++) // { // sum += (int)va[0] * vb[0]; // va += 1; // vb += 1; // } // output[0] = sum; // output++; // } // } // } }
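// --- Added notes (not part of the original ncnn header) ---------------------------
// Packing layout shared by the three kernels above:
//   * bottom_tm.channel(j/4) holds output positions j..j+3; along K the data is
//     interleaved in pairs, so one k-pair occupies 8 bytes in the order
//     [pos j,k  pos j,k+1  pos j+1,k  pos j+1,k+1  ...], which is exactly what the
//     inner loops read as vb[2*n] / vb[2*n+1] for n = 0..3.
//   * kernel_tm.channel(p/4) holds output channels p..p+3 with the same pairwise
//     interleaving, so va[0..7] covers one k-pair of four filters.
//   * Leftover columns/channels are stored unpacked, one per extra channel, which
//     is why the tail loops index channel(j/4 + j%4) and channel(i/4 + i%4).
//
// float2int8() is referenced by the requantization kernel but defined elsewhere in
// ncnn.  The sketch below is a plausible round-and-saturate implementation; it is
// an assumption, not the canonical definition (hence the _sketch suffix).
#include <math.h>
static inline signed char float2int8_sketch(float v)
{
    int v32 = (int)roundf(v);    // round to nearest integer
    if (v32 > 127) return 127;   // saturate to the int8 range
    if (v32 < -127) return -127; // symmetric clamp; the real helper may allow -128
    return (signed char)v32;
}
// ----------------------------------------------------------------------------------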
cpu_utils.h
/* * Copyright (c) Facebook, Inc. and its affiliates. * Copyright (c) Intel Corporation. * All rights reserved. * This source code is licensed under the BSD-style license found in the * LICENSE file in the root directory of this source tree. */ #pragma once #include <omp.h> #include <cstdint> #include <utility> template <typename T> using Radix_Sort_Pair = std::pair<T, T>; // histogram size per thread const int RDX_HIST_SIZE = 256; template <typename T> Radix_Sort_Pair<T>* radix_sort_parallel( Radix_Sort_Pair<T>* inp_buf, Radix_Sort_Pair<T>* tmp_buf, int64_t elements_count, int64_t max_value) { int maxthreads = omp_get_max_threads(); alignas(64) int histogram[RDX_HIST_SIZE * maxthreads], histogram_ps[RDX_HIST_SIZE * maxthreads + 1]; if (max_value == 0) return inp_buf; int num_bits = sizeof(T) * 8 - __builtin_clz(max_value); unsigned int num_passes = (num_bits + 7) / 8; #pragma omp parallel { int tid = omp_get_thread_num(); int nthreads = omp_get_num_threads(); int* local_histogram = &histogram[RDX_HIST_SIZE * tid]; int* local_histogram_ps = &histogram_ps[RDX_HIST_SIZE * tid]; int elements_count_4 = elements_count / 4 * 4; Radix_Sort_Pair<T>* input = inp_buf; Radix_Sort_Pair<T>* output = tmp_buf; for (unsigned int pass = 0; pass < num_passes; pass++) { // Step 1: compute histogram for (int i = 0; i < RDX_HIST_SIZE; i++) local_histogram[i] = 0; #pragma omp for schedule(static) for (int64_t i = 0; i < elements_count_4; i += 4) { T val_1 = input[i].first; T val_2 = input[i + 1].first; T val_3 = input[i + 2].first; T val_4 = input[i + 3].first; local_histogram[(val_1 >> (pass * 8)) & 0xFF]++; local_histogram[(val_2 >> (pass * 8)) & 0xFF]++; local_histogram[(val_3 >> (pass * 8)) & 0xFF]++; local_histogram[(val_4 >> (pass * 8)) & 0xFF]++; } if (tid == (nthreads - 1)) { for (int64_t i = elements_count_4; i < elements_count; i++) { T val = input[i].first; local_histogram[(val >> (pass * 8)) & 0xFF]++; } } #pragma omp barrier // Step 2: prefix sum if (tid == 0) { int sum = 0, prev_sum = 0; for (int bins = 0; bins < RDX_HIST_SIZE; bins++) for (int t = 0; t < nthreads; t++) { sum += histogram[t * RDX_HIST_SIZE + bins]; histogram_ps[t * RDX_HIST_SIZE + bins] = prev_sum; prev_sum = sum; } histogram_ps[RDX_HIST_SIZE * nthreads] = prev_sum; if (prev_sum != elements_count) { } } #pragma omp barrier // Step 3: scatter #pragma omp for schedule(static) for (int64_t i = 0; i < elements_count_4; i += 4) { T val_1 = input[i].first; T val_2 = input[i + 1].first; T val_3 = input[i + 2].first; T val_4 = input[i + 3].first; T bin_1 = (val_1 >> (pass * 8)) & 0xFF; T bin_2 = (val_2 >> (pass * 8)) & 0xFF; T bin_3 = (val_3 >> (pass * 8)) & 0xFF; T bin_4 = (val_4 >> (pass * 8)) & 0xFF; int pos; pos = local_histogram_ps[bin_1]++; output[pos] = input[i]; pos = local_histogram_ps[bin_2]++; output[pos] = input[i + 1]; pos = local_histogram_ps[bin_3]++; output[pos] = input[i + 2]; pos = local_histogram_ps[bin_4]++; output[pos] = input[i + 3]; } if (tid == (nthreads - 1)) { for (int64_t i = elements_count_4; i < elements_count; i++) { T val = input[i].first; int pos = local_histogram_ps[(val >> (pass * 8)) & 0xFF]++; output[pos] = input[i]; } } Radix_Sort_Pair<T>* temp = input; input = output; output = temp; #pragma omp barrier } } return (num_passes % 2 == 0 ? inp_buf : tmp_buf); }
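// --- Usage sketch (added; not part of the original header) ------------------------
// radix_sort_parallel() sorts (key, payload) pairs by key in 8-bit passes whose
// count depends on max_value, ping-ponging between the caller's input and temporary
// buffers.  The returned pointer is whichever buffer holds the sorted data after
// the final pass, so callers must read the result through it.  The function and
// variable names below are illustrative only.
#include <cstdio>
#include <vector>
static void radix_sort_parallel_example()
{
    std::vector<Radix_Sort_Pair<int64_t>> keys = {{42, 0}, {7, 1}, {300, 2}, {7, 3}};
    std::vector<Radix_Sort_Pair<int64_t>> tmp(keys.size());
    const int64_t max_value = 300; // largest key; determines the number of passes
    Radix_Sort_Pair<int64_t>* sorted =
        radix_sort_parallel(keys.data(), tmp.data(), (int64_t)keys.size(), max_value);
    // Expected key order: 7, 7, 42, 300.
    for (size_t n = 0; n < keys.size(); n++)
        std::printf("%lld -> payload %lld\n",
                    (long long)sorted[n].first, (long long)sorted[n].second);
}
// ----------------------------------------------------------------------------------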
BallTracking.h
#pragma once

#include <opencv2/core/core.hpp>
#include <opencv2/core/optim.hpp>
//#include <iostream>
//#include <string>
//#include "spdlog/spdlog.h"
//#include <ConcurrentQueue.h>
//#include <omp.h>
//#include <boost/lockfree/queue.hpp>

class RotModel : public cv::MinProblemSolver::Function
{
private:
	cv::Mat radialFlow;
	cv::Mat tangenFlow;
	uint nPoints;   //num of points to fit by function
	float freq;     //2PI/nPoints
	float ampRatio; //radialFlowAmplitude = ampRatio*tangenFlow;

public:
	int mode;
	static const int MODE_TRACKING = 3;
	static const int MODE_CALIBRATION = 4;

	RotModel(int fit_mode = MODE_TRACKING)
	{
		mode = fit_mode;
	}

	double calc(const double* x) const
	{
		/* Calculates SSE between actual and fitted optical flow distributions
		   x = [amplitude, phase, offset_tan] */
		float res = 0.0;
		if (mode == MODE_TRACKING)
		{
			// the reduction is required: without it the accumulation into 'res' races
			#pragma omp parallel for reduction(+ : res)
			for (int i = 0; i < (int)nPoints; i++)
			{
				float r = pow( radialFlow.at<float>(i) - (x[0] * sin(i*freq + x[1])), 2 );
				r += pow( tangenFlow.at<float>(i) - (x[2] + x[0] * cos(i*freq + x[1])), 2 );
				res += r;
			}
		}
		/* x = [amplitude_rad, amplitude_tan, offset_tan, phase ] */
		else
		{
			#pragma omp parallel for reduction(+ : res)
			for (int i = 0; i < (int)nPoints; i++)
			{
				float r = pow( radialFlow.at<float>(0, i) - (x[0] * sin(i*freq + x[3])), 2 );
				r += pow( tangenFlow.at<float>(0, i) - (x[2] + x[1] * cos(i*freq + x[3])), 2 );
				res += r;
			}
		}
		return res;
	}

	int getDims() const { return mode; }

	void setDataPoints(cv::Mat& radial, cv::Mat& tangential)
	{
		radialFlow = radial;
		tangenFlow = tangential;
		float ch = radial.channels();
		nPoints = radialFlow.size[0];
		freq = 3.14159265 * 2.0 / (float)nPoints;
	}
};

struct BallTrackingParameters
{
	cv::Point2f polarCenter = cv::Point2f(112, 70);
	float visibleBallRadius = 116;  //px
	uint roiRhoMin = 40;            //px
	uint roiRhoMax = 100;           //px
	uint roiDownscaledWidth = 30;   //px
	uint roiDownscaledRhoMin = 2;   //px
	uint roiDownscaledRhoMax = 13;  //px
	float calibrCXYrad = 100.31;    //px/rad
	float calibrCXYtan = 76.85;     //px/rad
	float calibrCZ = 20.63;         //px/rad
};

class BallTracking
{
private:
	cv::Mat prevFrame;
	cv::Mat prevFit;
	bool visualize;
	cv::Ptr<cv::DownhillSolver> pDhSolver;
	cv::Ptr<RotModel> pRotModel;

public:
	BallTracking(bool enableVisualize, int mode);
	BallTrackingParameters parameters;
	cv::Mat debugPlot;

	cv::Mat update(const cv::Mat& frame);
	~BallTracking();
};
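// --- Solver usage sketch (added; BallTracking.cpp is not part of this header) -----
// One plausible way the tracker could drive the downhill-simplex fit of RotModel;
// the function name, initial guess and step sizes below are illustrative
// assumptions, not the actual BallTracking implementation.
#include <opencv2/core/optim.hpp>
static cv::Mat fitRotModelSketch(cv::Mat& radialFlow, cv::Mat& tangenFlow)
{
	cv::Ptr<RotModel> model = cv::makePtr<RotModel>(RotModel::MODE_TRACKING);
	model->setDataPoints(radialFlow, tangenFlow);

	cv::Ptr<cv::DownhillSolver> solver = cv::DownhillSolver::create();
	solver->setFunction(model);

	// In MODE_TRACKING the parameter vector is x = [amplitude, phase, offset_tan].
	cv::Mat x    = (cv::Mat_<double>(1, 3) << 1.0, 0.0, 0.0);
	cv::Mat step = (cv::Mat_<double>(1, 3) << 0.5, 0.1, 0.5);
	solver->setInitStep(step);

	double sse = solver->minimize(x); // x now holds the fitted parameters
	(void)sse;                        // residual SSE, useful for debugging or thresholds
	return x;
}
// ----------------------------------------------------------------------------------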
GB_binop__first_int8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__first_int8) // A.*B function (eWiseMult): GB (_AemultB_01__first_int8) // A.*B function (eWiseMult): GB (_AemultB_02__first_int8) // A.*B function (eWiseMult): GB (_AemultB_03__first_int8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__first_int8) // A*D function (colscale): GB (_AxD__first_int8) // D*A function (rowscale): GB (_DxB__first_int8) // C+=B function (dense accum): GB (_Cdense_accumB__first_int8) // C+=b function (dense accum): GB (_Cdense_accumb__first_int8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__first_int8) // C=scalar+B GB ((none)) // C=scalar+B' GB ((none)) // C=A+scalar GB ((none)) // C=A'+scalar GB ((none)) // C type: int8_t // A type: int8_t // B,b type: int8_t // BinaryOp: cij = aij #define GB_ATYPE \ int8_t #define GB_BTYPE \ int8_t #define GB_CTYPE \ int8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int8_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ ; // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = x ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_FIRST || GxB_NO_INT8 || GxB_NO_FIRST_INT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__first_int8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__first_int8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__first_int8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type int8_t int8_t bwork = (*((int8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__first_int8) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__first_int8) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *restrict Cx = (int8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__first_int8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; 
return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__first_int8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__first_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__first_int8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__first_int8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t *Cx = (int8_t *) Cx_output ; int8_t x = (*((int8_t *) x_input)) ; int8_t *Bx = (int8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; ; ; Cx [p] = x ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int8_t *Cx = (int8_t *) Cx_output ; int8_t *Ax = (int8_t *) Ax_input ; int8_t y = (*((int8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int8_t aij = GBX (Ax, p, false) ; Cx [p] = aij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = x ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ int8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t x = (*((const int8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int8_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int8_t y = (*((const int8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
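//------------------------------------------------------------------------------
// user-level usage sketch (added; not part of the generated kernel file)
//------------------------------------------------------------------------------

// The FIRST binary operator returns its first argument, so for eWiseMult the
// result at every position of the intersection pattern is simply aij, and the
// values of B matter only through its pattern.  A minimal user-level call that
// exercises this kernel family, shown as a sketch:
//
//      GrB_Matrix A, B, C ;
//      GrB_Matrix_new (&A, GrB_INT8, 4, 4) ;
//      GrB_Matrix_new (&B, GrB_INT8, 4, 4) ;
//      GrB_Matrix_new (&C, GrB_INT8, 4, 4) ;
//      GrB_Matrix_setElement_INT8 (A, 7, 1, 2) ;       // A(1,2) = 7
//      GrB_Matrix_setElement_INT8 (B, -3, 1, 2) ;      // B(1,2) = -3
//      GrB_eWiseMult (C, NULL, NULL, GrB_FIRST_INT8, A, B, NULL) ;
//      // C(1,2) == 7 : cij = aij, regardless of bij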
distort.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD IIIII SSSSS TTTTT OOO RRRR TTTTT % % D D I SS T O O R R T % % D D I SSS T O O RRRR T % % D D I SS T O O R R T % % DDDD IIIII SSSSS T OOO R R T % % % % % % MagickCore Image Distortion Methods % % % % Software Design % % Cristy % % Anthony Thyssen % % June 2007 % % % % % % Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distort.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/list.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/shear.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" /* Numerous internal routines for image distortions. 
*/ static inline void AffineArgsToCoefficients(double *affine) { /* map external sx,ry,rx,sy,tx,ty to internal c0,c2,c4,c1,c3,c5 */ double tmp[4]; /* note indexes 0 and 5 remain unchanged */ tmp[0]=affine[1]; tmp[1]=affine[2]; tmp[2]=affine[3]; tmp[3]=affine[4]; affine[3]=tmp[0]; affine[1]=tmp[1]; affine[4]=tmp[2]; affine[2]=tmp[3]; } static inline void CoefficientsToAffineArgs(double *coeff) { /* map internal c0,c1,c2,c3,c4,c5 to external sx,ry,rx,sy,tx,ty */ double tmp[4]; /* note indexes 0 and 5 remain unchanged */ tmp[0]=coeff[3]; tmp[1]=coeff[1]; tmp[2]=coeff[4]; tmp[3]=coeff[2]; coeff[1]=tmp[0]; coeff[2]=tmp[1]; coeff[3]=tmp[2]; coeff[4]=tmp[3]; } static void InvertAffineCoefficients(const double *coeff,double *inverse) { /* From "Digital Image Warping" by George Wolberg, page 50 */ double determinant; determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[1]*coeff[3]); inverse[0]=determinant*coeff[4]; inverse[1]=determinant*(-coeff[1]); inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[2]*coeff[4]); inverse[3]=determinant*(-coeff[3]); inverse[4]=determinant*coeff[0]; inverse[5]=determinant*(coeff[2]*coeff[3]-coeff[0]*coeff[5]); } static void InvertPerspectiveCoefficients(const double *coeff, double *inverse) { /* From "Digital Image Warping" by George Wolberg, page 53 */ double determinant; determinant=PerceptibleReciprocal(coeff[0]*coeff[4]-coeff[3]*coeff[1]); inverse[0]=determinant*(coeff[4]-coeff[7]*coeff[5]); inverse[1]=determinant*(coeff[7]*coeff[2]-coeff[1]); inverse[2]=determinant*(coeff[1]*coeff[5]-coeff[4]*coeff[2]); inverse[3]=determinant*(coeff[6]*coeff[5]-coeff[3]); inverse[4]=determinant*(coeff[0]-coeff[6]*coeff[2]); inverse[5]=determinant*(coeff[3]*coeff[2]-coeff[0]*coeff[5]); inverse[6]=determinant*(coeff[3]*coeff[7]-coeff[6]*coeff[4]); inverse[7]=determinant*(coeff[6]*coeff[1]-coeff[0]*coeff[7]); } /* * Polynomial Term Defining Functions * * Order must either be an integer, or 1.5 to produce * the 2 number_valuesal polynomial function... * affine 1 (3) u = c0 + c1*x + c2*y * bilinear 1.5 (4) u = '' + c3*x*y * quadratic 2 (6) u = '' + c4*x*x + c5*y*y * cubic 3 (10) u = '' + c6*x^3 + c7*x*x*y + c8*x*y*y + c9*y^3 * quartic 4 (15) u = '' + c10*x^4 + ... + c14*y^4 * quintic 5 (21) u = '' + c15*x^5 + ... + c20*y^5 * number in parenthesis minimum number of points needed. * Anything beyond quintic, has not been implemented until * a more automated way of determining terms is found. * Note the slight re-ordering of the terms for a quadratic polynomial * which is to allow the use of a bi-linear (order=1.5) polynomial. 
* All the later polynomials are ordered simply from x^N to y^N */ static size_t poly_number_terms(double order) { /* Return the number of terms for a 2d polynomial */ if ( order < 1 || order > 5 || ( order != floor(order) && (order-1.5) > MagickEpsilon) ) return 0; /* invalid polynomial order */ return((size_t) floor((order+1)*(order+2)/2)); } static double poly_basis_fn(ssize_t n, double x, double y) { /* Return the result for this polynomial term */ switch(n) { case 0: return( 1.0 ); /* constant */ case 1: return( x ); case 2: return( y ); /* affine order = 1 terms = 3 */ case 3: return( x*y ); /* bilinear order = 1.5 terms = 4 */ case 4: return( x*x ); case 5: return( y*y ); /* quadratic order = 2 terms = 6 */ case 6: return( x*x*x ); case 7: return( x*x*y ); case 8: return( x*y*y ); case 9: return( y*y*y ); /* cubic order = 3 terms = 10 */ case 10: return( x*x*x*x ); case 11: return( x*x*x*y ); case 12: return( x*x*y*y ); case 13: return( x*y*y*y ); case 14: return( y*y*y*y ); /* quartic order = 4 terms = 15 */ case 15: return( x*x*x*x*x ); case 16: return( x*x*x*x*y ); case 17: return( x*x*x*y*y ); case 18: return( x*x*y*y*y ); case 19: return( x*y*y*y*y ); case 20: return( y*y*y*y*y ); /* quintic order = 5 terms = 21 */ } return( 0 ); /* should never happen */ } static const char *poly_basis_str(ssize_t n) { /* return the result for this polynomial term */ switch(n) { case 0: return(""); /* constant */ case 1: return("*ii"); case 2: return("*jj"); /* affine order = 1 terms = 3 */ case 3: return("*ii*jj"); /* bilinear order = 1.5 terms = 4 */ case 4: return("*ii*ii"); case 5: return("*jj*jj"); /* quadratic order = 2 terms = 6 */ case 6: return("*ii*ii*ii"); case 7: return("*ii*ii*jj"); case 8: return("*ii*jj*jj"); case 9: return("*jj*jj*jj"); /* cubic order = 3 terms = 10 */ case 10: return("*ii*ii*ii*ii"); case 11: return("*ii*ii*ii*jj"); case 12: return("*ii*ii*jj*jj"); case 13: return("*ii*jj*jj*jj"); case 14: return("*jj*jj*jj*jj"); /* quartic order = 4 terms = 15 */ case 15: return("*ii*ii*ii*ii*ii"); case 16: return("*ii*ii*ii*ii*jj"); case 17: return("*ii*ii*ii*jj*jj"); case 18: return("*ii*ii*jj*jj*jj"); case 19: return("*ii*jj*jj*jj*jj"); case 20: return("*jj*jj*jj*jj*jj"); /* quintic order = 5 terms = 21 */ } return( "UNKNOWN" ); /* should never happen */ } static double poly_basis_dx(ssize_t n, double x, double y) { /* polynomial term for x derivative */ switch(n) { case 0: return( 0.0 ); /* constant */ case 1: return( 1.0 ); case 2: return( 0.0 ); /* affine order = 1 terms = 3 */ case 3: return( y ); /* bilinear order = 1.5 terms = 4 */ case 4: return( x ); case 5: return( 0.0 ); /* quadratic order = 2 terms = 6 */ case 6: return( x*x ); case 7: return( x*y ); case 8: return( y*y ); case 9: return( 0.0 ); /* cubic order = 3 terms = 10 */ case 10: return( x*x*x ); case 11: return( x*x*y ); case 12: return( x*y*y ); case 13: return( y*y*y ); case 14: return( 0.0 ); /* quartic order = 4 terms = 15 */ case 15: return( x*x*x*x ); case 16: return( x*x*x*y ); case 17: return( x*x*y*y ); case 18: return( x*y*y*y ); case 19: return( y*y*y*y ); case 20: return( 0.0 ); /* quintic order = 5 terms = 21 */ } return( 0.0 ); /* should never happen */ } static double poly_basis_dy(ssize_t n, double x, double y) { /* polynomial term for y derivative */ switch(n) { case 0: return( 0.0 ); /* constant */ case 1: return( 0.0 ); case 2: return( 1.0 ); /* affine order = 1 terms = 3 */ case 3: return( x ); /* bilinear order = 1.5 terms = 4 */ case 4: return( 0.0 ); case 5: return( y ); /* 
quadratic order = 2 terms = 6 */ default: return( poly_basis_dx(n-1,x,y) ); /* weird but true */ } /* NOTE: the only reason that last is not true for 'quadratic' is due to the re-arrangement of terms to allow for 'bilinear' */ } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A f f i n e T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AffineTransformImage() transforms an image as dictated by the affine matrix. % It allocates the memory necessary for the new Image structure and returns % a pointer to the new image. % % The format of the AffineTransformImage method is: % % Image *AffineTransformImage(const Image *image, % AffineMatrix *affine_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o affine_matrix: the affine matrix. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AffineTransformImage(const Image *image, const AffineMatrix *affine_matrix,ExceptionInfo *exception) { double distort[6]; Image *deskew_image; /* Affine transform image. */ assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(affine_matrix != (AffineMatrix *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); distort[0]=affine_matrix->sx; distort[1]=affine_matrix->rx; distort[2]=affine_matrix->ry; distort[3]=affine_matrix->sy; distort[4]=affine_matrix->tx; distort[5]=affine_matrix->ty; deskew_image=DistortImage(image,AffineProjectionDistortion,6,distort, MagickTrue,exception); return(deskew_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e n e r a t e C o e f f i c i e n t s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GenerateCoefficients() takes user provided input arguments and generates % the coefficients, needed to apply the specific distortion for either % distorting images (generally using control points) or generating a color % gradient from sparsely separated color points. % % The format of the GenerateCoefficients() method is: % % Image *GenerateCoefficients(const Image *image,DistortMethod method, % const size_t number_arguments,const double *arguments, % size_t number_values, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be distorted. % % o method: the method of image distortion/ sparse gradient % % o number_arguments: the number of arguments given. % % o arguments: the arguments for this distortion method. % % o number_values: the style and format of given control points, (caller type) % 0: 2 dimensional mapping of control points (Distort) % Format: u,v,x,y where u,v is the 'source' of the % the color to be plotted, for DistortImage() % N: Interpolation of control points with N values (usally r,g,b) % Format: x,y,r,g,b mapping x,y to color values r,g,b % IN future, variable number of values may be given (1 to N) % % o exception: return any errors or warnings in this structure % % Note that the returned array of double values must be freed by the % calling method using RelinquishMagickMemory(). This however may change in % the future to require a more 'method' specific method. 
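% % For example (purely illustrative, not a fixed calling convention): fitting an affine distortion from two u,v,x,y control-point pairs would pass number_arguments=8 and number_values=0, and get back 3*2 = 6 doubles (c0..c5) for the caller to free with RelinquishMagickMemory().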
% % Because of this, this method should not be classed as stable or used % outside other MagickCore library methods. */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } static double *GenerateCoefficients(const Image *image, DistortMethod *method,const size_t number_arguments,const double *arguments, size_t number_values,ExceptionInfo *exception) { double *coeff; register size_t i; size_t number_coeff, /* number of coefficients to return (array size) */ cp_size, /* number floating point numbers per control point */ cp_x,cp_y, /* the x,y indexes for control point */ cp_values; /* index of values for this control point */ /* number_values Number of values given per control point */ if ( number_values == 0 ) { /* Image distortion using control points (or other distortion) That is generate a mapping so that x,y->u,v given u,v,x,y */ number_values = 2; /* special case: two values of u,v */ cp_values = 0; /* the values i,j are BEFORE the destination CP x,y */ cp_x = 2; /* location of x,y in input control values */ cp_y = 3; /* NOTE: cp_values, also used for later 'reverse map distort' tests */ } else { cp_x = 0; /* location of x,y in input control values */ cp_y = 1; cp_values = 2; /* and the other values are after x,y */ /* Typically in this case the values are R,G,B color values */ } cp_size = number_values+2; /* each CP definition involves this many numbers */ /* If not enough control point pairs are found for specific distortions fall back to Affine distortion (allowing 0 to 3 point pairs) */ if ( number_arguments < 4*cp_size && ( *method == BilinearForwardDistortion || *method == BilinearReverseDistortion || *method == PerspectiveDistortion ) ) *method = AffineDistortion; number_coeff=0; switch (*method) { case AffineDistortion: /* also BarycentricColorInterpolate: */ number_coeff=3*number_values; break; case PolynomialDistortion: /* number of coefficients depends on the given polynomial 'order' */ i = poly_number_terms(arguments[0]); number_coeff = 2 + i*number_values; if ( i == 0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Polynomial", "Invalid order, should be integer 1 to 5, or 1.5"); return((double *) NULL); } if ( number_arguments < 1+i*cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Polynomial", (double) i); return((double *) NULL); } break; case BilinearReverseDistortion: number_coeff=4*number_values; break; /* The rest are constants as they are only used for image distorts */ case BilinearForwardDistortion: number_coeff=10; /* 2*4 coeff plus 2 constants */ cp_x = 0; /* Reverse src/dest coords for forward mapping */ cp_y = 1; cp_values = 2; break; #if 0 case QuadrilateralDistortion: number_coeff=19; /* BilinearForward + BilinearReverse */ #endif break; case ShepardsDistortion: number_coeff=1; /* The power factor to use */ break; case ArcDistortion: number_coeff=5; break; case ScaleRotateTranslateDistortion: case AffineProjectionDistortion: case Plane2CylinderDistortion: case Cylinder2PlaneDistortion: number_coeff=6; break; case PolarDistortion: case DePolarDistortion: number_coeff=8; break; case PerspectiveDistortion: case PerspectiveProjectionDistortion: number_coeff=9; break; case BarrelDistortion: case BarrelInverseDistortion: number_coeff=10; break; default: perror("unknown method given"); /* just fail assertion */ } /* allocate 
the array of coefficients needed */ coeff = (double *) AcquireQuantumMemory(number_coeff,sizeof(*coeff)); if (coeff == (double *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "GenerateCoefficients"); return((double *) NULL); } /* zero out coefficients array */ for (i=0; i < number_coeff; i++) coeff[i] = 0.0; switch (*method) { case AffineDistortion: { /* Affine Distortion v = c0*x + c1*y + c2 for each 'value' given Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", "Affine", 1.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* handle special cases of not enough arguments */ if ( number_arguments == cp_size ) { /* Only 1 CP Set Given */ if ( cp_values == 0 ) { /* image distortion - translate the image */ coeff[0] = 1.0; coeff[2] = arguments[0] - arguments[2]; coeff[4] = 1.0; coeff[5] = arguments[1] - arguments[3]; } else { /* sparse gradient - use the values directly */ for (i=0; i<number_values; i++) coeff[i*3+2] = arguments[cp_values+i]; } } else { /* 2 or more points (usally 3) given. Solve a least squares simultaneous equation for coefficients. */ double **matrix, **vectors, terms[3]; MagickBooleanType status; /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(3UL,3UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*3]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),3UL,number_values); } if ( number_arguments == 2*cp_size ) { /* Only two pairs were given, but we need 3 to solve the affine. Fake extra coordinates by rotating p1 around p0 by 90 degrees. 
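(for example, p0=(0,0) and p1=(10,0) yield the fake third point p2=(0,10), preserving the distance between p0 and p1)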
x2 = x0 - (y1-y0) y2 = y0 + (x1-x0) */ terms[0] = arguments[cp_x] - ( arguments[cp_size+cp_y] - arguments[cp_y] ); /* x2 */ terms[1] = arguments[cp_y] + + ( arguments[cp_size+cp_x] - arguments[cp_x] ); /* y2 */ terms[2] = 1; /* 1 */ if ( cp_values == 0 ) { /* Image Distortion - rotate the u,v coordients too */ double uv2[2]; uv2[0] = arguments[0] - arguments[5] + arguments[1]; /* u2 */ uv2[1] = arguments[1] + arguments[4] - arguments[0]; /* v2 */ LeastSquaresAddTerms(matrix,vectors,terms,uv2,3UL,2UL); } else { /* Sparse Gradient - use values of p0 for linear gradient */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[cp_values]),3UL,number_values); } } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,3UL,number_values); matrix = RelinquishMagickMatrix(matrix, 3UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } } return(coeff); } case AffineProjectionDistortion: { /* Arguments: Affine Matrix (forward mapping) Arguments sx, rx, ry, sy, tx, ty Where u = sx*x + ry*y + tx v = rx*x + sy*y + ty Returns coefficients (in there inverse form) ordered as... sx ry tx rx sy ty AffineProjection Distortion Notes... + Will only work with a 2 number_values for Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ double inverse[8]; if (number_arguments != 6) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs 6 coeff values'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* FUTURE: trap test for sx*sy-rx*ry == 0 (determinant = 0, no inverse) */ for(i=0; i<6UL; i++ ) inverse[i] = arguments[i]; AffineArgsToCoefficients(inverse); /* map into coefficents */ InvertAffineCoefficients(inverse, coeff); /* invert */ *method = AffineDistortion; return(coeff); } case ScaleRotateTranslateDistortion: { /* Scale, Rotate and Translate Distortion An alternative Affine Distortion Argument options, by number of arguments given: 7: x,y, sx,sy, a, nx,ny 6: x,y, s, a, nx,ny 5: x,y, sx,sy, a 4: x,y, s, a 3: x,y, a 2: s, a 1: a Where actions are (in order of application) x,y 'center' of transforms (default = image center) sx,sy scale image by this amount (default = 1) a angle of rotation (argument required) nx,ny move 'center' here (default = x,y or no movement) And convert to affine mapping coefficients ScaleRotateTranslate Distortion Notes... 
+ Does not use a set of CPs in any normal way + Will only work with a 2 number_valuesal Image Distortion + Cannot be used for generating a sparse gradient (interpolation) */ double cosine, sine, x,y,sx,sy,a,nx,ny; /* set default center, and default scale */ x = nx = (double)(image->columns)/2.0 + (double)image->page.x; y = ny = (double)(image->rows)/2.0 + (double)image->page.y; sx = sy = 1.0; switch ( number_arguments ) { case 0: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Needs at least 1 argument'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); case 1: a = arguments[0]; break; case 2: sx = sy = arguments[0]; a = arguments[1]; break; default: x = nx = arguments[0]; y = ny = arguments[1]; switch ( number_arguments ) { case 3: a = arguments[2]; break; case 4: sx = sy = arguments[2]; a = arguments[3]; break; case 5: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; break; case 6: sx = sy = arguments[2]; a = arguments[3]; nx = arguments[4]; ny = arguments[5]; break; case 7: sx = arguments[2]; sy = arguments[3]; a = arguments[4]; nx = arguments[5]; ny = arguments[6]; break; default: coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Too Many Arguments (7 or less)'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } break; } /* Trap if sx or sy == 0 -- image is scaled out of existance! */ if ( fabs(sx) < MagickEpsilon || fabs(sy) < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Zero Scale Given'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* Save the given arguments as an affine distortion */ a=DegreesToRadians(a); cosine=cos(a); sine=sin(a); *method = AffineDistortion; coeff[0]=cosine/sx; coeff[1]=sine/sx; coeff[2]=x-nx*coeff[0]-ny*coeff[1]; coeff[3]=(-sine)/sy; coeff[4]=cosine/sy; coeff[5]=y-nx*coeff[3]-ny*coeff[4]; return(coeff); } case PerspectiveDistortion: { /* Perspective Distortion (a ratio of affine distortions) p(x,y) c0*x + c1*y + c2 u = ------ = ------------------ r(x,y) c6*x + c7*y + 1 q(x,y) c3*x + c4*y + c5 v = ------ = ------------------ r(x,y) c6*x + c7*y + 1 c8 = Sign of 'r', or the denominator affine, for the actual image. This determines what part of the distorted image is 'ground' side of the horizon, the other part is 'sky' or invalid. Valid values are +1.0 or -1.0 only. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... Perspective Distortion Notes... + Can be thought of as ratio of 3 affine transformations + Not separatable: r() or c6 and c7 are used by both equations + All 8 coefficients must be determined simultaniously + Will only work with a 2 number_valuesal Image Distortion + Can not be used for generating a sparse gradient (interpolation) + It is not linear, but is simple to generate an inverse + All lines within an image remain lines. + but distances between points may vary. 
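As a sketch of the system built below: each control point pair adds two rows to the 8x8 least-squares (normal equation) system, [x y 1 0 0 0 -x*u -y*u] . c = u and [0 0 0 x y 1 -x*v -y*v] . c = v, so four or more pairs determine c0..c7 via Gauss-Jordan elimination.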
*/ double **matrix, *vectors[1], terms[8]; size_t cp_u = cp_values, cp_v = cp_values+1; MagickBooleanType status; if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* fake 1x8 vectors matrix directly using the coefficients array */ vectors[0] = &(coeff[0]); /* 8x8 least-squares matrix (zeroed) */ matrix = AcquireMagickMatrix(8UL,8UL); if (matrix == (double **) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* Add control points for least squares solving */ for (i=0; i < number_arguments; i+=4) { terms[0]=arguments[i+cp_x]; /* c0*x */ terms[1]=arguments[i+cp_y]; /* c1*y */ terms[2]=1.0; /* c2*1 */ terms[3]=0.0; terms[4]=0.0; terms[5]=0.0; terms[6]=-terms[0]*arguments[i+cp_u]; /* 1/(c6*x) */ terms[7]=-terms[1]*arguments[i+cp_u]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_u]), 8UL,1UL); terms[0]=0.0; terms[1]=0.0; terms[2]=0.0; terms[3]=arguments[i+cp_x]; /* c3*x */ terms[4]=arguments[i+cp_y]; /* c4*y */ terms[5]=1.0; /* c5*1 */ terms[6]=-terms[3]*arguments[i+cp_v]; /* 1/(c6*x) */ terms[7]=-terms[4]*arguments[i+cp_v]; /* 1/(c7*y) */ LeastSquaresAddTerms(matrix,vectors,terms,&(arguments[i+cp_v]), 8UL,1UL); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,8UL,1UL); matrix = RelinquishMagickMatrix(matrix, 8UL); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image coordinate (first control point) in destination for determination of what part of view is 'ground'. */ coeff[8] = coeff[6]*arguments[cp_x] + coeff[7]*arguments[cp_y] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; return(coeff); } case PerspectiveProjectionDistortion: { /* Arguments: Perspective Coefficents (forward mapping) */ if (number_arguments != 8) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'Needs 8 coefficient values'", CommandOptionToMnemonic(MagickDistortOptions, *method)); return((double *) NULL); } /* FUTURE: trap test c0*c4-c3*c1 == 0 (determinate = 0, no inverse) */ InvertPerspectiveCoefficients(arguments, coeff); /* Calculate 9'th coefficient! The ground-sky determination. What is sign of the 'ground' in r() denominator affine function? Just use any valid image cocodinate in destination for determination. For a forward mapped perspective the images 0,0 coord will map to c2,c5 in the distorted image, so set the sign of denominator of that. */ coeff[8] = coeff[6]*arguments[2] + coeff[7]*arguments[5] + 1.0; coeff[8] = (coeff[8] < 0.0) ? -1.0 : +1.0; *method = PerspectiveDistortion; return(coeff); } case BilinearForwardDistortion: case BilinearReverseDistortion: { /* Bilinear Distortion (Forward mapping) v = c0*x + c1*y + c2*x*y + c3; for each 'value' given This is actually a simple polynomial Distortion! 
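(each control point contributes one row of terms [x, y, x*y, 1] to a 4x4 least-squares system per value, solved below with Gauss-Jordan elimination)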
The difference however is when we need to reverse the above equation to generate a BilinearForwardDistortion (see below). Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ double **matrix, **vectors, terms[4]; MagickBooleanType status; /* check the number of arguments */ if ( number_arguments%cp_size != 0 || number_arguments < cp_size*4 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'require at least %.20g CPs'", CommandOptionToMnemonic(MagickDistortOptions, *method), 4.0); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* create matrix, and a fake vectors matrix */ matrix = AcquireMagickMatrix(4UL,4UL); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); if (matrix == (double **) NULL || vectors == (double **) NULL) { matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x4 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[i*4]); /* Add given control point pairs for least squares solving */ for (i=0; i < number_arguments; i+=cp_size) { terms[0] = arguments[i+cp_x]; /* x */ terms[1] = arguments[i+cp_y]; /* y */ terms[2] = terms[0]*terms[1]; /* x*y */ terms[3] = 1; /* 1 */ LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),4UL,number_values); } /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,4UL,number_values); matrix = RelinquishMagickMatrix(matrix, 4UL); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( *method == BilinearForwardDistortion ) { /* Bilinear Forward Mapped Distortion The above least-squares solved for coefficents but in the forward direction, due to changes to indexing constants. i = c0*x + c1*y + c2*x*y + c3; j = c4*x + c5*y + c6*x*y + c7; where i,j are in the destination image, NOT the source. Reverse Pixel mapping however needs to use reverse of these functions. It required a full page of algbra to work out the reversed mapping formula, but resolves down to the following... c8 = c0*c5-c1*c4; c9 = 2*(c2*c5-c1*c6); // '2*a' in the quadratic formula i = i - c3; j = j - c7; b = c6*i - c2*j + c8; // So that a*y^2 + b*y + c == 0 c = c4*i - c0*j; // y = ( -b +- sqrt(bb - 4ac) ) / (2*a) r = b*b - c9*(c+c); if ( c9 != 0 ) y = ( -b + sqrt(r) ) / c9; else y = -c/b; x = ( i - c1*y) / ( c1 - c2*y ); NB: if 'r' is negative there is no solution! NB: the sign of the sqrt() should be negative if image becomes flipped or flopped, or crosses over itself. NB: techniqually coefficient c5 is not needed, anymore, but kept for completness. See Anthony Thyssen <[email protected]> or Fred Weinhaus <[email protected]> for more details. 
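A quick sanity check of the above (identity mapping, c0=1 c1=0 c2=0 c3=0, c4=0 c5=1 c6=0 c7=0): c8 = 1 and c9 = 0, so the degenerate branch applies, b = 1, c = -j, y = -c/b = j, and substituting back into the forward equation recovers x = i, as expected.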
*/ coeff[8] = coeff[0]*coeff[5] - coeff[1]*coeff[4]; coeff[9] = 2*(coeff[2]*coeff[5] - coeff[1]*coeff[6]); } return(coeff); } #if 0 case QuadrilateralDistortion: { /* Map a Quadrilateral to a unit square using BilinearReverse Then map that unit square back to the final Quadrilateral using BilinearForward. Input Arguments are sets of control points... For Distort Images u,v, x,y ... For Sparse Gradients x,y, r,g,b ... */ /* UNDER CONSTRUCTION */ return(coeff); } #endif case PolynomialDistortion: { /* Polynomial Distortion First two coefficents are used to hole global polynomal information c0 = Order of the polynimial being created c1 = number_of_terms in one polynomial equation Rest of the coefficients map to the equations.... v = c0 + c1*x + c2*y + c3*x*y + c4*x^2 + c5*y^2 + c6*x^3 + ... for each 'value' (number_values of them) given. As such total coefficients = 2 + number_terms * number_values Input Arguments are sets of control points... For Distort Images order [u,v, x,y] ... For Sparse Gradients order [x,y, r,g,b] ... Polynomial Distortion Notes... + UNDER DEVELOPMENT -- Do not expect this to remain as is. + Currently polynomial is a reversed mapped distortion. + Order 1.5 is fudged to map into a bilinear distortion. though it is not the same order as that distortion. */ double **matrix, **vectors, *terms; size_t nterms; /* number of polynomial terms per number_values */ register ssize_t j; MagickBooleanType status; /* first two coefficients hold polynomial order information */ coeff[0] = arguments[0]; coeff[1] = (double) poly_number_terms(arguments[0]); nterms = (size_t) coeff[1]; /* create matrix, a fake vectors matrix, and least sqs terms */ matrix = AcquireMagickMatrix(nterms,nterms); vectors = (double **) AcquireQuantumMemory(number_values,sizeof(*vectors)); terms = (double *) AcquireQuantumMemory(nterms, sizeof(*terms)); if (matrix == (double **) NULL || vectors == (double **) NULL || terms == (double *) NULL ) { matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); terms = (double *) RelinquishMagickMemory(terms); coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((double *) NULL); } /* fake a number_values x3 vectors matrix from coefficients array */ for (i=0; i < number_values; i++) vectors[i] = &(coeff[2+i*nterms]); /* Add given control point pairs for least squares solving */ for (i=1; i < number_arguments; i+=cp_size) { /* NB: start = 1 not 0 */ for (j=0; j < (ssize_t) nterms; j++) terms[j] = poly_basis_fn(j,arguments[i+cp_x],arguments[i+cp_y]); LeastSquaresAddTerms(matrix,vectors,terms, &(arguments[i+cp_values]),nterms,number_values); } terms = (double *) RelinquishMagickMemory(terms); /* Solve for LeastSquares Coefficients */ status=GaussJordanElimination(matrix,vectors,nterms,number_values); matrix = RelinquishMagickMatrix(matrix, nterms); vectors = (double **) RelinquishMagickMemory(vectors); if ( status == MagickFalse ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Unsolvable Matrix'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } return(coeff); } case ArcDistortion: { /* Arc Distortion Args: arc_width rotate top_edge_radius bottom_edge_radius All but first argument are optional arc_width The angle over which to arc the image side-to-side rotate 
Angle to rotate image from vertical center top_radius Set top edge of source image at this radius bottom_radius Set bootom edge to this radius (radial scaling) By default, if the radii arguments are nor provided the image radius is calculated so the horizontal center-line is fits the given arc without scaling. The output image size is ALWAYS adjusted to contain the whole image, and an offset is given to position image relative to the 0,0 point of the origin, allowing users to use relative positioning onto larger background (via -flatten). The arguments are converted to these coefficients c0: angle for center of source image c1: angle scale for mapping to source image c2: radius for top of source image c3: radius scale for mapping source image c4: centerline of arc within source image Note the coefficients use a center angle, so asymptotic join is furthest from both sides of the source image. This also means that for arc angles greater than 360 the sides of the image will be trimmed equally. Arc Distortion Notes... + Does not use a set of CPs + Will only work with Image Distortion + Can not be used for generating a sparse gradient (interpolation) */ if ( number_arguments >= 1 && arguments[0] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Arc Angle Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } if ( number_arguments >= 3 && arguments[2] < MagickEpsilon ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : 'Outer Radius Too Small'", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } coeff[0] = -MagickPI2; /* -90, place at top! */ if ( number_arguments >= 1 ) coeff[1] = DegreesToRadians(arguments[0]); else coeff[1] = MagickPI2; /* zero arguments - center is at top */ if ( number_arguments >= 2 ) coeff[0] += DegreesToRadians(arguments[1]); coeff[0] /= Magick2PI; /* normalize radians */ coeff[0] -= MagickRound(coeff[0]); coeff[0] *= Magick2PI; /* de-normalize back to radians */ coeff[3] = (double)image->rows-1; coeff[2] = (double)image->columns/coeff[1] + coeff[3]/2.0; if ( number_arguments >= 3 ) { if ( number_arguments >= 4 ) coeff[3] = arguments[2] - arguments[3]; else coeff[3] *= arguments[2]/coeff[2]; coeff[2] = arguments[2]; } coeff[4] = ((double)image->columns-1.0)/2.0; return(coeff); } case PolarDistortion: case DePolarDistortion: { /* (De)Polar Distortion (same set of arguments) Args: Rmax, Rmin, Xcenter,Ycenter, Afrom,Ato DePolar can also have the extra arguments of Width, Height Coefficients 0 to 5 is the sanatized version first 6 input args Coefficient 6 is the angle to coord ratio and visa-versa Coefficient 7 is the radius to coord ratio and visa-versa WARNING: It is possible for Radius max<min and/or Angle from>to */ if ( number_arguments == 3 || ( number_arguments > 6 && *method == PolarDistortion ) || number_arguments > 8 ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* Rmax - if 0 calculate appropriate value */ if ( number_arguments >= 1 ) coeff[0] = arguments[0]; else coeff[0] = 0.0; /* Rmin - usally 0 */ coeff[1] = number_arguments >= 2 ? 
arguments[1] : 0.0; /* Center X,Y */ if ( number_arguments >= 4 ) { coeff[2] = arguments[2]; coeff[3] = arguments[3]; } else { /* center of actual image */ coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; } /* Angle from,to - about polar center 0 is downward */ coeff[4] = -MagickPI; if ( number_arguments >= 5 ) coeff[4] = DegreesToRadians(arguments[4]); coeff[5] = coeff[4]; if ( number_arguments >= 6 ) coeff[5] = DegreesToRadians(arguments[5]); if ( fabs(coeff[4]-coeff[5]) < MagickEpsilon ) coeff[5] += Magick2PI; /* same angle is a full circle */ /* if radius 0 or negative, its a special value... */ if ( coeff[0] < MagickEpsilon ) { /* Use closest edge if radius == 0 */ if ( fabs(coeff[0]) < MagickEpsilon ) { coeff[0]=MagickMin(fabs(coeff[2]-image->page.x), fabs(coeff[3]-image->page.y)); coeff[0]=MagickMin(coeff[0], fabs(coeff[2]-image->page.x-image->columns)); coeff[0]=MagickMin(coeff[0], fabs(coeff[3]-image->page.y-image->rows)); } /* furthest diagonal if radius == -1 */ if ( fabs(-1.0-coeff[0]) < MagickEpsilon ) { double rx,ry; rx = coeff[2]-image->page.x; ry = coeff[3]-image->page.y; coeff[0] = rx*rx+ry*ry; ry = coeff[3]-image->page.y-image->rows; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); rx = coeff[2]-image->page.x-image->columns; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); ry = coeff[3]-image->page.y; coeff[0] = MagickMax(coeff[0],rx*rx+ry*ry); coeff[0] = sqrt(coeff[0]); } } /* IF Rmax <= 0 or Rmin < 0 OR Rmax < Rmin, THEN error */ if ( coeff[0] < MagickEpsilon || coeff[1] < -MagickEpsilon || (coeff[0]-coeff[1]) < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid Radius", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* converstion ratios */ if ( *method == PolarDistortion ) { coeff[6]=(double) image->columns/(coeff[5]-coeff[4]); coeff[7]=(double) image->rows/(coeff[0]-coeff[1]); } else { /* *method == DePolarDistortion */ coeff[6]=(coeff[5]-coeff[4])/image->columns; coeff[7]=(coeff[0]-coeff[1])/image->rows; } return(coeff); } case Cylinder2PlaneDistortion: case Plane2CylinderDistortion: { /* 3D Cylinder to/from a Tangential Plane Projection between a clinder and flat plain from a point on the center line of the cylinder. The two surfaces coincide in 3D space at the given centers of distortion (perpendicular to projection point) on both images. Args: FOV_arc_width Coefficents: FOV(radians), Radius, center_x,y, dest_center_x,y FOV (Field Of View) the angular field of view of the distortion, across the width of the image, in degrees. The centers are the points of least distortion in the input and resulting images. These centers are however determined later. Coeff 0 is the FOV angle of view of image width in radians Coeff 1 is calculated radius of cylinder. 
Coeff 2,3 center of distortion of input image Coefficents 4,5 Center of Distortion of dest (determined later) */ if ( arguments[0] < MagickEpsilon || arguments[0] > 160.0 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : Invalid FOV Angle", CommandOptionToMnemonic(MagickDistortOptions, *method) ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } coeff[0] = DegreesToRadians(arguments[0]); if ( *method == Cylinder2PlaneDistortion ) /* image is curved around cylinder, so FOV angle (in radians) * scales directly to image X coordinate, according to its radius. */ coeff[1] = (double) image->columns/coeff[0]; else /* radius is distance away from an image with this angular FOV */ coeff[1] = (double) image->columns / ( 2 * tan(coeff[0]/2) ); coeff[2] = (double)(image->columns)/2.0+image->page.x; coeff[3] = (double)(image->rows)/2.0+image->page.y; coeff[4] = coeff[2]; coeff[5] = coeff[3]; /* assuming image size is the same */ return(coeff); } case BarrelDistortion: case BarrelInverseDistortion: { /* Barrel Distortion Rs=(A*Rd^3 + B*Rd^2 + C*Rd + D)*Rd BarrelInv Distortion Rs=Rd/(A*Rd^3 + B*Rd^2 + C*Rd + D) Where Rd is the normalized radius from corner to middle of image Input Arguments are one of the following forms (number of arguments)... 3: A,B,C 4: A,B,C,D 5: A,B,C X,Y 6: A,B,C,D X,Y 8: Ax,Bx,Cx,Dx Ay,By,Cy,Dy 10: Ax,Bx,Cx,Dx Ay,By,Cy,Dy X,Y Returns 10 coefficent values, which are de-normalized (pixel scale) Ax, Bx, Cx, Dx, Ay, By, Cy, Dy, Xc, Yc */ /* Radius de-normalization scaling factor */ double rscale = 2.0/MagickMin((double) image->columns,(double) image->rows); /* sanity check number of args must = 3,4,5,6,8,10 or error */ if ( (number_arguments < 3) || (number_arguments == 7) || (number_arguments == 9) || (number_arguments > 10) ) { coeff=(double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument", "%s : number of arguments", CommandOptionToMnemonic(MagickDistortOptions, *method) ); return((double *) NULL); } /* A,B,C,D coefficients */ coeff[0] = arguments[0]; coeff[1] = arguments[1]; coeff[2] = arguments[2]; if ((number_arguments == 3) || (number_arguments == 5) ) coeff[3] = 1.0 - coeff[0] - coeff[1] - coeff[2]; else coeff[3] = arguments[3]; /* de-normalize the coefficients */ coeff[0] *= pow(rscale,3.0); coeff[1] *= rscale*rscale; coeff[2] *= rscale; /* Y coefficients: as given OR same as X coefficients */ if ( number_arguments >= 8 ) { coeff[4] = arguments[4] * pow(rscale,3.0); coeff[5] = arguments[5] * rscale*rscale; coeff[6] = arguments[6] * rscale; coeff[7] = arguments[7]; } else { coeff[4] = coeff[0]; coeff[5] = coeff[1]; coeff[6] = coeff[2]; coeff[7] = coeff[3]; } /* X,Y Center of Distortion (image coodinates) */ if ( number_arguments == 5 ) { coeff[8] = arguments[3]; coeff[9] = arguments[4]; } else if ( number_arguments == 6 ) { coeff[8] = arguments[4]; coeff[9] = arguments[5]; } else if ( number_arguments == 10 ) { coeff[8] = arguments[8]; coeff[9] = arguments[9]; } else { /* center of the image provided (image coodinates) */ coeff[8] = (double)image->columns/2.0 + image->page.x; coeff[9] = (double)image->rows/2.0 + image->page.y; } return(coeff); } case ShepardsDistortion: { /* Shepards Distortion input arguments are the coefficents! Just check the number of arguments is valid! Args: u1,v1, x1,y1, ... OR : u1,v1, r1,g1,c1, ... 
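Note the single coefficient stored below is the user's power divided by 2 (the mapping works with squared distances), so the default power of 2, inverse squared distance weighting, is stored as 1.0.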
*/ if ( number_arguments%cp_size != 0 || number_arguments < cp_size ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument", "%s : 'requires CP's (4 numbers each)'", CommandOptionToMnemonic(MagickDistortOptions, *method)); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } /* User defined weighting power for Shepard's Method */ { const char *artifact=GetImageArtifact(image,"shepards:power"); if ( artifact != (const char *) NULL ) { coeff[0]=StringToDouble(artifact,(char **) NULL) / 2.0; if ( coeff[0] < MagickEpsilon ) { (void) ThrowMagickException(exception,GetMagickModule(), OptionError,"InvalidArgument","%s", "-define shepards:power" ); coeff=(double *) RelinquishMagickMemory(coeff); return((double *) NULL); } } else coeff[0]=1.0; /* Default power of 2 (Inverse Squared) */ } return(coeff); } default: break; } /* you should never reach this point */ perror("no method handler"); /* just fail assertion */ return((double *) NULL); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s t o r t R e s i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortResizeImage() resize image using the equivalent but slower image % distortion operator. The filter is applied using a EWA cylindrical % resampling. But like resize the final image size is limited to whole pixels % with no effects by virtual-pixels on the result. % % Note that images containing a transparency channel will be twice as slow to % resize as images one without transparency. % % The format of the DistortResizeImage method is: % % Image *DistortResizeImage(const Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the resized image. % % o rows: the number of rows in the resized image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *DistortResizeImage(const Image *image, const size_t columns,const size_t rows,ExceptionInfo *exception) { #define DistortResizeImageTag "Distort/Image" Image *resize_image, *tmp_image; RectangleInfo crop_area; double distort_args[12]; VirtualPixelMethod vp_save; /* Distort resize image. 
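This is implemented as a plain AffineDistortion built from three control point pairs that map the source corners (0,0), (columns,0) and (0,rows) onto the requested size, followed by a crop to remove any virtual-pixel bleeding at the edges.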
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((columns == 0) || (rows == 0)) return((Image *) NULL); /* Do not short-circuit this resize if final image size is unchanged */ (void) ResetMagickMemory(distort_args,0,12*sizeof(double)); distort_args[4]=(double) image->columns; distort_args[6]=(double) columns; distort_args[9]=(double) image->rows; distort_args[11]=(double) rows; vp_save=GetImageVirtualPixelMethod(image); tmp_image=CloneImage(image,0,0,MagickTrue,exception); if ( tmp_image == (Image *) NULL ) return((Image *) NULL); (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod, exception); if (image->alpha_trait == UndefinedPixelTrait) { /* Image has not transparency channel, so we free to use it */ (void) SetImageAlphaChannel(tmp_image,SetAlphaChannel,exception); resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args, MagickTrue,exception), tmp_image=DestroyImage(tmp_image); if ( resize_image == (Image *) NULL ) return((Image *) NULL); (void) SetImageAlphaChannel(resize_image,DeactivateAlphaChannel, exception); } else { /* Image has transparency so handle colors and alpha separatly. Basically we need to separate Virtual-Pixel alpha in the resized image, so only the actual original images alpha channel is used. distort alpha channel separately */ Image *resize_alpha; (void) SetImageAlphaChannel(tmp_image,ExtractAlphaChannel,exception); (void) SetImageAlphaChannel(tmp_image,OpaqueAlphaChannel,exception); resize_alpha=DistortImage(tmp_image,AffineDistortion,12,distort_args, MagickTrue,exception), tmp_image=DestroyImage(tmp_image); if (resize_alpha == (Image *) NULL) return((Image *) NULL); /* distort the actual image containing alpha + VP alpha */ tmp_image=CloneImage(image,0,0,MagickTrue,exception); if ( tmp_image == (Image *) NULL ) return((Image *) NULL); (void) SetImageVirtualPixelMethod(tmp_image,TransparentVirtualPixelMethod, exception); resize_image=DistortImage(tmp_image,AffineDistortion,12,distort_args, MagickTrue,exception), tmp_image=DestroyImage(tmp_image); if ( resize_image == (Image *) NULL) { resize_alpha=DestroyImage(resize_alpha); return((Image *) NULL); } /* replace resize images alpha with the separally distorted alpha */ (void) SetImageAlphaChannel(resize_image,OffAlphaChannel,exception); (void) SetImageAlphaChannel(resize_alpha,OffAlphaChannel,exception); (void) CompositeImage(resize_image,resize_alpha,CopyAlphaCompositeOp, MagickTrue,0,0,exception); resize_alpha=DestroyImage(resize_alpha); } (void) SetImageVirtualPixelMethod(resize_image,vp_save,exception); /* Clean up the results of the Distortion */ crop_area.width=columns; crop_area.height=rows; crop_area.x=0; crop_area.y=0; tmp_image=resize_image; resize_image=CropImage(tmp_image,&crop_area,exception); tmp_image=DestroyImage(tmp_image); if (resize_image != (Image *) NULL) { resize_image->alpha_trait=image->alpha_trait; resize_image->compose=image->compose; resize_image->page.width=0; resize_image->page.height=0; } return(resize_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D i s t o r t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DistortImage() distorts an image using various distortion methods, by % 
mapping color lookups of the source image to a new destination image % usually of the same size as the source image, unless 'bestfit' is set to % true. % % If 'bestfit' is enabled, and distortion allows it, the destination image is % adjusted to ensure the whole source 'image' will just fit within the final % destination image, which will be sized and offset accordingly. Also in % many cases the virtual offset of the source image will be taken into % account in the mapping. % % If the '-verbose' control option has been set, print to standard error the % equivalent '-fx' formula with coefficients for the function, if practical. % % The format of the DistortImage() method is: % % Image *DistortImage(const Image *image,const DistortMethod method, % const size_t number_arguments,const double *arguments, % MagickBooleanType bestfit, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be distorted. % % o method: the method of image distortion. % % ArcDistortion always ignores source image offset, and always % 'bestfit' the destination image with the top left corner offset % relative to the polar mapping center. % % Affine, Perspective, and Bilinear, do least squares fitting of the % distortion when more than the minimum number of control point pairs % are provided. % % Perspective, and Bilinear, fall back to an Affine distortion when less % than 4 control point pairs are provided. While Affine distortions % let you use any number of control point pairs, that is, zero pairs is % a No-Op (viewport only) distortion, one pair is a translation and % two pairs of control points do a scale-rotate-translate, without any % shearing. % % o number_arguments: the number of arguments given. % % o arguments: an array of floating point arguments for this method. % % o bestfit: Attempt to 'bestfit' the size of the resulting image. % This also forces the resulting image to be a 'layered' virtual % canvas image. Can be overridden using 'distort:viewport' setting. % % o exception: return any errors or warnings in this structure % % Extra Controls from Image meta-data (artifacts)... % % o "verbose" % Output to stderr alternatives, internal coefficients, and FX % equivalents for the distortion operation (if feasible). % This forms an extra check of the distortion method, and allows users % access to the internal constants IM calculates for the distortion. % % o "distort:viewport" % Directly set the output image canvas area and offset to use for the % resulting image, rather than use the original image's canvas, or a % calculated 'bestfit' canvas. % % o "distort:scale" % Scale the size of the output canvas by this amount to provide a % method of Zooming, and for super-sampling the results. % % Other settings that can affect results include % % o 'interpolate' For source image lookups (scale enlargements) % % o 'filter' Set filter to use for area-resampling (scale shrinking). 
% Set to 'point' to turn off and use 'interpolate' lookup % instead % */ MagickExport Image *DistortImage(const Image *image, DistortMethod method, const size_t number_arguments,const double *arguments, MagickBooleanType bestfit,ExceptionInfo *exception) { #define DistortImageTag "Distort/Image" double *coeff, output_scaling; Image *distort_image; RectangleInfo geometry; /* geometry of the distorted space viewport */ MagickBooleanType viewport_given; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Handle Special Compound Distortions */ if ( method == ResizeDistortion ) { if ( number_arguments != 2 ) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","Resize", "Invalid number of args: 2 only"); return((Image *) NULL); } distort_image=DistortResizeImage(image,(size_t)arguments[0], (size_t)arguments[1], exception); return(distort_image); } /* Convert input arguments (usually as control points for reverse mapping) into mapping coefficients to apply the distortion. Note that some distortions are mapped to other distortions, and as such do not require specific code after this point. */ coeff = GenerateCoefficients(image, &method, number_arguments, arguments, 0, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Determine the size and offset for a 'bestfit' destination. Usally the four corners of the source image is enough. */ /* default output image bounds, when no 'bestfit' is requested */ geometry.width=image->columns; geometry.height=image->rows; geometry.x=0; geometry.y=0; if ( method == ArcDistortion ) { bestfit = MagickTrue; /* always calculate a 'best fit' viewport */ } /* Work out the 'best fit', (required for ArcDistortion) */ if ( bestfit ) { PointInfo s,d,min,max; /* source, dest coords --mapping--> min, max coords */ MagickBooleanType fix_bounds = MagickTrue; /* enlarge bounds for VP handling */ s.x=s.y=min.x=max.x=min.y=max.y=0.0; /* keep compiler happy */ /* defines to figure out the bounds of the distorted image */ #define InitalBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = max.x = p.x; \ min.y = max.y = p.y; \ } #define ExpandBounds(p) \ { \ /* printf("%lg,%lg -> %lg,%lg\n", s.x,s.y, d.x,d.y); */ \ min.x = MagickMin(min.x,p.x); \ max.x = MagickMax(max.x,p.x); \ min.y = MagickMin(min.y,p.y); \ max.y = MagickMax(max.y,p.y); \ } switch (method) { case AffineDistortion: { double inverse[6]; InvertAffineCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; d.x = inverse[0]*s.x+inverse[1]*s.y+inverse[2]; d.y = inverse[3]*s.x+inverse[4]*s.y+inverse[5]; ExpandBounds(d); break; } case PerspectiveDistortion: { double inverse[8], scale; 
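/* Map the four source corners through the inverted perspective coefficients, dividing by the r() denominator, and expand the destination bounds to enclose them (the same pattern as the affine case above). */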
InvertPerspectiveCoefficients(coeff, inverse); s.x = (double) image->page.x; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); InitalBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); s.x = (double) image->page.x+image->columns; s.y = (double) image->page.y+image->rows; scale=inverse[6]*s.x+inverse[7]*s.y+1.0; scale=PerceptibleReciprocal(scale); d.x = scale*(inverse[0]*s.x+inverse[1]*s.y+inverse[2]); d.y = scale*(inverse[3]*s.x+inverse[4]*s.y+inverse[5]); ExpandBounds(d); break; } case ArcDistortion: { double a, ca, sa; /* Forward Map Corners */ a = coeff[0]-coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; InitalBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); a = coeff[0]+coeff[1]/2; ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); d.x = (coeff[2]-coeff[3])*ca; d.y = (coeff[2]-coeff[3])*sa; ExpandBounds(d); /* Orthogonal points along top of arc */ for( a=(double) (ceil((double) ((coeff[0]-coeff[1]/2.0)/MagickPI2))*MagickPI2); a<(coeff[0]+coeff[1]/2.0); a+=MagickPI2 ) { ca = cos(a); sa = sin(a); d.x = coeff[2]*ca; d.y = coeff[2]*sa; ExpandBounds(d); } /* Convert the angle_to_width and radius_to_height to appropriate scaling factors, to allow faster processing in the mapping function. */ coeff[1] = (double) (Magick2PI*image->columns/coeff[1]); coeff[3] = (double)image->rows/coeff[3]; break; } case PolarDistortion: { if (number_arguments < 2) coeff[2] = coeff[3] = 0.0; min.x = coeff[2]-coeff[0]; max.x = coeff[2]+coeff[0]; min.y = coeff[3]-coeff[0]; max.y = coeff[3]+coeff[0]; /* should be about 1.0 if Rmin = 0 */ coeff[7]=(double) geometry.height/(coeff[0]-coeff[1]); break; } case DePolarDistortion: { /* direct calculation as it needs to tile correctly * for reversibility in a DePolar-Polar cycle */ fix_bounds = MagickFalse; geometry.x = geometry.y = 0; geometry.height = (size_t) ceil(coeff[0]-coeff[1]); geometry.width = (size_t) ceil((coeff[0]-coeff[1])*(coeff[5]-coeff[4])*0.5); /* correct scaling factors relative to new size */ coeff[6]=(coeff[5]-coeff[4])/geometry.width; /* changed width */ coeff[7]=(coeff[0]-coeff[1])/geometry.height; /* should be about 1.0 */ break; } case Cylinder2PlaneDistortion: { /* direct calculation so center of distortion is either a pixel * center, or pixel edge. 
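(the flat output is sized below to width 2*coeff[1]*tan(FOV/2) and height 2*coeff[3]/cos(FOV/2), coeff[1] being the cylinder radius and coeff[3] the input image's vertical center)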
This allows for reversibility of the * distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil( 2.0*coeff[1]*tan(coeff[0]/2.0) ); geometry.height = (size_t) ceil( 2.0*coeff[3]/cos(coeff[0]/2.0) ); /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case Plane2CylinderDistortion: { /* direct calculation center is either pixel center, or pixel edge * so as to allow reversibility of the image distortion */ geometry.x = geometry.y = 0; geometry.width = (size_t) ceil(coeff[0]*coeff[1]); /* FOV * radius */ geometry.height = (size_t) (2*coeff[3]); /* input image height */ /* correct center of distortion relative to new size */ coeff[4] = (double) geometry.width/2.0; coeff[5] = (double) geometry.height/2.0; fix_bounds = MagickFalse; break; } case ShepardsDistortion: case BilinearForwardDistortion: case BilinearReverseDistortion: #if 0 case QuadrilateralDistortion: #endif case PolynomialDistortion: case BarrelDistortion: case BarrelInverseDistortion: default: /* no calculated bestfit available for these distortions */ bestfit = MagickFalse; fix_bounds = MagickFalse; break; } /* Set the output image geometry to calculated 'bestfit'. Yes this tends to 'over do' the file image size, ON PURPOSE! Do not do this for DePolar which needs to be exact for virtual tiling. */ if ( fix_bounds ) { geometry.x = (ssize_t) floor(min.x-0.5); geometry.y = (ssize_t) floor(min.y-0.5); geometry.width=(size_t) ceil(max.x-geometry.x+0.5); geometry.height=(size_t) ceil(max.y-geometry.y+0.5); } } /* end bestfit destination image calculations */ /* The user provided a 'viewport' expert option which may overrides some parts of the current output image geometry. This also overrides its default 'bestfit' setting. 
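For example, -define distort:viewport=640x480+0+0 forces a 640x480 output canvas anchored at the origin, whatever best-fit size was computed above.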
*/ { const char *artifact=GetImageArtifact(image,"distort:viewport"); viewport_given = MagickFalse; if ( artifact != (const char *) NULL ) { MagickStatusType flags=ParseAbsoluteGeometry(artifact,&geometry); if (flags==NoValue) (void) ThrowMagickException(exception,GetMagickModule(), OptionWarning,"InvalidSetting","'%s' '%s'", "distort:viewport",artifact); else viewport_given = MagickTrue; } } /* Verbose output */ if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) { register ssize_t i; char image_gen[MagickPathExtent]; const char *lookup; /* Set destination image size and virtual offset */ if ( bestfit || viewport_given ) { (void) FormatLocaleString(image_gen, MagickPathExtent," -size %.20gx%.20g " "-page %+.20g%+.20g xc: +insert \\\n",(double) geometry.width, (double) geometry.height,(double) geometry.x,(double) geometry.y); lookup="v.p{ xx-v.page.x-.5, yy-v.page.y-.5 }"; } else { image_gen[0] = '\0'; /* no destination to generate */ lookup = "p{ xx-page.x-.5, yy-page.y-.5 }"; /* simplify lookup */ } switch (method) { case AffineDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(6,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortImages"); return((Image *) NULL); } InvertAffineCoefficients(coeff, inverse); CoefficientsToAffineArgs(inverse); (void) FormatLocaleFile(stderr, "Affine Projection:\n"); (void) FormatLocaleFile(stderr, " -distort AffineProjection \\\n '"); for (i=0; i < 5; i++) (void) FormatLocaleFile(stderr, "%lf,", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[5]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Affine Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf;\n", coeff[3], coeff[4], coeff[5]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case PerspectiveDistortion: { double *inverse; inverse = (double *) AcquireQuantumMemory(8,sizeof(*inverse)); if (inverse == (double *) NULL) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed", "%s", "DistortCoefficients"); return((Image *) NULL); } InvertPerspectiveCoefficients(coeff, inverse); (void) FormatLocaleFile(stderr, "Perspective Projection:\n"); (void) FormatLocaleFile(stderr, " -distort PerspectiveProjection \\\n '"); for (i=0; i<4; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "\n "); for (; i<7; i++) (void) FormatLocaleFile(stderr, "%lf, ", inverse[i]); (void) FormatLocaleFile(stderr, "%lf'\n", inverse[7]); inverse = (double *) RelinquishMagickMemory(inverse); (void) FormatLocaleFile(stderr, "Perspective Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " rr=%+lf*ii %+lf*jj + 1;\n", coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " xx=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " yy=(%+lf*ii %+lf*jj %+lf)/rr;\n", coeff[3], coeff[4], coeff[5]); (void) 
FormatLocaleFile(stderr, " rr%s0 ? %s : blue' \\\n", coeff[8] < 0 ? "<" : ">", lookup); break; } case BilinearForwardDistortion: (void) FormatLocaleFile(stderr, "BilinearForward Mapping Equations:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " i = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " j = %+lf*x %+lf*y %+lf*x*y %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); #if 0 /* for debugging */ (void) FormatLocaleFile(stderr, " c8 = %+lf c9 = 2*a = %+lf;\n", coeff[8], coeff[9]); #endif (void) FormatLocaleFile(stderr, "BilinearForward Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", 0.5-coeff[3], 0.5-coeff[7]); (void) FormatLocaleFile(stderr, " bb=%lf*ii %+lf*jj %+lf;\n", coeff[6], -coeff[2], coeff[8]); /* Handle Special degenerate (non-quadratic) or trapezoidal case */ if ( coeff[9] != 0 ) { (void) FormatLocaleFile(stderr, " rt=bb*bb %+lf*(%lf*ii%+lf*jj);\n", -2*coeff[9], coeff[4], -coeff[0]); (void) FormatLocaleFile(stderr, " yy=( -bb + sqrt(rt) ) / %lf;\n", coeff[9]); } else (void) FormatLocaleFile(stderr, " yy=(%lf*ii%+lf*jj)/bb;\n", -coeff[4], coeff[0]); (void) FormatLocaleFile(stderr, " xx=(ii %+lf*yy)/(%lf %+lf*yy);\n", -coeff[1], coeff[0], coeff[2]); if ( coeff[9] != 0 ) (void) FormatLocaleFile(stderr, " (rt < 0 ) ? red : %s'\n", lookup); else (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case BilinearReverseDistortion: #if 0 (void) FormatLocaleFile(stderr, "Polynomial Projection Distort:\n"); (void) FormatLocaleFile(stderr, " -distort PolynomialProjection \\\n"); (void) FormatLocaleFile(stderr, " '1.5, %lf, %lf, %lf, %lf,\n", coeff[3], coeff[0], coeff[1], coeff[2]); (void) FormatLocaleFile(stderr, " %lf, %lf, %lf, %lf'\n", coeff[7], coeff[4], coeff[5], coeff[6]); #endif (void) FormatLocaleFile(stderr, "BilinearReverse Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[0], coeff[1], coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " yy=%+lf*ii %+lf*jj %+lf*ii*jj %+lf;\n", coeff[4], coeff[5], coeff[6], coeff[7]); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; case PolynomialDistortion: { size_t nterms = (size_t) coeff[1]; (void) FormatLocaleFile(stderr, "Polynomial (order %lg, terms %lu), FX Equivelent\n", coeff[0],(unsigned long) nterms); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x+0.5; jj=j+page.y+0.5;\n"); (void) FormatLocaleFile(stderr, " xx ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n yy ="); for (i=0; i<(ssize_t) nterms; i++) { if ( i != 0 && i%4 == 0 ) (void) FormatLocaleFile(stderr, "\n "); (void) FormatLocaleFile(stderr, " %+lf%s", coeff[2+i+nterms], poly_basis_str(i)); } (void) FormatLocaleFile(stderr, ";\n %s' \\\n", lookup); break; } case ArcDistortion: { (void) FormatLocaleFile(stderr, "Arc Distort, Internal Coefficients:\n"); for ( i=0; i<5; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "Arc Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, 
"%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x; jj=j+page.y;\n"); (void) FormatLocaleFile(stderr, " xx=(atan2(jj,ii)%+lf)/(2*pi);\n", -coeff[0]); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*%lf %+lf;\n", coeff[1], coeff[4]); (void) FormatLocaleFile(stderr, " yy=(%lf - hypot(ii,jj)) * %lf;\n", coeff[2], coeff[3]); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case PolarDistortion: { (void) FormatLocaleFile(stderr, "Polar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "Polar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf; jj=j+page.y%+lf;\n", -coeff[2], -coeff[3]); (void) FormatLocaleFile(stderr, " xx=(atan2(ii,jj)%+lf)/(2*pi);\n", -(coeff[4]+coeff[5])/2 ); (void) FormatLocaleFile(stderr, " xx=xx-round(xx);\n"); (void) FormatLocaleFile(stderr, " xx=xx*2*pi*%lf + v.w/2;\n", coeff[6] ); (void) FormatLocaleFile(stderr, " yy=(hypot(ii,jj)%+lf)*%lf;\n", -coeff[1], coeff[7] ); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case DePolarDistortion: { (void) FormatLocaleFile(stderr, "DePolar Distort, Internal Coefficents\n"); for ( i=0; i<8; i++ ) (void) FormatLocaleFile(stderr, " c%.20g = %+lf\n", (double) i, coeff[i]); (void) FormatLocaleFile(stderr, "DePolar Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'aa=(i+.5)*%lf %+lf;\n", coeff[6], +coeff[4] ); (void) FormatLocaleFile(stderr, " rr=(j+.5)*%lf %+lf;\n", coeff[7], +coeff[1] ); (void) FormatLocaleFile(stderr, " xx=rr*sin(aa) %+lf;\n", coeff[2] ); (void) FormatLocaleFile(stderr, " yy=rr*cos(aa) %+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " v.p{xx-.5,yy-.5}' \\\n"); break; } case Cylinder2PlaneDistortion: { (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Cylinder to Plane Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " aa=atan(ii/%+lf);\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*aa%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj*cos(aa)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case Plane2CylinderDistortion: { (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, Internal Coefficents\n"); (void) FormatLocaleFile(stderr, " cylinder_radius = %+lf\n", coeff[1]); (void) FormatLocaleFile(stderr, "Plane to Cylinder Distort, FX Equivelent:\n"); (void) FormatLocaleFile(stderr, "%s", image_gen); (void) FormatLocaleFile(stderr, " -fx 'ii=i+page.x%+lf+0.5; jj=j+page.y%+lf+0.5;\n", -coeff[4], -coeff[5]); (void) FormatLocaleFile(stderr, " ii=ii/%+lf;\n", coeff[1] ); (void) FormatLocaleFile(stderr, " xx=%lf*tan(ii)%+lf;\n", coeff[1], coeff[2] ); (void) FormatLocaleFile(stderr, " yy=jj/cos(ii)%+lf;\n", coeff[3] ); (void) FormatLocaleFile(stderr, " %s' \\\n", lookup); break; } case BarrelDistortion: case BarrelInverseDistortion: { double xc,yc; /* NOTE: This does the barrel roll in pixel coords not image coords ** The internal distortion must do it in image coordinates, ** 
so that is what the center coeff (8,9) is given in. */ xc = ((double)image->columns-1.0)/2.0 + image->page.x; yc = ((double)image->rows-1.0)/2.0 + image->page.y; (void) FormatLocaleFile(stderr, "Barrel%s Distort, FX Equivelent:\n", method == BarrelDistortion ? "" : "Inv"); (void) FormatLocaleFile(stderr, "%s", image_gen); if ( fabs(coeff[8]-xc-0.5) < 0.1 && fabs(coeff[9]-yc-0.5) < 0.1 ) (void) FormatLocaleFile(stderr, " -fx 'xc=(w-1)/2; yc=(h-1)/2;\n"); else (void) FormatLocaleFile(stderr, " -fx 'xc=%lf; yc=%lf;\n", coeff[8]-0.5, coeff[9]-0.5); (void) FormatLocaleFile(stderr, " ii=i-xc; jj=j-yc; rr=hypot(ii,jj);\n"); (void) FormatLocaleFile(stderr, " ii=ii%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[0],coeff[1],coeff[2],coeff[3]); (void) FormatLocaleFile(stderr, " jj=jj%s(%lf*rr*rr*rr %+lf*rr*rr %+lf*rr %+lf);\n", method == BarrelDistortion ? "*" : "/", coeff[4],coeff[5],coeff[6],coeff[7]); (void) FormatLocaleFile(stderr, " v.p{fx*ii+xc,fy*jj+yc}' \\\n"); } default: break; } } /* The user provided a 'scale' expert option will scale the output image size, by the factor given allowing for super-sampling of the distorted image space. Any scaling factors must naturally be halved as a result. */ { const char *artifact; artifact=GetImageArtifact(image,"distort:scale"); output_scaling = 1.0; if (artifact != (const char *) NULL) { output_scaling = fabs(StringToDouble(artifact,(char **) NULL)); geometry.width=(size_t) (output_scaling*geometry.width+0.5); geometry.height=(size_t) (output_scaling*geometry.height+0.5); geometry.x=(ssize_t) (output_scaling*geometry.x+0.5); geometry.y=(ssize_t) (output_scaling*geometry.y+0.5); if ( output_scaling < 0.1 ) { coeff = (double *) RelinquishMagickMemory(coeff); (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s", "-set option:distort:scale" ); return((Image *) NULL); } output_scaling = 1/output_scaling; } } #define ScaleFilter(F,A,B,C,D) \ ScaleResampleFilter( (F), \ output_scaling*(A), output_scaling*(B), \ output_scaling*(C), output_scaling*(D) ) /* Initialize the distort image attributes. */ distort_image=CloneImage(image,geometry.width,geometry.height,MagickTrue, exception); if (distort_image == (Image *) NULL) return((Image *) NULL); /* if image is ColorMapped - change it to DirectClass */ if (SetImageStorageClass(distort_image,DirectClass,exception) == MagickFalse) { distort_image=DestroyImage(distort_image); return((Image *) NULL); } if ((IsPixelInfoGray(&distort_image->background_color) == MagickFalse) && (IsGrayColorspace(distort_image->colorspace) != MagickFalse)) (void) SetImageColorspace(distort_image,sRGBColorspace,exception); if (distort_image->background_color.alpha_trait != UndefinedPixelTrait) distort_image->alpha_trait=BlendPixelTrait; distort_image->page.x=geometry.x; distort_image->page.y=geometry.y; { /* ----- MAIN CODE ----- Sample the source image to each pixel in the distort image. 
*/ CacheView *distort_view; MagickBooleanType status; MagickOffsetType progress; PixelInfo zero; ResampleFilter **magick_restrict resample_filter; ssize_t j; status=MagickTrue; progress=0; GetPixelInfo(distort_image,&zero); resample_filter=AcquireResampleFilterThreadSet(image, UndefinedVirtualPixelMethod,MagickFalse,exception); distort_view=AcquireAuthenticCacheView(distort_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,distort_image,distort_image->rows,1) #endif for (j=0; j < (ssize_t) distort_image->rows; j++) { const int id = GetOpenMPThreadId(); double validity; /* how mathematically valid is this the mapping */ MagickBooleanType sync; PixelInfo pixel, /* pixel color to assign to distorted image */ invalid; /* the color to assign when distort result is invalid */ PointInfo d, s; /* transform destination image x,y to source image x,y */ register ssize_t i; register Quantum *magick_restrict q; q=QueueCacheViewAuthenticPixels(distort_view,0,j,distort_image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } pixel=zero; /* Define constant scaling vectors for Affine Distortions Other methods are either variable, or use interpolated lookup */ switch (method) { case AffineDistortion: ScaleFilter( resample_filter[id], coeff[0], coeff[1], coeff[3], coeff[4] ); break; default: break; } /* Initialize default pixel validity * negative: pixel is invalid output 'alpha_color' * 0.0 to 1.0: antialiased, mix with resample output * 1.0 or greater: use resampled output. */ validity = 1.0; ConformPixelInfo(distort_image,&distort_image->alpha_color,&invalid, exception); for (i=0; i < (ssize_t) distort_image->columns; i++) { /* map pixel coordinate to distortion space coordinate */ d.x = (double) (geometry.x+i+0.5)*output_scaling; d.y = (double) (geometry.y+j+0.5)*output_scaling; s = d; /* default is a no-op mapping */ switch (method) { case AffineDistortion: { s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; s.y=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; /* Affine partial derivitives are constant -- set above */ break; } case PerspectiveDistortion: { double p,q,r,abs_r,abs_c6,abs_c7,scale; /* perspective is a ratio of affines */ p=coeff[0]*d.x+coeff[1]*d.y+coeff[2]; q=coeff[3]*d.x+coeff[4]*d.y+coeff[5]; r=coeff[6]*d.x+coeff[7]*d.y+1.0; /* Pixel Validity -- is it a 'sky' or 'ground' pixel */ validity = (r*coeff[8] < 0.0) ? 
0.0 : 1.0; /* Determine horizon anti-alias blending */ abs_r = fabs(r)*2; abs_c6 = fabs(coeff[6]); abs_c7 = fabs(coeff[7]); if ( abs_c6 > abs_c7 ) { if ( abs_r < abs_c6*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[6]*output_scaling); } else if ( abs_r < abs_c7*output_scaling ) validity = 0.5 - coeff[8]*r/(coeff[7]*output_scaling); /* Perspective Sampling Point (if valid) */ if ( validity > 0.0 ) { /* divide by r affine, for perspective scaling */ scale = 1.0/r; s.x = p*scale; s.y = q*scale; /* Perspective Partial Derivatives or Scaling Vectors */ scale *= scale; ScaleFilter( resample_filter[id], (r*coeff[0] - p*coeff[6])*scale, (r*coeff[1] - p*coeff[7])*scale, (r*coeff[3] - q*coeff[6])*scale, (r*coeff[4] - q*coeff[7])*scale ); } break; } case BilinearReverseDistortion: { /* Reversed Mapped is just a simple polynomial */ s.x=coeff[0]*d.x+coeff[1]*d.y+coeff[2]*d.x*d.y+coeff[3]; s.y=coeff[4]*d.x+coeff[5]*d.y +coeff[6]*d.x*d.y+coeff[7]; /* Bilinear partial derivitives of scaling vectors */ ScaleFilter( resample_filter[id], coeff[0] + coeff[2]*d.y, coeff[1] + coeff[2]*d.x, coeff[4] + coeff[6]*d.y, coeff[5] + coeff[6]*d.x ); break; } case BilinearForwardDistortion: { /* Forward mapped needs reversed polynomial equations * which unfortunatally requires a square root! */ double b,c; d.x -= coeff[3]; d.y -= coeff[7]; b = coeff[6]*d.x - coeff[2]*d.y + coeff[8]; c = coeff[4]*d.x - coeff[0]*d.y; validity = 1.0; /* Handle Special degenerate (non-quadratic) case * Currently without horizon anti-alising */ if ( fabs(coeff[9]) < MagickEpsilon ) s.y = -c/b; else { c = b*b - 2*coeff[9]*c; if ( c < 0.0 ) validity = 0.0; else s.y = ( -b + sqrt(c) )/coeff[9]; } if ( validity > 0.0 ) s.x = ( d.x - coeff[1]*s.y) / ( coeff[0] + coeff[2]*s.y ); /* NOTE: the sign of the square root should be -ve for parts where the source image becomes 'flipped' or 'mirrored'. FUTURE: Horizon handling FUTURE: Scaling factors or Deritives (how?) */ break; } #if 0 case BilinearDistortion: /* Bilinear mapping of any Quadrilateral to any Quadrilateral */ /* UNDER DEVELOPMENT */ break; #endif case PolynomialDistortion: { /* multi-ordered polynomial */ register ssize_t k; ssize_t nterms=(ssize_t)coeff[1]; PointInfo du,dv; /* the du,dv vectors from unit dx,dy -- derivatives */ s.x=s.y=du.x=du.y=dv.x=dv.y=0.0; for(k=0; k < nterms; k++) { s.x += poly_basis_fn(k,d.x,d.y)*coeff[2+k]; du.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k]; du.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k]; s.y += poly_basis_fn(k,d.x,d.y)*coeff[2+k+nterms]; dv.x += poly_basis_dx(k,d.x,d.y)*coeff[2+k+nterms]; dv.y += poly_basis_dy(k,d.x,d.y)*coeff[2+k+nterms]; } ScaleFilter( resample_filter[id], du.x,du.y,dv.x,dv.y ); break; } case ArcDistortion: { /* what is the angle and radius in the destination image */ s.x = (double) ((atan2(d.y,d.x) - coeff[0])/Magick2PI); s.x -= MagickRound(s.x); /* angle */ s.y = hypot(d.x,d.y); /* radius */ /* Arc Distortion Partial Scaling Vectors Are derived by mapping the perpendicular unit vectors dR and dA*R*2PI rather than trying to map dx and dy The results is a very simple orthogonal aligned ellipse. 
*/ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[1]/(Magick2PI*s.y)), 0, 0, coeff[3] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[3] ); /* now scale the angle and radius for source image lookup point */ s.x = s.x*coeff[1] + coeff[4] + image->page.x +0.5; s.y = (coeff[2] - s.y) * coeff[3] + image->page.y; break; } case PolarDistortion: { /* 2D Cartesain to Polar View */ d.x -= coeff[2]; d.y -= coeff[3]; s.x = atan2(d.x,d.y) - (coeff[4]+coeff[5])/2; s.x /= Magick2PI; s.x -= MagickRound(s.x); s.x *= Magick2PI; /* angle - relative to centerline */ s.y = hypot(d.x,d.y); /* radius */ /* Polar Scaling vectors are based on mapping dR and dA vectors This results in very simple orthogonal scaling vectors */ if ( s.y > MagickEpsilon ) ScaleFilter( resample_filter[id], (double) (coeff[6]/(Magick2PI*s.y)), 0, 0, coeff[7] ); else ScaleFilter( resample_filter[id], distort_image->columns*2, 0, 0, coeff[7] ); /* now finish mapping radius/angle to source x,y coords */ s.x = s.x*coeff[6] + (double)image->columns/2.0 + image->page.x; s.y = (s.y-coeff[1])*coeff[7] + image->page.y; break; } case DePolarDistortion: { /* @D Polar to Carteasain */ /* ignore all destination virtual offsets */ d.x = ((double)i+0.5)*output_scaling*coeff[6]+coeff[4]; d.y = ((double)j+0.5)*output_scaling*coeff[7]+coeff[1]; s.x = d.y*sin(d.x) + coeff[2]; s.y = d.y*cos(d.x) + coeff[3]; /* derivatives are usless - better to use SuperSampling */ break; } case Cylinder2PlaneDistortion: { /* 3D Cylinder to Tangential Plane */ double ax, cx; /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; d.x /= coeff[1]; /* x' = x/r */ ax=atan(d.x); /* aa = atan(x/r) = u/r */ cx=cos(ax); /* cx = cos(atan(x/r)) = 1/sqrt(x^2+u^2) */ s.x = coeff[1]*ax; /* u = r*atan(x/r) */ s.y = d.y*cx; /* v = y*cos(u/r) */ /* derivatives... (see personnal notes) */ ScaleFilter( resample_filter[id], 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); #if 0 if ( i == 0 && j == 0 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "phi = %lf\n", (double)(ax * 180.0/MagickPI) ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", 1.0/(1.0+d.x*d.x), 0.0, -d.x*s.y*cx*cx/coeff[1], s.y/d.y ); fflush(stderr); } #endif /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case Plane2CylinderDistortion: { /* 3D Cylinder to Tangential Plane */ /* relative to center of distortion */ d.x -= coeff[4]; d.y -= coeff[5]; /* is pixel valid - horizon of a infinite Virtual-Pixel Plane * (see Anthony Thyssen's personal note) */ validity = (double) (coeff[1]*MagickPI2 - fabs(d.x))/output_scaling + 0.5; if ( validity > 0.0 ) { double cx,tx; d.x /= coeff[1]; /* x'= x/r */ cx = 1/cos(d.x); /* cx = 1/cos(x/r) */ tx = tan(d.x); /* tx = tan(x/r) */ s.x = coeff[1]*tx; /* u = r * tan(x/r) */ s.y = d.y*cx; /* v = y / cos(x/r) */ /* derivatives... 
(see Anthony Thyssen's personal notes) */ ScaleFilter( resample_filter[id], cx*cx, 0.0, s.y*cx/coeff[1], cx ); #if 0 /*if ( i == 0 && j == 0 )*/ if ( d.x == 0.5 && d.y == 0.5 ) { fprintf(stderr, "x=%lf y=%lf u=%lf v=%lf\n", d.x*coeff[1], d.y, s.x, s.y); fprintf(stderr, "radius = %lf phi = %lf validity = %lf\n", coeff[1], (double)(d.x * 180.0/MagickPI), validity ); fprintf(stderr, "du/dx=%lf du/dx=%lf dv/dx=%lf dv/dy=%lf\n", cx*cx, 0.0, s.y*cx/coeff[1], cx); fflush(stderr); } #endif } /* add center of distortion in source */ s.x += coeff[2]; s.y += coeff[3]; break; } case BarrelDistortion: case BarrelInverseDistortion: { /* Lens Barrel Distionion Correction */ double r,fx,fy,gx,gy; /* Radial Polynomial Distortion (de-normalized) */ d.x -= coeff[8]; d.y -= coeff[9]; r = sqrt(d.x*d.x+d.y*d.y); if ( r > MagickEpsilon ) { fx = ((coeff[0]*r + coeff[1])*r + coeff[2])*r + coeff[3]; fy = ((coeff[4]*r + coeff[5])*r + coeff[6])*r + coeff[7]; gx = ((3*coeff[0]*r + 2*coeff[1])*r + coeff[2])/r; gy = ((3*coeff[4]*r + 2*coeff[5])*r + coeff[6])/r; /* adjust functions and scaling for 'inverse' form */ if ( method == BarrelInverseDistortion ) { fx = 1/fx; fy = 1/fy; gx *= -fx*fx; gy *= -fy*fy; } /* Set the source pixel to lookup and EWA derivative vectors */ s.x = d.x*fx + coeff[8]; s.y = d.y*fy + coeff[9]; ScaleFilter( resample_filter[id], gx*d.x*d.x + fx, gx*d.x*d.y, gy*d.x*d.y, gy*d.y*d.y + fy ); } else { /* Special handling to avoid divide by zero when r==0 ** ** The source and destination pixels match in this case ** which was set at the top of the loop using s = d; ** otherwise... s.x=coeff[8]; s.y=coeff[9]; */ if ( method == BarrelDistortion ) ScaleFilter( resample_filter[id], coeff[3], 0, 0, coeff[7] ); else /* method == BarrelInverseDistortion */ /* FUTURE, trap for D==0 causing division by zero */ ScaleFilter( resample_filter[id], 1.0/coeff[3], 0, 0, 1.0/coeff[7] ); } break; } case ShepardsDistortion: { /* Shepards Method, or Inverse Weighted Distance for displacement around the destination image control points The input arguments are the coefficents to the function. This is more of a 'displacement' function rather than an absolute distortion function. Note: We can not determine derivatives using shepards method so only a point sample interpolatation can be used. */ size_t i; double denominator; denominator = s.x = s.y = 0; for(i=0; i<number_arguments; i+=4) { double weight = ((double)d.x-arguments[i+2])*((double)d.x-arguments[i+2]) + ((double)d.y-arguments[i+3])*((double)d.y-arguments[i+3]); weight = pow(weight,coeff[0]); /* shepards power factor */ weight = ( weight < 1.0 ) ? 
1.0 : 1.0/weight; s.x += (arguments[ i ]-arguments[i+2])*weight; s.y += (arguments[i+1]-arguments[i+3])*weight; denominator += weight; } s.x /= denominator; s.y /= denominator; s.x += d.x; /* make it as relative displacement */ s.y += d.y; break; } default: break; /* use the default no-op given above */ } /* map virtual canvas location back to real image coordinate */ if ( bestfit && method != ArcDistortion ) { s.x -= image->page.x; s.y -= image->page.y; } s.x -= 0.5; s.y -= 0.5; if ( validity <= 0.0 ) { /* result of distortion is an invalid pixel - don't resample */ SetPixelViaPixelInfo(distort_image,&invalid,q); } else { /* resample the source image to find its correct color */ (void) ResamplePixelColor(resample_filter[id],s.x,s.y,&pixel, exception); /* if validity between 0.0 and 1.0 mix result with invalid pixel */ if ( validity < 1.0 ) { /* Do a blend of sample color and invalid pixel */ /* should this be a 'Blend', or an 'Over' compose */ CompositePixelInfoBlend(&pixel,validity,&invalid,(1.0-validity), &pixel); } SetPixelViaPixelInfo(distort_image,&pixel,q); } q+=GetPixelChannels(distort_image); } sync=SyncCacheViewAuthenticPixels(distort_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_DistortImage) #endif proceed=SetImageProgress(image,DistortImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } distort_view=DestroyCacheView(distort_view); resample_filter=DestroyResampleFilterThreadSet(resample_filter); if (status == MagickFalse) distort_image=DestroyImage(distort_image); } /* Arc does not return an offset unless 'bestfit' is in effect And the user has not provided an overriding 'viewport'. */ if ( method == ArcDistortion && !bestfit && !viewport_given ) { distort_image->page.x = 0; distort_image->page.y = 0; } coeff = (double *) RelinquishMagickMemory(coeff); return(distort_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o t a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RotateImage() creates a new image that is a rotated copy of an existing % one. Positive angles rotate counter-clockwise (right-hand rule), while % negative angles rotate clockwise. Rotated images are usually larger than % the originals and have 'empty' triangular corners. X axis. Empty % triangles left over from shearing the image are filled with the background % color defined by member 'background_color' of the image. RotateImage % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the RotateImage method is: % % Image *RotateImage(const Image *image,const double degrees, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o degrees: Specifies the number of degrees to rotate the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RotateImage(const Image *image,const double degrees, ExceptionInfo *exception) { Image *distort_image, *rotate_image; double angle; PointInfo shear; size_t rotations; /* Adjust rotation angle. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); angle=degrees; while (angle < -45.0) angle+=360.0; for (rotations=0; angle > 45.0; rotations++) angle-=90.0; rotations%=4; shear.x=(-tan((double) DegreesToRadians(angle)/2.0)); shear.y=sin((double) DegreesToRadians(angle)); if ((fabs(shear.x) < MagickEpsilon) && (fabs(shear.y) < MagickEpsilon)) return(IntegralRotateImage(image,rotations,exception)); distort_image=CloneImage(image,0,0,MagickTrue,exception); if (distort_image == (Image *) NULL) return((Image *) NULL); (void) SetImageVirtualPixelMethod(distort_image,BackgroundVirtualPixelMethod, exception); rotate_image=DistortImage(distort_image,ScaleRotateTranslateDistortion,1, &degrees,MagickTrue,exception); distort_image=DestroyImage(distort_image); return(rotate_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S p a r s e C o l o r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SparseColorImage(), given a set of coordinates, interpolates the colors % found at those coordinates, across the whole image, using various methods. % % The format of the SparseColorImage() method is: % % Image *SparseColorImage(const Image *image, % const SparseColorMethod method,const size_t number_arguments, % const double *arguments,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image to be filled in. % % o method: the method to fill in the gradient between the control points. % % The methods used for SparseColor() are often simular to methods % used for DistortImage(), and even share the same code for determination % of the function coefficents, though with more dimensions (or resulting % values). % % o number_arguments: the number of arguments given. % % o arguments: array of floating point arguments for this method-- % x,y,color_values-- with color_values given as normalized values. % % o exception: return any errors or warnings in this structure % */ MagickExport Image *SparseColorImage(const Image *image, const SparseColorMethod method,const size_t number_arguments, const double *arguments,ExceptionInfo *exception) { #define SparseColorTag "Distort/SparseColor" SparseColorMethod sparse_method; double *coeff; Image *sparse_image; size_t number_colors; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); /* Determine number of color values needed per control point */ number_colors=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) number_colors++; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) number_colors++; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) number_colors++; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) number_colors++; /* Convert input arguments into mapping coefficients, this this case we are mapping (distorting) colors, rather than coordinates. 
*/ { DistortMethod distort_method; distort_method=(DistortMethod) method; if ( distort_method >= SentinelDistortion ) distort_method = ShepardsDistortion; /* Pretend to be Shepards */ coeff = GenerateCoefficients(image, &distort_method, number_arguments, arguments, number_colors, exception); if ( coeff == (double *) NULL ) return((Image *) NULL); /* Note some Distort Methods may fall back to other simpler methods, Currently the only fallback of concern is Bilinear to Affine (Barycentric), which is alaso sparse_colr method. This also ensures correct two and one color Barycentric handling. */ sparse_method = (SparseColorMethod) distort_method; if ( distort_method == ShepardsDistortion ) sparse_method = method; /* return non-distort methods to normal */ if ( sparse_method == InverseColorInterpolate ) coeff[0]=0.5; /* sqrt() the squared distance for inverse */ } /* Verbose output */ if (IsStringTrue(GetImageArtifact(image,"verbose")) != MagickFalse) { switch (sparse_method) { case BarycentricColorInterpolate: { register ssize_t x=0; (void) FormatLocaleFile(stderr, "Barycentric Sparse Color:\n"); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf' \\\n", coeff[x], coeff[x+1], coeff[x+2]),x+=3; break; } case BilinearColorInterpolate: { register ssize_t x=0; (void) FormatLocaleFile(stderr, "Bilinear Sparse Color\n"); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel R -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel G -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) (void) FormatLocaleFile(stderr, " -channel B -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) (void) FormatLocaleFile(stderr, " -channel K -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) (void) FormatLocaleFile(stderr, " -channel A -fx '%+lf*i %+lf*j %+lf*i*j %+lf;\n", coeff[ x ], coeff[x+1], coeff[x+2], coeff[x+3]),x+=4; break; } default: /* sparse color method is too complex for FX emulation */ break; } } /* Generate new image for generated interpolated gradient. 
* ASIDE: Actually we could have just replaced the colors of the original * image, but IM Core policy, is if storage class could change then clone * the image. */ sparse_image=CloneImage(image,0,0,MagickTrue,exception); if (sparse_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(sparse_image,DirectClass,exception) == MagickFalse) { /* if image is ColorMapped - change it to DirectClass */ sparse_image=DestroyImage(sparse_image); return((Image *) NULL); } { /* ----- MAIN CODE ----- */ CacheView *sparse_view; MagickBooleanType status; MagickOffsetType progress; ssize_t j; status=MagickTrue; progress=0; sparse_view=AcquireAuthenticCacheView(sparse_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,sparse_image,sparse_image->rows,1) #endif for (j=0; j < (ssize_t) sparse_image->rows; j++) { MagickBooleanType sync; PixelInfo pixel; /* pixel to assign to distorted image */ register ssize_t i; register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(sparse_view,0,j,sparse_image->columns, 1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } GetPixelInfo(sparse_image,&pixel); for (i=0; i < (ssize_t) image->columns; i++) { GetPixelInfoPixel(image,q,&pixel); switch (sparse_method) { case BarycentricColorInterpolate: { register ssize_t x=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha = coeff[x]*i +coeff[x+1]*j +coeff[x+2], x+=3; break; } case BilinearColorInterpolate: { register ssize_t x=0; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha = coeff[x]*i + coeff[x+1]*j + coeff[x+2]*i*j + coeff[x+3], x+=4; break; } case InverseColorInterpolate: case ShepardsColorInterpolate: { /* Inverse (Squared) Distance weights average (IDW) */ size_t k; double denominator; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=0.0; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=0.0; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=0.0; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=0.0; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=0.0; denominator = 0.0; for(k=0; 
k<number_arguments; k+=2+number_colors) { register ssize_t x=(ssize_t) k+2; double weight = ((double)i-arguments[ k ])*((double)i-arguments[ k ]) + ((double)j-arguments[k+1])*((double)j-arguments[k+1]); weight = pow(weight,coeff[0]); /* inverse of power factor */ weight = ( weight < 1.0 ) ? 1.0 : 1.0/weight; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red += arguments[x++]*weight; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green += arguments[x++]*weight; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue += arguments[x++]*weight; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black += arguments[x++]*weight; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha += arguments[x++]*weight; denominator += weight; } if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red/=denominator; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green/=denominator; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue/=denominator; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black/=denominator; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha/=denominator; break; } case ManhattanColorInterpolate: { size_t k; double minimum = MagickMaximumValue; /* Just use the closest control point you can find! */ for(k=0; k<number_arguments; k+=2+number_colors) { double distance = fabs((double)i-arguments[ k ]) + fabs((double)j-arguments[k+1]); if ( distance < minimum ) { register ssize_t x=(ssize_t) k+2; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=arguments[x++]; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=arguments[x++]; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=arguments[x++]; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=arguments[x++]; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=arguments[x++]; minimum = distance; } } break; } case VoronoiColorInterpolate: default: { size_t k; double minimum = MagickMaximumValue; /* Just use the closest control point you can find! 
*/ for (k=0; k<number_arguments; k+=2+number_colors) { double distance = ((double)i-arguments[ k ])*((double)i-arguments[ k ]) + ((double)j-arguments[k+1])*((double)j-arguments[k+1]); if ( distance < minimum ) { register ssize_t x=(ssize_t) k+2; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=arguments[x++]; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=arguments[x++]; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=arguments[x++]; if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=arguments[x++]; if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=arguments[x++]; minimum = distance; } } break; } } /* set the color directly back into the source image */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) pixel.red=ClampPixel(QuantumRange*pixel.red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) pixel.green=ClampPixel(QuantumRange*pixel.green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) pixel.blue=ClampPixel(QuantumRange*pixel.blue); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) pixel.black=ClampPixel(QuantumRange*pixel.black); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) pixel.alpha=ClampPixel(QuantumRange*pixel.alpha); SetPixelViaPixelInfo(sparse_image,&pixel,q); q+=GetPixelChannels(sparse_image); } sync=SyncCacheViewAuthenticPixels(sparse_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SparseColorImage) #endif proceed=SetImageProgress(image,SparseColorTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } sparse_view=DestroyCacheView(sparse_view); if (status == MagickFalse) sparse_image=DestroyImage(sparse_image); } coeff = (double *) RelinquishMagickMemory(coeff); return(sparse_image); }
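/* Aside (not part of MagickCore): the ShepardsDistortion case in DistortImage and
   the Shepards/Inverse sparse-color methods above share the same inverse-distance
   weighting pattern.  The standalone sketch below isolates that pattern; the name
   idw_sample and its arguments are illustrative only.  power = 1.0 keeps the squared
   distance (Shepards), power = 0.5 takes its square root (Inverse), matching the
   coeff[0] convention used in the loops above; the clamp of weights below 1.0 mirrors
   the guard used there.  Compile with: cc idw_sketch.c -lm */
#include <math.h>
#include <stddef.h>
#include <stdio.h>

/* points: x,y,value triples laid out with the given stride */
static double idw_sample(const double *points, size_t npoints, size_t stride,
  double x, double y, double power)
{
  double numerator = 0.0, denominator = 0.0;
  for (size_t k = 0; k < npoints; k++)
  {
    const double *p = points + k*stride;
    double weight = (x-p[0])*(x-p[0]) + (y-p[1])*(y-p[1]);
    weight = pow(weight, power);
    weight = (weight < 1.0) ? 1.0 : 1.0/weight;  /* avoid blow-up at a control point */
    numerator += p[2]*weight;
    denominator += weight;
  }
  return numerator/denominator;
}

int main(void)
{
  /* three control points: (x, y, value) */
  const double points[] = { 0,0,0.0,  10,0,1.0,  0,10,0.5 };
  printf("value at (5,5) ~ %f\n", idw_sample(points, 3, 3, 5.0, 5.0, 1.0));
  return 0;
}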
betweennessCentrality.c
#include "defs.h" double betweennessCentrality(graph* G, DOUBLE_T* BC) { mcsim_skip_instrs_begin(); VERT_T *S; /* stack of vertices in the order of non-decreasing distance from s. Also used to implicitly represent the BFS queue */ plist* P; /* predecessors of a vertex v on shortest paths from s */ DOUBLE_T* sig; /* No. of shortest paths */ LONG_T* d; /* Length of the shortest path between every pair */ DOUBLE_T* del; /* dependency of vertices */ LONG_T *in_degree, *numEdges, *pSums; LONG_T *pListMem; LONG_T* Srcs; LONG_T *start, *end; LONG_T MAX_NUM_PHASES; LONG_T *psCount; #ifdef _OPENMP omp_lock_t* vLock; LONG_T chunkSize; #endif int seed = 2387; double elapsed_time; #ifdef _OPENMP #pragma omp parallel { #endif VERT_T *myS, *myS_t; LONG_T myS_size; LONG_T i, j, k, p, count, myCount; LONG_T v, w, vert; LONG_T numV, num_traversals, n, m, phase_num; LONG_T tid, nthreads; int* stream; #ifdef DIAGNOSTIC double elapsed_time_part; #endif #ifdef _OPENMP int myLock; tid = omp_get_thread_num(); nthreads = omp_get_num_threads(); #else tid = 0; nthreads = 1; #endif #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds(); } #endif /* numV: no. of vertices to run BFS from = 2^K4approx */ numV = 1<<K4approx; n = G->n; m = G->m; /* Permute vertices */ if (tid == 0) { Srcs = (LONG_T *) malloc(n*sizeof(LONG_T)); #ifdef _OPENMP vLock = (omp_lock_t *) malloc(n*sizeof(omp_lock_t)); #endif } #ifdef _OPENMP #pragma omp barrier #pragma omp for for (i=0; i<n; i++) { omp_init_lock(&vLock[i]); } #endif /* Initialize RNG stream */ stream = init_sprng(0, tid, nthreads, seed, SPRNG_DEFAULT); #ifdef _OPENMP #pragma omp for #endif for (i=0; i<n; i++) { Srcs[i] = i; } #ifdef _OPENMP #pragma omp for #endif for (i=0; i<n; i++) { j = n*sprng(stream); if (i != j) { #ifdef _OPENMP int l1 = omp_test_lock(&vLock[i]); if (l1) { int l2 = omp_test_lock(&vLock[j]); if (l2) { #endif k = Srcs[i]; Srcs[i] = Srcs[j]; Srcs[j] = k; #ifdef _OPENMP omp_unset_lock(&vLock[j]); } omp_unset_lock(&vLock[i]); } #endif } } #ifdef _OPENMP #pragma omp barrier #endif #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() -elapsed_time_part; fprintf(stderr, "Vertex ID permutation time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif /* Start timing code from here */ if (tid == 0) { elapsed_time = get_seconds(); #ifdef VERIFYK4 MAX_NUM_PHASES = 2*sqrt(n); #else MAX_NUM_PHASES = 50; #endif } #ifdef _OPENMP #pragma omp barrier #endif /* Initialize predecessor lists */ /* The size of the predecessor list of each vertex is bounded by its in-degree. 
So we first compute the in-degree of every vertex */ if (tid == 0) { P = (plist *) calloc(n, sizeof(plist)); in_degree = (LONG_T *) calloc(n+1, sizeof(LONG_T)); numEdges = (LONG_T *) malloc((n+1)*sizeof(LONG_T)); pSums = (LONG_T *) malloc(nthreads*sizeof(LONG_T)); } #ifdef _OPENMP #pragma omp barrier #pragma omp for #endif for (i=0; i<m; i++) { v = G->endV[i]; #ifdef _OPENMP omp_set_lock(&vLock[v]); #endif in_degree[v]++; #ifdef _OPENMP omp_unset_lock(&vLock[v]); #endif } prefix_sums(in_degree, numEdges, pSums, n); if (tid == 0) { pListMem = (LONG_T *) malloc(m*sizeof(LONG_T)); } #ifdef _OPENMP #pragma omp barrier #pragma omp for #endif for (i=0; i<n; i++) { P[i].list = pListMem + numEdges[i]; P[i].degree = in_degree[i]; P[i].count = 0; } #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() - elapsed_time_part; fprintf(stderr, "In-degree computation time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif /* Allocate shared memory */ if (tid == 0) { free(in_degree); free(numEdges); free(pSums); S = (VERT_T *) malloc(n*sizeof(VERT_T)); sig = (DOUBLE_T *) malloc(n*sizeof(DOUBLE_T)); d = (LONG_T *) malloc(n*sizeof(LONG_T)); del = (DOUBLE_T *) calloc(n, sizeof(DOUBLE_T)); start = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T)); end = (LONG_T *) malloc(MAX_NUM_PHASES*sizeof(LONG_T)); psCount = (LONG_T *) malloc((nthreads+1)*sizeof(LONG_T)); } /* local memory for each thread */ myS_size = (2*n)/nthreads; myS = (LONG_T *) malloc(myS_size*sizeof(LONG_T)); num_traversals = 0; myCount = 0; #ifdef _OPENMP #pragma omp barrier #endif #ifdef _OPENMP #pragma omp for #endif for (i=0; i<n; i++) { d[i] = -1; } #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() -elapsed_time_part; fprintf(stderr, "BC initialization time: %lf seconds\n", elapsed_time_part); elapsed_time_part = get_seconds(); } #endif mcsim_skip_instrs_end(); for (p=0; p<n; p++) { mcsim_skip_instrs_begin(); i = Srcs[p]; if (G->numEdges[i+1] - G->numEdges[i] == 0) { continue; } else { num_traversals++; } if (num_traversals == numV + 1) { break; } if (tid == 0) { sig[i] = 1; d[i] = 0; S[0] = i; start[0] = 0; end[0] = 1; } count = 1; phase_num = 0; mcsim_skip_instrs_end(); #ifdef _OPENMP #pragma omp barrier #endif while (end[phase_num] - start[phase_num] > 0) { mcsim_skip_instrs_begin(); myCount = 0; #ifdef _OPENMP #pragma omp barrier #pragma omp for schedule(dynamic) #endif for (vert = start[phase_num]; vert < end[phase_num]; vert++) { v = S[vert]; for (j=G->numEdges[v]; j<G->numEdges[v+1]; j++) { #ifndef VERIFYK4 /* Filter edges with weights divisible by 8 */ if ((G->weight[j] & 7) != 0) { #endif w = G->endV[j]; if (v != w) { #ifdef _OPENMP myLock = omp_test_lock(&vLock[w]); if (myLock) { #endif /* w found for the first time? 
*/ if (d[w] == -1) { if (myS_size == myCount) { /* Resize myS */ myS_t = (LONG_T *) malloc(2*myS_size*sizeof(VERT_T)); memcpy(myS_t, myS, myS_size*sizeof(VERT_T)); free(myS); myS = myS_t; myS_size = 2*myS_size; } myS[myCount++] = w; d[w] = d[v] + 1; sig[w] = sig[v]; P[w].list[P[w].count++] = v; } else if (d[w] == d[v] + 1) { sig[w] += sig[v]; P[w].list[P[w].count++] = v; } #ifdef _OPENMP omp_unset_lock(&vLock[w]); } else { if ((d[w] == -1) || (d[w] == d[v]+ 1)) { omp_set_lock(&vLock[w]); sig[w] += sig[v]; P[w].list[P[w].count++] = v; omp_unset_lock(&vLock[w]); } } #endif } #ifndef VERIFYK4 } #endif } } /* Merge all local stacks for next iteration */ phase_num++; psCount[tid+1] = myCount; #ifdef _OPENMP #pragma omp barrier #endif if (tid == 0) { start[phase_num] = end[phase_num-1]; psCount[0] = start[phase_num]; for(k=1; k<=nthreads; k++) { psCount[k] = psCount[k-1] + psCount[k]; } end[phase_num] = psCount[nthreads]; } #ifdef _OPENMP #pragma omp barrier #endif for (k = psCount[tid]; k < psCount[tid+1]; k++) { S[k] = myS[k-psCount[tid]]; } #ifdef _OPENMP #pragma omp barrier #endif count = end[phase_num]; mcsim_skip_instrs_end(); } phase_num--; #ifdef _OPENMP #pragma omp barrier #endif while (phase_num > 0) { mcsim_skip_instrs_begin(); #ifdef UNDOLOG DOUBLE_T *undolog_BC; undolog_BC = (DOUBLE_T *) calloc(N, sizeof(DOUBLE_T)); #endif // UNDOLOG #ifdef REDOLOG DOUBLE_T *redolog_BC; redolog_BC = (DOUBLE_T *) calloc(N, sizeof(DOUBLE_T)); #endif // REDOLOG mcsim_skip_instrs_end(); #ifdef _OPENMP #pragma omp for #endif for (j=start[phase_num]; j<end[phase_num]; j++) { w = S[j]; for (k = 0; k<P[w].count; k++) { v = P[w].list[k]; #ifdef _OPENMP omp_set_lock(&vLock[v]); #endif del[v] = del[v] + sig[v]*(1+del[w])/sig[w]; #ifdef _OPENMP omp_unset_lock(&vLock[v]); #endif } mcsim_tx_begin(); #ifdef BASELINE mcsim_log_begin(); //mcsim_skip_instrs_begin(); #ifdef UNDOLOG undolog_BC[w] = BC[w]; #endif // UNDOLOG #ifdef REDOLOG redolog_BC[w] = BC[w] + del[w]; #endif // REDOLOG //mcsim_skip_instrs_end(); mcsim_mem_fence(); mcsim_log_end(); mcsim_mem_fence(); #endif // BASELINE BC[w] += del[w]; mcsim_tx_end(); #ifdef CLWB mcsim_clwb( &( BC[w] ) ); #endif // CLWB } phase_num--; #ifdef _OPENMP #pragma omp barrier #endif // make sure undolog and redolog data structures are not discarded by compiler mcsim_skip_instrs_begin(); #ifdef UNDOLOG printf("%d\n", (int)(sizeof undolog_BC)); #endif // UNDOLOG #ifdef REDOLOG printf("%d\n", (int)(sizeof redolog_BC)); #endif // REDOLOG mcsim_skip_instrs_end(); } mcsim_skip_instrs_begin(); #ifdef _OPENMP chunkSize = n/nthreads; #pragma omp for schedule(static, chunkSize) #endif for (j=0; j<count; j++) { w = S[j]; d[w] = -1; del[w] = 0; P[w].count = 0; } mcsim_skip_instrs_end(); #ifdef _OPENMP #pragma omp barrier #endif } mcsim_skip_instrs_begin(); #ifdef DIAGNOSTIC if (tid == 0) { elapsed_time_part = get_seconds() -elapsed_time_part; fprintf(stderr, "BC computation time: %lf seconds\n", elapsed_time_part); } #endif #ifdef _OPENMP #pragma omp for for (i=0; i<n; i++) { omp_destroy_lock(&vLock[i]); } #endif free(myS); if (tid == 0) { free(S); free(pListMem); free(P); free(sig); free(d); free(del); #ifdef _OPENMP free(vLock); #endif free(start); free(end); free(psCount); elapsed_time = get_seconds() - elapsed_time; free(Srcs); } free_sprng(stream); #ifdef _OPENMP } #endif /* Verification */ #ifdef VERIFYK4 double BCval; if (SCALE % 2 == 0) { BCval = 0.5*pow(2, 3*SCALE/2)-pow(2, SCALE)+1.0; } else { BCval = 0.75*pow(2, (3*SCALE-1)/2)-pow(2, SCALE)+1.0; } int failed = 0; for (int i=0; 
i<G->n; i++) { if (round(BC[i] - BCval) != 0) { failed = 1; break; } } if (failed) { fprintf(stderr, "Kernel 4 failed validation!\n"); } else { fprintf(stderr, "Kernel 4 validation successful!\n"); } #endif mcsim_skip_instrs_end(); return elapsed_time; }
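/* Aside (not part of the benchmark above): stripped of the mcsim_* instrumentation,
   per-thread stacks, and OpenMP locks, the kernel above is Brandes' betweenness
   centrality: a BFS that records shortest-path counts sig[] and predecessor lists,
   followed by a backward dependency pass into del[] and BC[].  A minimal serial
   sketch over a small unweighted CSR graph; csr_ptr, csr_adj, and NV are
   illustrative names, and all sources are visited rather than a 2^K4approx sample. */
#include <stdio.h>

#define NV 5
static const int csr_ptr[NV+1] = {0, 2, 4, 6, 8, 8};
static const int csr_adj[]     = {1, 2, 2, 3, 3, 4, 4, 0};

int main(void)
{
  double BC[NV] = {0};
  for (int s = 0; s < NV; s++)
  {
    int S[NV], d[NV], Pcount[NV], P[NV][NV], head = 0, tail = 0;
    double sig[NV], del[NV];
    for (int v = 0; v < NV; v++) { d[v] = -1; sig[v] = 0; del[v] = 0; Pcount[v] = 0; }
    d[s] = 0; sig[s] = 1; S[tail++] = s;
    while (head < tail)                       /* BFS: S holds vertices by distance */
    {
      int v = S[head++];
      for (int j = csr_ptr[v]; j < csr_ptr[v+1]; j++)
      {
        int w = csr_adj[j];
        if (d[w] < 0) { d[w] = d[v]+1; S[tail++] = w; }
        if (d[w] == d[v]+1) { sig[w] += sig[v]; P[w][Pcount[w]++] = v; }
      }
    }
    for (int k = tail-1; k > 0; k--)          /* dependency accumulation, source excluded */
    {
      int w = S[k];
      for (int j = 0; j < Pcount[w]; j++)
      {
        int v = P[w][j];
        del[v] += sig[v]*(1.0+del[w])/sig[w];
      }
      BC[w] += del[w];
    }
  }
  for (int v = 0; v < NV; v++) printf("BC[%d] = %g\n", v, BC[v]);
  return 0;
}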
3d7pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 32; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. 
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-7,8)),ceild(8*t2-Nz-28,32));t3<=min(min(min(floord(Nt+Ny-4,32),floord(4*t1+Ny+5,32)),floord(8*t2+Ny+4,32)),floord(8*t1-8*t2+Nz+Ny+3,32));t3++) { for (t4=max(max(max(0,ceild(t1-63,64)),ceild(8*t2-Nz-252,256)),ceild(32*t3-Ny-252,256));t4<=min(min(min(min(floord(Nt+Nx-4,256),floord(4*t1+Nx+5,256)),floord(8*t2+Nx+4,256)),floord(32*t3+Nx+28,256)),floord(8*t1-8*t2+Nz+Nx+3,256));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),32*t3-Ny+2),256*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),32*t3+30),256*t4+254),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(32*t3,t5+1);t7<=min(32*t3+31,t5+Ny-2);t7++) { lbv=max(256*t4,t5+1); ubv=min(256*t4+255,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = (((((((coef[0][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (coef[1][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)])) + (coef[2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)])) + (coef[3][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1])) + (coef[4][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)])) + (coef[5][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)])) + (coef[6][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1]));; } } } } } } } } } /* 
End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
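/* Aside (not part of the benchmark above): the tiled CLooG nest computes the same
   update as the plain, untiled loop nest below.  The time dimension is t5 and the
   spatial indices are (t6-t5, t7-t5, t8-t5); arrays use the A[(t+1)%2] double
   buffering set up in main().  This reference form is a reading aid under those
   assumptions, not a drop-in replacement for the transformed code. */
void stencil_3d7pt_var_ref(int Nt, int Nz, int Ny, int Nx,
  double ****A, double ****coef)
{
  for (int t = 0; t < Nt-1; t++)
    for (int i = 1; i < Nz-1; i++)
      for (int j = 1; j < Ny-1; j++)
        for (int k = 1; k < Nx-1; k++)
          A[(t+1)%2][i][j][k] =
              coef[0][i][j][k]*A[t%2][i][j][k]
            + coef[1][i][j][k]*A[t%2][i-1][j][k]
            + coef[2][i][j][k]*A[t%2][i][j-1][k]
            + coef[3][i][j][k]*A[t%2][i][j][k-1]
            + coef[4][i][j][k]*A[t%2][i+1][j][k]
            + coef[5][i][j][k]*A[t%2][i][j+1][k]
            + coef[6][i][j][k]*A[t%2][i][j][k+1];
}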
GB_binop__min_uint64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__min_uint64) // A.*B function (eWiseMult): GB (_AemultB_08__min_uint64) // A.*B function (eWiseMult): GB (_AemultB_02__min_uint64) // A.*B function (eWiseMult): GB (_AemultB_04__min_uint64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__min_uint64) // A*D function (colscale): GB (_AxD__min_uint64) // D*A function (rowscale): GB (_DxB__min_uint64) // C+=B function (dense accum): GB (_Cdense_accumB__min_uint64) // C+=b function (dense accum): GB (_Cdense_accumb__min_uint64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__min_uint64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__min_uint64) // C=scalar+B GB (_bind1st__min_uint64) // C=scalar+B' GB (_bind1st_tran__min_uint64) // C=A+scalar GB (_bind2nd__min_uint64) // C=A'+scalar GB (_bind2nd_tran__min_uint64) // C type: uint64_t // A type: uint64_t // A pattern? 0 // B type: uint64_t // B pattern? 0 // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ uint64_t #define GB_BTYPE \ uint64_t #define GB_CTYPE \ uint64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_IMIN (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_UINT64 || GxB_NO_MIN_UINT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
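// Illustration (not part of the generated kernels): with this operator the
// GB_BINOP macro defined above expands inside the shared templates roughly as
//
//      uint64_t z ;
//      GB_BINOP (z, aij, bij, i, j) ;      // z = GB_IMIN (aij, bij)
//
// where GB_IMIN is assumed to be the usual integer minimum,
// ((x) < (y)) ? (x) : (y), so each kernel below computes the entrywise
// minimum of its uint64_t operands.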
void GB (_Cdense_ewise3_accum__min_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__min_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__min_uint64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__min_uint64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint64_t uint64_t bwork = (*((uint64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__min_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__min_uint64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *restrict Cx = (uint64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__min_uint64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint64_t alpha_scalar ; uint64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint64_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__min_uint64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__min_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__min_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__min_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__min_uint64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_IMIN (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__min_uint64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_IMIN (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (x, aij) ; \ } GrB_Info GB (_bind1st_tran__min_uint64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict 
A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_IMIN (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__min_uint64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
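/*
   Illustrative sketch (not part of the generated kernel file above, nor of
   semantics.c below): how the GB (_AaddB__min_uint64) family of kernels is
   typically reached from the user-level GraphBLAS C API, via eWiseAdd with
   the built-in GrB_MIN_UINT64 operator.  Which internal kernel is actually
   dispatched depends on build options, matrix formats, and masks; this only
   shows the calling pattern.  Error checking of the GrB_Info return values
   is omitted for brevity.
*/

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include "GraphBLAS.h"

int main (void)
{
    GrB_init (GrB_NONBLOCKING) ;

    GrB_Matrix A, B, C ;
    GrB_Matrix_new (&A, GrB_UINT64, 2, 2) ;
    GrB_Matrix_new (&B, GrB_UINT64, 2, 2) ;
    GrB_Matrix_new (&C, GrB_UINT64, 2, 2) ;

    // A(0,0) = 7 and B(0,0) = 3: the entry appears in both matrices, so
    // C(0,0) = min (7,3) = 3.  A(1,1) = 5 appears only in A, so the
    // eWiseAdd "set union" semantics copy it into C(1,1) unchanged.
    GrB_Matrix_setElement_UINT64 (A, 7, 0, 0) ;
    GrB_Matrix_setElement_UINT64 (B, 3, 0, 0) ;
    GrB_Matrix_setElement_UINT64 (A, 5, 1, 1) ;

    // C = A "+" B where "+" is MIN: the operation the generated
    // eWiseAdd kernel for MIN_UINT64 implements internally.
    GrB_Matrix_eWiseAdd_BinaryOp (C, NULL, NULL, GrB_MIN_UINT64, A, B, NULL) ;

    uint64_t c00 = 0, c11 = 0 ;
    GrB_Matrix_extractElement_UINT64 (&c00, C, 0, 0) ;
    GrB_Matrix_extractElement_UINT64 (&c11, C, 1, 1) ;
    printf ("C(0,0) = %" PRIu64 ", C(1,1) = %" PRIu64 "\n", c00, c11) ;

    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&B) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize () ;
    return (0) ;
}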
semantics.c
/* Perform the semantic phase of parsing, i.e., the process of building tree structure, checking semantic consistency, and building RTL. These routines are used both during actual parsing and during the instantiation of template functions. Copyright (C) 1998-2020 Free Software Foundation, Inc. Written by Mark Mitchell ([email protected]) based on code found formerly in parse.y and pt.c. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "target.h" #include "bitmap.h" #include "cp-tree.h" #include "stringpool.h" #include "cgraph.h" #include "stmt.h" #include "varasm.h" #include "stor-layout.h" #include "c-family/c-objc.h" #include "tree-inline.h" #include "intl.h" #include "tree-iterator.h" #include "omp-general.h" #include "convert.h" #include "stringpool.h" #include "attribs.h" #include "gomp-constants.h" #include "predict.h" #include "memmodel.h" /* There routines provide a modular interface to perform many parsing operations. They may therefore be used during actual parsing, or during template instantiation, which may be regarded as a degenerate form of parsing. */ static tree maybe_convert_cond (tree); static tree finalize_nrv_r (tree *, int *, void *); static tree capture_decltype (tree); /* Used for OpenMP non-static data member privatization. */ static hash_map<tree, tree> *omp_private_member_map; static vec<tree> omp_private_member_vec; static bool omp_private_member_ignore_next; /* Deferred Access Checking Overview --------------------------------- Most C++ expressions and declarations require access checking to be performed during parsing. However, in several cases, this has to be treated differently. For member declarations, access checking has to be deferred until more information about the declaration is known. For example: class A { typedef int X; public: X f(); }; A::X A::f(); A::X g(); When we are parsing the function return type `A::X', we don't really know if this is allowed until we parse the function name. Furthermore, some contexts require that access checking is never performed at all. These include class heads, and template instantiations. Typical use of access checking functions is described here: 1. When we enter a context that requires certain access checking mode, the function `push_deferring_access_checks' is called with DEFERRING argument specifying the desired mode. Access checking may be performed immediately (dk_no_deferred), deferred (dk_deferred), or not performed (dk_no_check). 2. When a declaration such as a type, or a variable, is encountered, the function `perform_or_defer_access_check' is called. It maintains a vector of all deferred checks. 3. The global `current_class_type' or `current_function_decl' is then setup by the parser. `enforce_access' relies on these information to check access. 4. 
Upon exiting the context mentioned in step 1, `perform_deferred_access_checks' is called to check all declaration stored in the vector. `pop_deferring_access_checks' is then called to restore the previous access checking mode. In case of parsing error, we simply call `pop_deferring_access_checks' without `perform_deferred_access_checks'. */ struct GTY(()) deferred_access { /* A vector representing name-lookups for which we have deferred checking access controls. We cannot check the accessibility of names used in a decl-specifier-seq until we know what is being declared because code like: class A { class B {}; B* f(); } A::B* A::f() { return 0; } is valid, even though `A::B' is not generally accessible. */ vec<deferred_access_check, va_gc> * GTY(()) deferred_access_checks; /* The current mode of access checks. */ enum deferring_kind deferring_access_checks_kind; }; /* Data for deferred access checking. */ static GTY(()) vec<deferred_access, va_gc> *deferred_access_stack; static GTY(()) unsigned deferred_access_no_check; /* Save the current deferred access states and start deferred access checking iff DEFER_P is true. */ void push_deferring_access_checks (deferring_kind deferring) { /* For context like template instantiation, access checking disabling applies to all nested context. */ if (deferred_access_no_check || deferring == dk_no_check) deferred_access_no_check++; else { deferred_access e = {NULL, deferring}; vec_safe_push (deferred_access_stack, e); } } /* Save the current deferred access states and start deferred access checking, continuing the set of deferred checks in CHECKS. */ void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> * checks) { push_deferring_access_checks (dk_deferred); if (!deferred_access_no_check) deferred_access_stack->last().deferred_access_checks = checks; } /* Resume deferring access checks again after we stopped doing this previously. */ void resume_deferring_access_checks (void) { if (!deferred_access_no_check) deferred_access_stack->last().deferring_access_checks_kind = dk_deferred; } /* Stop deferring access checks. */ void stop_deferring_access_checks (void) { if (!deferred_access_no_check) deferred_access_stack->last().deferring_access_checks_kind = dk_no_deferred; } /* Discard the current deferred access checks and restore the previous states. */ void pop_deferring_access_checks (void) { if (deferred_access_no_check) deferred_access_no_check--; else deferred_access_stack->pop (); } /* Returns a TREE_LIST representing the deferred checks. The TREE_PURPOSE of each node is the type through which the access occurred; the TREE_VALUE is the declaration named. */ vec<deferred_access_check, va_gc> * get_deferred_access_checks (void) { if (deferred_access_no_check) return NULL; else return (deferred_access_stack->last().deferred_access_checks); } /* Take current deferred checks and combine with the previous states if we also defer checks previously. Otherwise perform checks now. */ void pop_to_parent_deferring_access_checks (void) { if (deferred_access_no_check) deferred_access_no_check--; else { vec<deferred_access_check, va_gc> *checks; deferred_access *ptr; checks = (deferred_access_stack->last ().deferred_access_checks); deferred_access_stack->pop (); ptr = &deferred_access_stack->last (); if (ptr->deferring_access_checks_kind == dk_no_deferred) { /* Check access. */ perform_access_checks (checks, tf_warning_or_error); } else { /* Merge with parent. 
*/ int i, j; deferred_access_check *chk, *probe; FOR_EACH_VEC_SAFE_ELT (checks, i, chk) { FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, j, probe) { if (probe->binfo == chk->binfo && probe->decl == chk->decl && probe->diag_decl == chk->diag_decl) goto found; } /* Insert into parent's checks. */ vec_safe_push (ptr->deferred_access_checks, *chk); found:; } } } } /* Perform the access checks in CHECKS. The TREE_PURPOSE of each node is the BINFO indicating the qualifying scope used to access the DECL node stored in the TREE_VALUE of the node. If CHECKS is empty or we aren't in SFINAE context or all the checks succeed return TRUE, otherwise FALSE. */ bool perform_access_checks (vec<deferred_access_check, va_gc> *checks, tsubst_flags_t complain) { int i; deferred_access_check *chk; location_t loc = input_location; bool ok = true; if (!checks) return true; FOR_EACH_VEC_SAFE_ELT (checks, i, chk) { input_location = chk->loc; ok &= enforce_access (chk->binfo, chk->decl, chk->diag_decl, complain); } input_location = loc; return (complain & tf_error) ? true : ok; } /* Perform the deferred access checks. After performing the checks, we still have to keep the list `deferred_access_stack->deferred_access_checks' since we may want to check access for them again later in a different context. For example: class A { typedef int X; static X a; }; A::X A::a, x; // No error for `A::a', error for `x' We have to perform deferred access of `A::X', first with `A::a', next with `x'. Return value like perform_access_checks above. */ bool perform_deferred_access_checks (tsubst_flags_t complain) { return perform_access_checks (get_deferred_access_checks (), complain); } /* Defer checking the accessibility of DECL, when looked up in BINFO. DIAG_DECL is the declaration to use to print diagnostics. Return value like perform_access_checks above. If non-NULL, report failures to AFI. */ bool perform_or_defer_access_check (tree binfo, tree decl, tree diag_decl, tsubst_flags_t complain, access_failure_info *afi) { int i; deferred_access *ptr; deferred_access_check *chk; /* Exit if we are in a context that no access checking is performed. */ if (deferred_access_no_check) return true; gcc_assert (TREE_CODE (binfo) == TREE_BINFO); ptr = &deferred_access_stack->last (); /* If we are not supposed to defer access checks, just check now. */ if (ptr->deferring_access_checks_kind == dk_no_deferred) { bool ok = enforce_access (binfo, decl, diag_decl, complain, afi); return (complain & tf_error) ? true : ok; } /* See if we are already going to perform this check. */ FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, i, chk) { if (chk->decl == decl && chk->binfo == binfo && chk->diag_decl == diag_decl) { return true; } } /* If not, record the check. */ deferred_access_check new_access = {binfo, decl, diag_decl, input_location}; vec_safe_push (ptr->deferred_access_checks, new_access); return true; } /* Returns nonzero if the current statement is a full expression, i.e. temporaries created during that statement should be destroyed at the end of the statement. */ int stmts_are_full_exprs_p (void) { return current_stmt_tree ()->stmts_are_full_exprs_p; } /* T is a statement. Add it to the statement-tree. This is the C++ version. The C/ObjC frontends have a slightly different version of this function. 
*/ tree add_stmt (tree t) { enum tree_code code = TREE_CODE (t); if (EXPR_P (t) && code != LABEL_EXPR) { if (!EXPR_HAS_LOCATION (t)) SET_EXPR_LOCATION (t, input_location); /* When we expand a statement-tree, we must know whether or not the statements are full-expressions. We record that fact here. */ if (STATEMENT_CODE_P (TREE_CODE (t))) STMT_IS_FULL_EXPR_P (t) = stmts_are_full_exprs_p (); } if (code == LABEL_EXPR || code == CASE_LABEL_EXPR) STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1; /* Add T to the statement-tree. Non-side-effect statements need to be recorded during statement expressions. */ gcc_checking_assert (!stmt_list_stack->is_empty ()); append_to_statement_list_force (t, &cur_stmt_list); return t; } /* Returns the stmt_tree to which statements are currently being added. */ stmt_tree current_stmt_tree (void) { return (cfun ? &cfun->language->base.x_stmt_tree : &scope_chain->x_stmt_tree); } /* If statements are full expressions, wrap STMT in a CLEANUP_POINT_EXPR. */ static tree maybe_cleanup_point_expr (tree expr) { if (!processing_template_decl && stmts_are_full_exprs_p ()) expr = fold_build_cleanup_point_expr (TREE_TYPE (expr), expr); return expr; } /* Like maybe_cleanup_point_expr except have the type of the new expression be void so we don't need to create a temporary variable to hold the inner expression. The reason why we do this is because the original type might be an aggregate and we cannot create a temporary variable for that type. */ tree maybe_cleanup_point_expr_void (tree expr) { if (!processing_template_decl && stmts_are_full_exprs_p ()) expr = fold_build_cleanup_point_expr (void_type_node, expr); return expr; } /* Create a declaration statement for the declaration given by the DECL. */ void add_decl_expr (tree decl) { tree r = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl); if (DECL_INITIAL (decl) || (DECL_SIZE (decl) && TREE_SIDE_EFFECTS (DECL_SIZE (decl)))) r = maybe_cleanup_point_expr_void (r); add_stmt (r); } /* Finish a scope. */ tree do_poplevel (tree stmt_list) { tree block = NULL; if (stmts_are_full_exprs_p ()) block = poplevel (kept_level_p (), 1, 0); stmt_list = pop_stmt_list (stmt_list); if (!processing_template_decl) { stmt_list = c_build_bind_expr (input_location, block, stmt_list); /* ??? See c_end_compound_stmt re statement expressions. */ } return stmt_list; } /* Begin a new scope. */ static tree do_pushlevel (scope_kind sk) { tree ret = push_stmt_list (); if (stmts_are_full_exprs_p ()) begin_scope (sk, NULL); return ret; } /* Queue a cleanup. CLEANUP is an expression/statement to be executed when the current scope is exited. EH_ONLY is true when this is not meant to apply to normal control flow transfer. */ void push_cleanup (tree decl, tree cleanup, bool eh_only) { tree stmt = build_stmt (input_location, CLEANUP_STMT, NULL, cleanup, decl); CLEANUP_EH_ONLY (stmt) = eh_only; add_stmt (stmt); CLEANUP_BODY (stmt) = push_stmt_list (); } /* Simple infinite loop tracking for -Wreturn-type. We keep a stack of all the current loops, represented by 'NULL_TREE' if we've seen a possible exit, and 'error_mark_node' if not. This is currently used only to suppress the warning about a function with no return statements, and therefore we don't bother noting returns as possible exits. We also don't bother with gotos. */ static void begin_maybe_infinite_loop (tree cond) { /* Only track this while parsing a function, not during instantiation. 
*/ if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl) && !processing_template_decl)) return; bool maybe_infinite = true; if (cond) { cond = fold_non_dependent_expr (cond); maybe_infinite = integer_nonzerop (cond); } vec_safe_push (cp_function_chain->infinite_loops, maybe_infinite ? error_mark_node : NULL_TREE); } /* A break is a possible exit for the current loop. */ void break_maybe_infinite_loop (void) { if (!cfun) return; cp_function_chain->infinite_loops->last() = NULL_TREE; } /* If we reach the end of the loop without seeing a possible exit, we have an infinite loop. */ static void end_maybe_infinite_loop (tree cond) { if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl) && !processing_template_decl)) return; tree current = cp_function_chain->infinite_loops->pop(); if (current != NULL_TREE) { cond = fold_non_dependent_expr (cond); if (integer_nonzerop (cond)) current_function_infinite_loop = 1; } } /* Begin a conditional that might contain a declaration. When generating normal code, we want the declaration to appear before the statement containing the conditional. When generating template code, we want the conditional to be rendered as the raw DECL_EXPR. */ static void begin_cond (tree *cond_p) { if (processing_template_decl) *cond_p = push_stmt_list (); } /* Finish such a conditional. */ static void finish_cond (tree *cond_p, tree expr) { if (processing_template_decl) { tree cond = pop_stmt_list (*cond_p); if (expr == NULL_TREE) /* Empty condition in 'for'. */ gcc_assert (empty_expr_stmt_p (cond)); else if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; else if (!empty_expr_stmt_p (cond)) expr = build2 (COMPOUND_EXPR, TREE_TYPE (expr), cond, expr); } *cond_p = expr; } /* If *COND_P specifies a conditional with a declaration, transform the loop such that while (A x = 42) { } for (; A x = 42;) { } becomes while (true) { A x = 42; if (!x) break; } for (;;) { A x = 42; if (!x) break; } The statement list for BODY will be empty if the conditional did not declare anything. */ static void simplify_loop_decl_cond (tree *cond_p, tree body) { tree cond, if_stmt; if (!TREE_SIDE_EFFECTS (body)) return; cond = *cond_p; *cond_p = boolean_true_node; if_stmt = begin_if_stmt (); cond = cp_build_unary_op (TRUTH_NOT_EXPR, cond, false, tf_warning_or_error); finish_if_stmt_cond (cond, if_stmt); finish_break_stmt (); finish_then_clause (if_stmt); finish_if_stmt (if_stmt); } /* Finish a goto-statement. */ tree finish_goto_stmt (tree destination) { if (identifier_p (destination)) destination = lookup_label (destination); /* We warn about unused labels with -Wunused. That means we have to mark the used labels as used. */ if (TREE_CODE (destination) == LABEL_DECL) TREE_USED (destination) = 1; else { destination = mark_rvalue_use (destination); if (!processing_template_decl) { destination = cp_convert (ptr_type_node, destination, tf_warning_or_error); if (error_operand_p (destination)) return NULL_TREE; destination = fold_build_cleanup_point_expr (TREE_TYPE (destination), destination); } } check_goto (destination); add_stmt (build_predict_expr (PRED_GOTO, NOT_TAKEN)); return add_stmt (build_stmt (input_location, GOTO_EXPR, destination)); } /* COND is the condition-expression for an if, while, etc., statement. Convert it to a boolean value, if appropriate. In addition, verify sequence points if -Wsequence-point is enabled. */ static tree maybe_convert_cond (tree cond) { /* Empty conditions remain empty. 
*/ if (!cond) return NULL_TREE; /* Wait until we instantiate templates before doing conversion. */ if (type_dependent_expression_p (cond)) return cond; if (warn_sequence_point && !processing_template_decl) verify_sequence_points (cond); /* Do the conversion. */ cond = convert_from_reference (cond); if (TREE_CODE (cond) == MODIFY_EXPR && !TREE_NO_WARNING (cond) && warn_parentheses && warning_at (cp_expr_loc_or_input_loc (cond), OPT_Wparentheses, "suggest parentheses around " "assignment used as truth value")) TREE_NO_WARNING (cond) = 1; return condition_conversion (cond); } /* Finish an expression-statement, whose EXPRESSION is as indicated. */ tree finish_expr_stmt (tree expr) { tree r = NULL_TREE; location_t loc = EXPR_LOCATION (expr); if (expr != NULL_TREE) { /* If we ran into a problem, make sure we complained. */ gcc_assert (expr != error_mark_node || seen_error ()); if (!processing_template_decl) { if (warn_sequence_point) verify_sequence_points (expr); expr = convert_to_void (expr, ICV_STATEMENT, tf_warning_or_error); } else if (!type_dependent_expression_p (expr)) convert_to_void (build_non_dependent_expr (expr), ICV_STATEMENT, tf_warning_or_error); if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; /* Simplification of inner statement expressions, compound exprs, etc can result in us already having an EXPR_STMT. */ if (TREE_CODE (expr) != CLEANUP_POINT_EXPR) { if (TREE_CODE (expr) != EXPR_STMT) expr = build_stmt (loc, EXPR_STMT, expr); expr = maybe_cleanup_point_expr_void (expr); } r = add_stmt (expr); } return r; } /* Begin an if-statement. Returns a newly created IF_STMT if appropriate. */ tree begin_if_stmt (void) { tree r, scope; scope = do_pushlevel (sk_cond); r = build_stmt (input_location, IF_STMT, NULL_TREE, NULL_TREE, NULL_TREE, scope); current_binding_level->this_entity = r; begin_cond (&IF_COND (r)); return r; } /* Returns true if FN, a CALL_EXPR, is a call to std::is_constant_evaluated or __builtin_is_constant_evaluated. */ static bool is_std_constant_evaluated_p (tree fn) { /* std::is_constant_evaluated takes no arguments. */ if (call_expr_nargs (fn) != 0) return false; tree fndecl = cp_get_callee_fndecl_nofold (fn); if (fndecl == NULL_TREE) return false; if (fndecl_built_in_p (fndecl, CP_BUILT_IN_IS_CONSTANT_EVALUATED, BUILT_IN_FRONTEND)) return true; if (!decl_in_std_namespace_p (fndecl)) return false; tree name = DECL_NAME (fndecl); return name && id_equal (name, "is_constant_evaluated"); } /* Process the COND of an if-statement, which may be given by IF_STMT. */ tree finish_if_stmt_cond (tree cond, tree if_stmt) { cond = maybe_convert_cond (cond); if (IF_STMT_CONSTEXPR_P (if_stmt) && !type_dependent_expression_p (cond) && require_constant_expression (cond) && !instantiation_dependent_expression_p (cond) /* Wait until instantiation time, since only then COND has been converted to bool. */ && TYPE_MAIN_VARIANT (TREE_TYPE (cond)) == boolean_type_node) { /* if constexpr (std::is_constant_evaluated()) is always true, so give the user a clue. 
*/ if (warn_tautological_compare) { tree t = cond; if (TREE_CODE (t) == CLEANUP_POINT_EXPR) t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == CALL_EXPR && is_std_constant_evaluated_p (t)) warning_at (EXPR_LOCATION (cond), OPT_Wtautological_compare, "%qs always evaluates to true in %<if constexpr%>", "std::is_constant_evaluated"); } cond = instantiate_non_dependent_expr (cond); cond = cxx_constant_value (cond, NULL_TREE); } finish_cond (&IF_COND (if_stmt), cond); add_stmt (if_stmt); THEN_CLAUSE (if_stmt) = push_stmt_list (); return cond; } /* Finish the then-clause of an if-statement, which may be given by IF_STMT. */ tree finish_then_clause (tree if_stmt) { THEN_CLAUSE (if_stmt) = pop_stmt_list (THEN_CLAUSE (if_stmt)); return if_stmt; } /* Begin the else-clause of an if-statement. */ void begin_else_clause (tree if_stmt) { ELSE_CLAUSE (if_stmt) = push_stmt_list (); } /* Finish the else-clause of an if-statement, which may be given by IF_STMT. */ void finish_else_clause (tree if_stmt) { ELSE_CLAUSE (if_stmt) = pop_stmt_list (ELSE_CLAUSE (if_stmt)); } /* Callback for cp_walk_tree to mark all {VAR,PARM}_DECLs in a tree as read. */ static tree maybe_mark_exp_read_r (tree *tp, int *, void *) { tree t = *tp; if (VAR_P (t) || TREE_CODE (t) == PARM_DECL) mark_exp_read (t); return NULL_TREE; } /* Finish an if-statement. */ void finish_if_stmt (tree if_stmt) { tree scope = IF_SCOPE (if_stmt); IF_SCOPE (if_stmt) = NULL; if (IF_STMT_CONSTEXPR_P (if_stmt)) { /* Prevent various -Wunused warnings. We might not instantiate either of these branches, so we would not mark the variables used in that branch as read. */ cp_walk_tree_without_duplicates (&THEN_CLAUSE (if_stmt), maybe_mark_exp_read_r, NULL); cp_walk_tree_without_duplicates (&ELSE_CLAUSE (if_stmt), maybe_mark_exp_read_r, NULL); } add_stmt (do_poplevel (scope)); } /* Begin a while-statement. Returns a newly created WHILE_STMT if appropriate. */ tree begin_while_stmt (void) { tree r; r = build_stmt (input_location, WHILE_STMT, NULL_TREE, NULL_TREE); add_stmt (r); WHILE_BODY (r) = do_pushlevel (sk_block); begin_cond (&WHILE_COND (r)); return r; } /* Process the COND of a while-statement, which may be given by WHILE_STMT. */ void finish_while_stmt_cond (tree cond, tree while_stmt, bool ivdep, unsigned short unroll) { cond = maybe_convert_cond (cond); finish_cond (&WHILE_COND (while_stmt), cond); begin_maybe_infinite_loop (cond); if (ivdep && cond != error_mark_node) WHILE_COND (while_stmt) = build3 (ANNOTATE_EXPR, TREE_TYPE (WHILE_COND (while_stmt)), WHILE_COND (while_stmt), build_int_cst (integer_type_node, annot_expr_ivdep_kind), integer_zero_node); if (unroll && cond != error_mark_node) WHILE_COND (while_stmt) = build3 (ANNOTATE_EXPR, TREE_TYPE (WHILE_COND (while_stmt)), WHILE_COND (while_stmt), build_int_cst (integer_type_node, annot_expr_unroll_kind), build_int_cst (integer_type_node, unroll)); simplify_loop_decl_cond (&WHILE_COND (while_stmt), WHILE_BODY (while_stmt)); } /* Finish a while-statement, which may be given by WHILE_STMT. */ void finish_while_stmt (tree while_stmt) { end_maybe_infinite_loop (boolean_true_node); WHILE_BODY (while_stmt) = do_poplevel (WHILE_BODY (while_stmt)); } /* Begin a do-statement. Returns a newly created DO_STMT if appropriate. */ tree begin_do_stmt (void) { tree r = build_stmt (input_location, DO_STMT, NULL_TREE, NULL_TREE); begin_maybe_infinite_loop (boolean_true_node); add_stmt (r); DO_BODY (r) = push_stmt_list (); return r; } /* Finish the body of a do-statement, which may be given by DO_STMT. 
*/ void finish_do_body (tree do_stmt) { tree body = DO_BODY (do_stmt) = pop_stmt_list (DO_BODY (do_stmt)); if (TREE_CODE (body) == STATEMENT_LIST && STATEMENT_LIST_TAIL (body)) body = STATEMENT_LIST_TAIL (body)->stmt; if (IS_EMPTY_STMT (body)) warning (OPT_Wempty_body, "suggest explicit braces around empty body in %<do%> statement"); } /* Finish a do-statement, which may be given by DO_STMT, and whose COND is as indicated. */ void finish_do_stmt (tree cond, tree do_stmt, bool ivdep, unsigned short unroll) { cond = maybe_convert_cond (cond); end_maybe_infinite_loop (cond); if (ivdep && cond != error_mark_node) cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond, build_int_cst (integer_type_node, annot_expr_ivdep_kind), integer_zero_node); if (unroll && cond != error_mark_node) cond = build3 (ANNOTATE_EXPR, TREE_TYPE (cond), cond, build_int_cst (integer_type_node, annot_expr_unroll_kind), build_int_cst (integer_type_node, unroll)); DO_COND (do_stmt) = cond; } /* Finish a return-statement. The EXPRESSION returned, if any, is as indicated. */ tree finish_return_stmt (tree expr) { tree r; bool no_warning; expr = check_return_expr (expr, &no_warning); if (error_operand_p (expr) || (flag_openmp && !check_omp_return ())) { /* Suppress -Wreturn-type for this function. */ if (warn_return_type) TREE_NO_WARNING (current_function_decl) = true; return error_mark_node; } if (!processing_template_decl) { if (warn_sequence_point) verify_sequence_points (expr); if (DECL_DESTRUCTOR_P (current_function_decl) || (DECL_CONSTRUCTOR_P (current_function_decl) && targetm.cxx.cdtor_returns_this ())) { /* Similarly, all destructors must run destructors for base-classes before returning. So, all returns in a destructor get sent to the DTOR_LABEL; finish_function emits code to return a value there. */ return finish_goto_stmt (cdtor_label); } } r = build_stmt (input_location, RETURN_EXPR, expr); TREE_NO_WARNING (r) |= no_warning; r = maybe_cleanup_point_expr_void (r); r = add_stmt (r); return r; } /* Begin the scope of a for-statement or a range-for-statement. Both the returned trees are to be used in a call to begin_for_stmt or begin_range_for_stmt. */ tree begin_for_scope (tree *init) { tree scope = do_pushlevel (sk_for); if (processing_template_decl) *init = push_stmt_list (); else *init = NULL_TREE; return scope; } /* Begin a for-statement. Returns a new FOR_STMT. SCOPE and INIT should be the return of begin_for_scope, or both NULL_TREE */ tree begin_for_stmt (tree scope, tree init) { tree r; r = build_stmt (input_location, FOR_STMT, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE); if (scope == NULL_TREE) { gcc_assert (!init); scope = begin_for_scope (&init); } FOR_INIT_STMT (r) = init; FOR_SCOPE (r) = scope; return r; } /* Finish the init-statement of a for-statement, which may be given by FOR_STMT. */ void finish_init_stmt (tree for_stmt) { if (processing_template_decl) FOR_INIT_STMT (for_stmt) = pop_stmt_list (FOR_INIT_STMT (for_stmt)); add_stmt (for_stmt); FOR_BODY (for_stmt) = do_pushlevel (sk_block); begin_cond (&FOR_COND (for_stmt)); } /* Finish the COND of a for-statement, which may be given by FOR_STMT. 
*/ void finish_for_cond (tree cond, tree for_stmt, bool ivdep, unsigned short unroll) { cond = maybe_convert_cond (cond); finish_cond (&FOR_COND (for_stmt), cond); begin_maybe_infinite_loop (cond); if (ivdep && cond != error_mark_node) FOR_COND (for_stmt) = build3 (ANNOTATE_EXPR, TREE_TYPE (FOR_COND (for_stmt)), FOR_COND (for_stmt), build_int_cst (integer_type_node, annot_expr_ivdep_kind), integer_zero_node); if (unroll && cond != error_mark_node) FOR_COND (for_stmt) = build3 (ANNOTATE_EXPR, TREE_TYPE (FOR_COND (for_stmt)), FOR_COND (for_stmt), build_int_cst (integer_type_node, annot_expr_unroll_kind), build_int_cst (integer_type_node, unroll)); simplify_loop_decl_cond (&FOR_COND (for_stmt), FOR_BODY (for_stmt)); } /* Finish the increment-EXPRESSION in a for-statement, which may be given by FOR_STMT. */ void finish_for_expr (tree expr, tree for_stmt) { if (!expr) return; /* If EXPR is an overloaded function, issue an error; there is no context available to use to perform overload resolution. */ if (type_unknown_p (expr)) { cxx_incomplete_type_error (expr, TREE_TYPE (expr)); expr = error_mark_node; } if (!processing_template_decl) { if (warn_sequence_point) verify_sequence_points (expr); expr = convert_to_void (expr, ICV_THIRD_IN_FOR, tf_warning_or_error); } else if (!type_dependent_expression_p (expr)) convert_to_void (build_non_dependent_expr (expr), ICV_THIRD_IN_FOR, tf_warning_or_error); expr = maybe_cleanup_point_expr_void (expr); if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; FOR_EXPR (for_stmt) = expr; } /* Finish the body of a for-statement, which may be given by FOR_STMT. The increment-EXPR for the loop must be provided. It can also finish RANGE_FOR_STMT. */ void finish_for_stmt (tree for_stmt) { end_maybe_infinite_loop (boolean_true_node); if (TREE_CODE (for_stmt) == RANGE_FOR_STMT) RANGE_FOR_BODY (for_stmt) = do_poplevel (RANGE_FOR_BODY (for_stmt)); else FOR_BODY (for_stmt) = do_poplevel (FOR_BODY (for_stmt)); /* Pop the scope for the body of the loop. */ tree *scope_ptr = (TREE_CODE (for_stmt) == RANGE_FOR_STMT ? &RANGE_FOR_SCOPE (for_stmt) : &FOR_SCOPE (for_stmt)); tree scope = *scope_ptr; *scope_ptr = NULL; /* During parsing of the body, range for uses "__for_{range,begin,end} " decl names to make those unaccessible by code in the body. Change it to ones with underscore instead of space, so that it can be inspected in the debugger. */ tree range_for_decl[3] = { NULL_TREE, NULL_TREE, NULL_TREE }; gcc_assert (CPTI_FOR_BEGIN__IDENTIFIER == CPTI_FOR_RANGE__IDENTIFIER + 1 && CPTI_FOR_END__IDENTIFIER == CPTI_FOR_RANGE__IDENTIFIER + 2 && CPTI_FOR_RANGE_IDENTIFIER == CPTI_FOR_RANGE__IDENTIFIER + 3 && CPTI_FOR_BEGIN_IDENTIFIER == CPTI_FOR_BEGIN__IDENTIFIER + 3 && CPTI_FOR_END_IDENTIFIER == CPTI_FOR_END__IDENTIFIER + 3); for (int i = 0; i < 3; i++) { tree id = cp_global_trees[CPTI_FOR_RANGE__IDENTIFIER + i]; if (IDENTIFIER_BINDING (id) && IDENTIFIER_BINDING (id)->scope == current_binding_level) { range_for_decl[i] = IDENTIFIER_BINDING (id)->value; gcc_assert (VAR_P (range_for_decl[i]) && DECL_ARTIFICIAL (range_for_decl[i])); } } add_stmt (do_poplevel (scope)); for (int i = 0; i < 3; i++) if (range_for_decl[i]) DECL_NAME (range_for_decl[i]) = cp_global_trees[CPTI_FOR_RANGE_IDENTIFIER + i]; } /* Begin a range-for-statement. Returns a new RANGE_FOR_STMT. SCOPE and INIT should be the return of begin_for_scope, or both NULL_TREE . To finish it call finish_for_stmt(). 
*/ tree begin_range_for_stmt (tree scope, tree init) { begin_maybe_infinite_loop (boolean_false_node); tree r = build_stmt (input_location, RANGE_FOR_STMT, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE); if (scope == NULL_TREE) { gcc_assert (!init); scope = begin_for_scope (&init); } /* Since C++20, RANGE_FOR_STMTs can use the init tree, so save it. */ RANGE_FOR_INIT_STMT (r) = init; RANGE_FOR_SCOPE (r) = scope; return r; } /* Finish the head of a range-based for statement, which may be given by RANGE_FOR_STMT. DECL must be the declaration and EXPR must be the loop expression. */ void finish_range_for_decl (tree range_for_stmt, tree decl, tree expr) { if (processing_template_decl) RANGE_FOR_INIT_STMT (range_for_stmt) = pop_stmt_list (RANGE_FOR_INIT_STMT (range_for_stmt)); RANGE_FOR_DECL (range_for_stmt) = decl; RANGE_FOR_EXPR (range_for_stmt) = expr; add_stmt (range_for_stmt); RANGE_FOR_BODY (range_for_stmt) = do_pushlevel (sk_block); } /* Finish a break-statement. */ tree finish_break_stmt (void) { /* In switch statements break is sometimes stylistically used after a return statement. This can lead to spurious warnings about control reaching the end of a non-void function when it is inlined. Note that we are calling block_may_fallthru with language specific tree nodes; this works because block_may_fallthru returns true when given something it does not understand. */ if (!block_may_fallthru (cur_stmt_list)) return void_node; note_break_stmt (); return add_stmt (build_stmt (input_location, BREAK_STMT)); } /* Finish a continue-statement. */ tree finish_continue_stmt (void) { return add_stmt (build_stmt (input_location, CONTINUE_STMT)); } /* Begin a switch-statement. Returns a new SWITCH_STMT if appropriate. */ tree begin_switch_stmt (void) { tree r, scope; scope = do_pushlevel (sk_cond); r = build_stmt (input_location, SWITCH_STMT, NULL_TREE, NULL_TREE, NULL_TREE, scope); begin_cond (&SWITCH_STMT_COND (r)); return r; } /* Finish the cond of a switch-statement. */ void finish_switch_cond (tree cond, tree switch_stmt) { tree orig_type = NULL; if (!processing_template_decl) { /* Convert the condition to an integer or enumeration type. */ tree orig_cond = cond; cond = build_expr_type_conversion (WANT_INT | WANT_ENUM, cond, true); if (cond == NULL_TREE) { error_at (cp_expr_loc_or_input_loc (orig_cond), "switch quantity not an integer"); cond = error_mark_node; } /* We want unlowered type here to handle enum bit-fields. */ orig_type = unlowered_expr_type (cond); if (TREE_CODE (orig_type) != ENUMERAL_TYPE) orig_type = TREE_TYPE (cond); if (cond != error_mark_node) { /* [stmt.switch] Integral promotions are performed. */ cond = perform_integral_promotions (cond); cond = maybe_cleanup_point_expr (cond); } } if (check_for_bare_parameter_packs (cond)) cond = error_mark_node; else if (!processing_template_decl && warn_sequence_point) verify_sequence_points (cond); finish_cond (&SWITCH_STMT_COND (switch_stmt), cond); SWITCH_STMT_TYPE (switch_stmt) = orig_type; add_stmt (switch_stmt); push_switch (switch_stmt); SWITCH_STMT_BODY (switch_stmt) = push_stmt_list (); } /* Finish the body of a switch-statement, which may be given by SWITCH_STMT. The COND to switch on is indicated. */ void finish_switch_stmt (tree switch_stmt) { tree scope; SWITCH_STMT_BODY (switch_stmt) = pop_stmt_list (SWITCH_STMT_BODY (switch_stmt)); pop_switch (); scope = SWITCH_STMT_SCOPE (switch_stmt); SWITCH_STMT_SCOPE (switch_stmt) = NULL; add_stmt (do_poplevel (scope)); } /* Begin a try-block. 
Returns a newly-created TRY_BLOCK if appropriate. */ tree begin_try_block (void) { tree r = build_stmt (input_location, TRY_BLOCK, NULL_TREE, NULL_TREE); add_stmt (r); TRY_STMTS (r) = push_stmt_list (); return r; } /* Likewise, for a function-try-block. The block returned in *COMPOUND_STMT is an artificial outer scope, containing the function-try-block. */ tree begin_function_try_block (tree *compound_stmt) { tree r; /* This outer scope does not exist in the C++ standard, but we need a place to put __FUNCTION__ and similar variables. */ *compound_stmt = begin_compound_stmt (0); r = begin_try_block (); FN_TRY_BLOCK_P (r) = 1; return r; } /* Finish a try-block, which may be given by TRY_BLOCK. */ void finish_try_block (tree try_block) { TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block)); TRY_HANDLERS (try_block) = push_stmt_list (); } /* Finish the body of a cleanup try-block, which may be given by TRY_BLOCK. */ void finish_cleanup_try_block (tree try_block) { TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block)); } /* Finish an implicitly generated try-block, with a cleanup is given by CLEANUP. */ void finish_cleanup (tree cleanup, tree try_block) { TRY_HANDLERS (try_block) = cleanup; CLEANUP_P (try_block) = 1; } /* Likewise, for a function-try-block. */ void finish_function_try_block (tree try_block) { finish_try_block (try_block); /* FIXME : something queer about CTOR_INITIALIZER somehow following the try block, but moving it inside. */ in_function_try_handler = 1; } /* Finish a handler-sequence for a try-block, which may be given by TRY_BLOCK. */ void finish_handler_sequence (tree try_block) { TRY_HANDLERS (try_block) = pop_stmt_list (TRY_HANDLERS (try_block)); check_handlers (TRY_HANDLERS (try_block)); } /* Finish the handler-seq for a function-try-block, given by TRY_BLOCK. COMPOUND_STMT is the outer block created by begin_function_try_block. */ void finish_function_handler_sequence (tree try_block, tree compound_stmt) { in_function_try_handler = 0; finish_handler_sequence (try_block); finish_compound_stmt (compound_stmt); } /* Begin a handler. Returns a HANDLER if appropriate. */ tree begin_handler (void) { tree r; r = build_stmt (input_location, HANDLER, NULL_TREE, NULL_TREE); add_stmt (r); /* Create a binding level for the eh_info and the exception object cleanup. */ HANDLER_BODY (r) = do_pushlevel (sk_catch); return r; } /* Finish the handler-parameters for a handler, which may be given by HANDLER. DECL is the declaration for the catch parameter, or NULL if this is a `catch (...)' clause. */ void finish_handler_parms (tree decl, tree handler) { tree type = NULL_TREE; if (processing_template_decl) { if (decl) { decl = pushdecl (decl); decl = push_template_decl (decl); HANDLER_PARMS (handler) = decl; type = TREE_TYPE (decl); } } else { type = expand_start_catch_block (decl); if (warn_catch_value && type != NULL_TREE && type != error_mark_node && !TYPE_REF_P (TREE_TYPE (decl))) { tree orig_type = TREE_TYPE (decl); if (CLASS_TYPE_P (orig_type)) { if (TYPE_POLYMORPHIC_P (orig_type)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wcatch_value_, "catching polymorphic type %q#T by value", orig_type); else if (warn_catch_value > 1) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wcatch_value_, "catching type %q#T by value", orig_type); } else if (warn_catch_value > 2) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wcatch_value_, "catching non-reference type %q#T", orig_type); } } HANDLER_TYPE (handler) = type; } /* Finish a handler, which may be given by HANDLER. 
The BLOCKs are the return value from the matching call to finish_handler_parms. */ void finish_handler (tree handler) { if (!processing_template_decl) expand_end_catch_block (); HANDLER_BODY (handler) = do_poplevel (HANDLER_BODY (handler)); } /* Begin a compound statement. FLAGS contains some bits that control the behavior and context. If BCS_NO_SCOPE is set, the compound statement does not define a scope. If BCS_FN_BODY is set, this is the outermost block of a function. If BCS_TRY_BLOCK is set, this is the block created on behalf of a TRY statement. Returns a token to be passed to finish_compound_stmt. */ tree begin_compound_stmt (unsigned int flags) { tree r; if (flags & BCS_NO_SCOPE) { r = push_stmt_list (); STATEMENT_LIST_NO_SCOPE (r) = 1; /* Normally, we try hard to keep the BLOCK for a statement-expression. But, if it's a statement-expression with a scopeless block, there's nothing to keep, and we don't want to accidentally keep a block *inside* the scopeless block. */ keep_next_level (false); } else { scope_kind sk = sk_block; if (flags & BCS_TRY_BLOCK) sk = sk_try; else if (flags & BCS_TRANSACTION) sk = sk_transaction; r = do_pushlevel (sk); } /* When processing a template, we need to remember where the braces were, so that we can set up identical scopes when instantiating the template later. BIND_EXPR is a handy candidate for this. Note that do_poplevel won't create a BIND_EXPR itself here (and thus result in nested BIND_EXPRs), since we don't build BLOCK nodes when processing templates. */ if (processing_template_decl) { r = build3 (BIND_EXPR, NULL, NULL, r, NULL); BIND_EXPR_TRY_BLOCK (r) = (flags & BCS_TRY_BLOCK) != 0; BIND_EXPR_BODY_BLOCK (r) = (flags & BCS_FN_BODY) != 0; TREE_SIDE_EFFECTS (r) = 1; } return r; } /* Finish a compound-statement, which is given by STMT. */ void finish_compound_stmt (tree stmt) { if (TREE_CODE (stmt) == BIND_EXPR) { tree body = do_poplevel (BIND_EXPR_BODY (stmt)); /* If the STATEMENT_LIST is empty and this BIND_EXPR isn't special, discard the BIND_EXPR so it can be merged with the containing STATEMENT_LIST. */ if (TREE_CODE (body) == STATEMENT_LIST && STATEMENT_LIST_HEAD (body) == NULL && !BIND_EXPR_BODY_BLOCK (stmt) && !BIND_EXPR_TRY_BLOCK (stmt)) stmt = body; else BIND_EXPR_BODY (stmt) = body; } else if (STATEMENT_LIST_NO_SCOPE (stmt)) stmt = pop_stmt_list (stmt); else { /* Destroy any ObjC "super" receivers that may have been created. */ objc_clear_super_receiver (); stmt = do_poplevel (stmt); } /* ??? See c_end_compound_stmt wrt statement expressions. */ add_stmt (stmt); } /* Finish an asm-statement, whose components are a STRING, some OUTPUT_OPERANDS, some INPUT_OPERANDS, some CLOBBERS and some LABELS. Also note whether the asm-statement should be considered volatile, and whether it is asm inline. */ tree finish_asm_stmt (location_t loc, int volatile_p, tree string, tree output_operands, tree input_operands, tree clobbers, tree labels, bool inline_p) { tree r; tree t; int ninputs = list_length (input_operands); int noutputs = list_length (output_operands); if (!processing_template_decl) { const char *constraint; const char **oconstraints; bool allows_mem, allows_reg, is_inout; tree operand; int i; oconstraints = XALLOCAVEC (const char *, noutputs); string = resolve_asm_operand_names (string, output_operands, input_operands, labels); for (i = 0, t = output_operands; t; t = TREE_CHAIN (t), ++i) { operand = TREE_VALUE (t); /* ??? Really, this should not be here. Users should be using a proper lvalue, dammit. 
But there's a long history of using casts in the output operands. In cases like longlong.h, this becomes a primitive form of typechecking -- if the cast can be removed, then the output operand had a type of the proper width; otherwise we'll get an error. Gross, but ... */ STRIP_NOPS (operand); operand = mark_lvalue_use (operand); if (!lvalue_or_else (operand, lv_asm, tf_warning_or_error)) operand = error_mark_node; if (operand != error_mark_node && (TREE_READONLY (operand) || CP_TYPE_CONST_P (TREE_TYPE (operand)) /* Functions are not modifiable, even though they are lvalues. */ || FUNC_OR_METHOD_TYPE_P (TREE_TYPE (operand)) /* If it's an aggregate and any field is const, then it is effectively const. */ || (CLASS_TYPE_P (TREE_TYPE (operand)) && C_TYPE_FIELDS_READONLY (TREE_TYPE (operand))))) cxx_readonly_error (loc, operand, lv_asm); tree *op = &operand; while (TREE_CODE (*op) == COMPOUND_EXPR) op = &TREE_OPERAND (*op, 1); switch (TREE_CODE (*op)) { case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case MODIFY_EXPR: *op = genericize_compound_lvalue (*op); op = &TREE_OPERAND (*op, 1); break; default: break; } constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); oconstraints[i] = constraint; if (parse_output_constraint (&constraint, i, ninputs, noutputs, &allows_mem, &allows_reg, &is_inout)) { /* If the operand is going to end up in memory, mark it addressable. */ if (!allows_reg && !cxx_mark_addressable (*op)) operand = error_mark_node; } else operand = error_mark_node; TREE_VALUE (t) = operand; } for (i = 0, t = input_operands; t; ++i, t = TREE_CHAIN (t)) { constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); bool constraint_parsed = parse_input_constraint (&constraint, i, ninputs, noutputs, 0, oconstraints, &allows_mem, &allows_reg); /* If the operand is going to end up in memory, don't call decay_conversion. */ if (constraint_parsed && !allows_reg && allows_mem) operand = mark_lvalue_use (TREE_VALUE (t)); else operand = decay_conversion (TREE_VALUE (t), tf_warning_or_error); /* If the type of the operand hasn't been determined (e.g., because it involves an overloaded function), then issue an error message. There's no context available to resolve the overloading. */ if (TREE_TYPE (operand) == unknown_type_node) { error_at (loc, "type of %<asm%> operand %qE could not be determined", TREE_VALUE (t)); operand = error_mark_node; } if (constraint_parsed) { /* If the operand is going to end up in memory, mark it addressable. */ if (!allows_reg && allows_mem) { /* Strip the nops as we allow this case. FIXME, this really should be rejected or made deprecated. */ STRIP_NOPS (operand); tree *op = &operand; while (TREE_CODE (*op) == COMPOUND_EXPR) op = &TREE_OPERAND (*op, 1); switch (TREE_CODE (*op)) { case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case MODIFY_EXPR: *op = genericize_compound_lvalue (*op); op = &TREE_OPERAND (*op, 1); break; default: break; } if (!cxx_mark_addressable (*op)) operand = error_mark_node; } else if (!allows_reg && !allows_mem) { /* If constraint allows neither register nor memory, try harder to get a constant. */ tree constop = maybe_constant_value (operand); if (TREE_CONSTANT (constop)) operand = constop; } } else operand = error_mark_node; TREE_VALUE (t) = operand; } } r = build_stmt (loc, ASM_EXPR, string, output_operands, input_operands, clobbers, labels); ASM_VOLATILE_P (r) = volatile_p || noutputs == 0; ASM_INLINE_P (r) = inline_p; r = maybe_cleanup_point_expr_void (r); return add_stmt (r); } /* Finish a label with the indicated NAME. 
Returns the new label. */ tree finish_label_stmt (tree name) { tree decl = define_label (input_location, name); if (decl == error_mark_node) return error_mark_node; add_stmt (build_stmt (input_location, LABEL_EXPR, decl)); return decl; } /* Finish a series of declarations for local labels. G++ allows users to declare "local" labels, i.e., labels with scope. This extension is useful when writing code involving statement-expressions. */ void finish_label_decl (tree name) { if (!at_function_scope_p ()) { error ("%<__label__%> declarations are only allowed in function scopes"); return; } add_decl_expr (declare_local_label (name)); } /* When DECL goes out of scope, make sure that CLEANUP is executed. */ void finish_decl_cleanup (tree decl, tree cleanup) { push_cleanup (decl, cleanup, false); } /* If the current scope exits with an exception, run CLEANUP. */ void finish_eh_cleanup (tree cleanup) { push_cleanup (NULL, cleanup, true); } /* The MEM_INITS is a list of mem-initializers, in reverse of the order they were written by the user. Each node is as for emit_mem_initializers. */ void finish_mem_initializers (tree mem_inits) { /* Reorder the MEM_INITS so that they are in the order they appeared in the source program. */ mem_inits = nreverse (mem_inits); if (processing_template_decl) { tree mem; for (mem = mem_inits; mem; mem = TREE_CHAIN (mem)) { /* If the TREE_PURPOSE is a TYPE_PACK_EXPANSION, skip the check for bare parameter packs in the TREE_VALUE, because any parameter packs in the TREE_VALUE have already been bound as part of the TREE_PURPOSE. See make_pack_expansion for more information. */ if (TREE_CODE (TREE_PURPOSE (mem)) != TYPE_PACK_EXPANSION && check_for_bare_parameter_packs (TREE_VALUE (mem))) TREE_VALUE (mem) = error_mark_node; } add_stmt (build_min_nt_loc (UNKNOWN_LOCATION, CTOR_INITIALIZER, mem_inits)); } else emit_mem_initializers (mem_inits); } /* Obfuscate EXPR if it looks like an id-expression or member access so that the call to finish_decltype in do_auto_deduction will give the right result. If EVEN_UNEVAL, do this even in unevaluated context. */ tree force_paren_expr (tree expr, bool even_uneval) { /* This is only needed for decltype(auto) in C++14. */ if (cxx_dialect < cxx14) return expr; /* If we're in unevaluated context, we can't be deducing a return/initializer type, so we don't need to mess with this. */ if (cp_unevaluated_operand && !even_uneval) return expr; if (!DECL_P (tree_strip_any_location_wrapper (expr)) && TREE_CODE (expr) != COMPONENT_REF && TREE_CODE (expr) != SCOPE_REF) return expr; location_t loc = cp_expr_location (expr); if (TREE_CODE (expr) == COMPONENT_REF || TREE_CODE (expr) == SCOPE_REF) REF_PARENTHESIZED_P (expr) = true; else if (processing_template_decl) expr = build1_loc (loc, PAREN_EXPR, TREE_TYPE (expr), expr); else { expr = build1_loc (loc, VIEW_CONVERT_EXPR, TREE_TYPE (expr), expr); REF_PARENTHESIZED_P (expr) = true; } return expr; } /* If T is an id-expression obfuscated by force_paren_expr, undo the obfuscation and return the underlying id-expression. Otherwise return T. 
*/ tree maybe_undo_parenthesized_ref (tree t) { if (cxx_dialect < cxx14) return t; if (INDIRECT_REF_P (t) && REF_PARENTHESIZED_P (t)) { t = TREE_OPERAND (t, 0); while (TREE_CODE (t) == NON_LVALUE_EXPR || TREE_CODE (t) == NOP_EXPR) t = TREE_OPERAND (t, 0); gcc_assert (TREE_CODE (t) == ADDR_EXPR || TREE_CODE (t) == STATIC_CAST_EXPR); t = TREE_OPERAND (t, 0); } else if (TREE_CODE (t) == PAREN_EXPR) t = TREE_OPERAND (t, 0); else if (TREE_CODE (t) == VIEW_CONVERT_EXPR && REF_PARENTHESIZED_P (t)) t = TREE_OPERAND (t, 0); return t; } /* Finish a parenthesized expression EXPR. */ cp_expr finish_parenthesized_expr (cp_expr expr) { if (EXPR_P (expr)) /* This inhibits warnings in c_common_truthvalue_conversion. */ TREE_NO_WARNING (expr) = 1; if (TREE_CODE (expr) == OFFSET_REF || TREE_CODE (expr) == SCOPE_REF) /* [expr.unary.op]/3 The qualified id of a pointer-to-member must not be enclosed in parentheses. */ PTRMEM_OK_P (expr) = 0; tree stripped_expr = tree_strip_any_location_wrapper (expr); if (TREE_CODE (stripped_expr) == STRING_CST) PAREN_STRING_LITERAL_P (stripped_expr) = 1; expr = cp_expr (force_paren_expr (expr), expr.get_location ()); return expr; } /* Finish a reference to a non-static data member (DECL) that is not preceded by `.' or `->'. */ tree finish_non_static_data_member (tree decl, tree object, tree qualifying_scope) { gcc_assert (TREE_CODE (decl) == FIELD_DECL); bool try_omp_private = !object && omp_private_member_map; tree ret; if (!object) { tree scope = qualifying_scope; if (scope == NULL_TREE) { scope = context_for_name_lookup (decl); if (!TYPE_P (scope)) { /* Can happen during error recovery (c++/85014). */ gcc_assert (seen_error ()); return error_mark_node; } } object = maybe_dummy_object (scope, NULL); } object = maybe_resolve_dummy (object, true); if (object == error_mark_node) return error_mark_node; /* DR 613/850: Can use non-static data members without an associated object in sizeof/decltype/alignof. */ if (is_dummy_object (object) && cp_unevaluated_operand == 0 && (!processing_template_decl || !current_class_ref)) { if (current_function_decl && DECL_STATIC_FUNCTION_P (current_function_decl)) error ("invalid use of member %qD in static member function", decl); else error ("invalid use of non-static data member %qD", decl); inform (DECL_SOURCE_LOCATION (decl), "declared here"); return error_mark_node; } if (current_class_ptr) TREE_USED (current_class_ptr) = 1; if (processing_template_decl) { tree type = TREE_TYPE (decl); if (TYPE_REF_P (type)) /* Quals on the object don't matter. */; else if (PACK_EXPANSION_P (type)) /* Don't bother trying to represent this. */ type = NULL_TREE; else { /* Set the cv qualifiers. */ int quals = cp_type_quals (TREE_TYPE (object)); if (DECL_MUTABLE_P (decl)) quals &= ~TYPE_QUAL_CONST; quals |= cp_type_quals (TREE_TYPE (decl)); type = cp_build_qualified_type (type, quals); } if (qualifying_scope) /* Wrap this in a SCOPE_REF for now. */ ret = build_qualified_name (type, qualifying_scope, decl, /*template_p=*/false); else ret = (convert_from_reference (build_min (COMPONENT_REF, type, object, decl, NULL_TREE))); } /* If PROCESSING_TEMPLATE_DECL is nonzero here, then QUALIFYING_SCOPE is also non-null. */ else { tree access_type = TREE_TYPE (object); perform_or_defer_access_check (TYPE_BINFO (access_type), decl, decl, tf_warning_or_error); /* If the data member was named `C::M', convert `*this' to `C' first. 
*/ if (qualifying_scope) { tree binfo = NULL_TREE; object = build_scoped_ref (object, qualifying_scope, &binfo); } ret = build_class_member_access_expr (object, decl, /*access_path=*/NULL_TREE, /*preserve_reference=*/false, tf_warning_or_error); } if (try_omp_private) { tree *v = omp_private_member_map->get (decl); if (v) ret = convert_from_reference (*v); } return ret; } /* If we are currently parsing a template and we encountered a typedef TYPEDEF_DECL that is being accessed though CONTEXT, this function adds the typedef to a list tied to the current template. At template instantiation time, that list is walked and access check performed for each typedef. LOCATION is the location of the usage point of TYPEDEF_DECL. */ void add_typedef_to_current_template_for_access_check (tree typedef_decl, tree context, location_t location) { tree template_info = NULL; tree cs = current_scope (); if (!is_typedef_decl (typedef_decl) || !context || !CLASS_TYPE_P (context) || !cs) return; if (CLASS_TYPE_P (cs) || TREE_CODE (cs) == FUNCTION_DECL) template_info = get_template_info (cs); if (template_info && TI_TEMPLATE (template_info) && !currently_open_class (context)) append_type_to_template_for_access_check (cs, typedef_decl, context, location); } /* DECL was the declaration to which a qualified-id resolved. Issue an error message if it is not accessible. If OBJECT_TYPE is non-NULL, we have just seen `x->' or `x.' and OBJECT_TYPE is the type of `*x', or `x', respectively. If the DECL was named as `A::B' then NESTED_NAME_SPECIFIER is `A'. */ void check_accessibility_of_qualified_id (tree decl, tree object_type, tree nested_name_specifier) { tree scope; tree qualifying_type = NULL_TREE; /* If we are parsing a template declaration and if decl is a typedef, add it to a list tied to the template. At template instantiation time, that list will be walked and access check performed. */ add_typedef_to_current_template_for_access_check (decl, nested_name_specifier ? nested_name_specifier : DECL_CONTEXT (decl), input_location); /* If we're not checking, return immediately. */ if (deferred_access_no_check) return; /* Determine the SCOPE of DECL. */ scope = context_for_name_lookup (decl); /* If the SCOPE is not a type, then DECL is not a member. */ if (!TYPE_P (scope)) return; /* Compute the scope through which DECL is being accessed. */ if (object_type /* OBJECT_TYPE might not be a class type; consider: class A { typedef int I; }; I *p; p->A::I::~I(); In this case, we will have "A::I" as the DECL, but "I" as the OBJECT_TYPE. */ && CLASS_TYPE_P (object_type) && DERIVED_FROM_P (scope, object_type)) /* If we are processing a `->' or `.' expression, use the type of the left-hand side. */ qualifying_type = object_type; else if (nested_name_specifier) { /* If the reference is to a non-static member of the current class, treat it as if it were referenced through `this'. */ tree ct; if (DECL_NONSTATIC_MEMBER_P (decl) && current_class_ptr && DERIVED_FROM_P (scope, ct = current_nonlambda_class_type ())) qualifying_type = ct; /* Otherwise, use the type indicated by the nested-name-specifier. */ else qualifying_type = nested_name_specifier; } else /* Otherwise, the name must be from the current class or one of its bases. */ qualifying_type = currently_open_derived_class (scope); if (qualifying_type /* It is possible for qualifying type to be a TEMPLATE_TYPE_PARM or similar in a default argument value. 
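One way this arises (hypothetical example): a default template argument naming a member of a template parameter, so the qualifying type is the parameter itself:

       template <typename T, int N = T::value> struct X { };  // the qualifying type T is a TEMPLATE_TYPE_PARM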
*/ && CLASS_TYPE_P (qualifying_type) && !dependent_type_p (qualifying_type)) perform_or_defer_access_check (TYPE_BINFO (qualifying_type), decl, decl, tf_warning_or_error); } /* EXPR is the result of a qualified-id. The QUALIFYING_CLASS was the class named to the left of the "::" operator. DONE is true if this expression is a complete postfix-expression; it is false if this expression is followed by '->', '[', '(', etc. ADDRESS_P is true iff this expression is the operand of '&'. TEMPLATE_P is true iff the qualified-id was of the form "A::template B". TEMPLATE_ARG_P is true iff this qualified name appears as a template argument. */ tree finish_qualified_id_expr (tree qualifying_class, tree expr, bool done, bool address_p, bool template_p, bool template_arg_p, tsubst_flags_t complain) { gcc_assert (TYPE_P (qualifying_class)); if (error_operand_p (expr)) return error_mark_node; if ((DECL_P (expr) || BASELINK_P (expr)) && !mark_used (expr, complain)) return error_mark_node; if (template_p) { if (TREE_CODE (expr) == UNBOUND_CLASS_TEMPLATE) { /* cp_parser_lookup_name thought we were looking for a type, but we're actually looking for a declaration. */ qualifying_class = TYPE_CONTEXT (expr); expr = TYPE_IDENTIFIER (expr); } else check_template_keyword (expr); } /* If EXPR occurs as the operand of '&', use special handling that permits a pointer-to-member. */ if (address_p && done) { if (TREE_CODE (expr) == SCOPE_REF) expr = TREE_OPERAND (expr, 1); expr = build_offset_ref (qualifying_class, expr, /*address_p=*/true, complain); return expr; } /* No need to check access within an enum. */ if (TREE_CODE (qualifying_class) == ENUMERAL_TYPE && TREE_CODE (expr) != IDENTIFIER_NODE) return expr; /* Within the scope of a class, turn references to non-static members into expression of the form "this->...". */ if (template_arg_p) /* But, within a template argument, we do not want make the transformation, as there is no "this" pointer. */ ; else if (TREE_CODE (expr) == FIELD_DECL) { push_deferring_access_checks (dk_no_check); expr = finish_non_static_data_member (expr, NULL_TREE, qualifying_class); pop_deferring_access_checks (); } else if (BASELINK_P (expr)) { /* See if any of the functions are non-static members. */ /* If so, the expression may be relative to 'this'. */ if ((type_dependent_expression_p (expr) || !shared_member_p (expr)) && current_class_ptr && DERIVED_FROM_P (qualifying_class, current_nonlambda_class_type ())) expr = (build_class_member_access_expr (maybe_dummy_object (qualifying_class, NULL), expr, BASELINK_ACCESS_BINFO (expr), /*preserve_reference=*/false, complain)); else if (done) /* The expression is a qualified name whose address is not being taken. */ expr = build_offset_ref (qualifying_class, expr, /*address_p=*/false, complain); } else if (!template_p && TREE_CODE (expr) == TEMPLATE_DECL && !DECL_FUNCTION_TEMPLATE_P (expr)) { if (complain & tf_error) error ("%qE missing template arguments", expr); return error_mark_node; } else { /* In a template, return a SCOPE_REF for most qualified-ids so that we can check access at instantiation time. But if we're looking at a member of the current instantiation, we know we have access and building up the SCOPE_REF confuses non-type template argument handling. 
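Roughly (the names A, B and n are invented for illustration):

       template <typename T> struct B { static const int n = 1; };
       template <typename T> struct A
       {
         static const int n = 3;
         int f () { return B<T>::n; }  // dependent qualified-id, kept as a SCOPE_REF until instantiation
         int g () { return A::n; }     // member of the current instantiation, resolved directly
       };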
*/ if (processing_template_decl && (!currently_open_class (qualifying_class) || TREE_CODE (expr) == IDENTIFIER_NODE || TREE_CODE (expr) == TEMPLATE_ID_EXPR || TREE_CODE (expr) == BIT_NOT_EXPR)) expr = build_qualified_name (TREE_TYPE (expr), qualifying_class, expr, template_p); else if (tree wrap = maybe_get_tls_wrapper_call (expr)) expr = wrap; expr = convert_from_reference (expr); } return expr; } /* Begin a statement-expression. The value returned must be passed to finish_stmt_expr. */ tree begin_stmt_expr (void) { return push_stmt_list (); } /* Process the final expression of a statement expression. EXPR can be NULL, if the final expression is empty. Return a STATEMENT_LIST containing all the statements in the statement-expression, or ERROR_MARK_NODE if there was an error. */ tree finish_stmt_expr_expr (tree expr, tree stmt_expr) { if (error_operand_p (expr)) { /* The type of the statement-expression is the type of the last expression. */ TREE_TYPE (stmt_expr) = error_mark_node; return error_mark_node; } /* If the last statement does not have "void" type, then the value of the last statement is the value of the entire expression. */ if (expr) { tree type = TREE_TYPE (expr); if (type && type_unknown_p (type)) { error ("a statement expression is an insufficient context" " for overload resolution"); TREE_TYPE (stmt_expr) = error_mark_node; return error_mark_node; } else if (processing_template_decl) { expr = build_stmt (input_location, EXPR_STMT, expr); expr = add_stmt (expr); /* Mark the last statement so that we can recognize it as such at template-instantiation time. */ EXPR_STMT_STMT_EXPR_RESULT (expr) = 1; } else if (VOID_TYPE_P (type)) { /* Just treat this like an ordinary statement. */ expr = finish_expr_stmt (expr); } else { /* It actually has a value we need to deal with. First, force it to be an rvalue so that we won't need to build up a copy constructor call later when we try to assign it to something. */ expr = force_rvalue (expr, tf_warning_or_error); if (error_operand_p (expr)) return error_mark_node; /* Update for array-to-pointer decay. */ type = TREE_TYPE (expr); /* Wrap it in a CLEANUP_POINT_EXPR and add it to the list like a normal statement, but don't convert to void or actually add the EXPR_STMT. */ if (TREE_CODE (expr) != CLEANUP_POINT_EXPR) expr = maybe_cleanup_point_expr (expr); add_stmt (expr); } /* The type of the statement-expression is the type of the last expression. */ TREE_TYPE (stmt_expr) = type; } return stmt_expr; } /* Finish a statement-expression. EXPR should be the value returned by the previous begin_stmt_expr. Returns an expression representing the statement-expression. */ tree finish_stmt_expr (tree stmt_expr, bool has_no_scope) { tree type; tree result; if (error_operand_p (stmt_expr)) { pop_stmt_list (stmt_expr); return error_mark_node; } gcc_assert (TREE_CODE (stmt_expr) == STATEMENT_LIST); type = TREE_TYPE (stmt_expr); result = pop_stmt_list (stmt_expr); TREE_TYPE (result) = type; if (processing_template_decl) { result = build_min (STMT_EXPR, type, result); TREE_SIDE_EFFECTS (result) = 1; STMT_EXPR_NO_SCOPE (result) = has_no_scope; } else if (CLASS_TYPE_P (type)) { /* Wrap the statement-expression in a TARGET_EXPR so that the temporary object created by the final expression is destroyed at the end of the full-expression containing the statement-expression. */ result = force_target_expr (type, result, tf_warning_or_error); } return result; } /* Returns the expression which provides the value of STMT_EXPR. 
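For example, in the GNU statement-expression below the value comes from the final expression statement, so i is initialized to 7:

       int i = ({ int t = 6; t + 1; });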
*/ tree stmt_expr_value_expr (tree stmt_expr) { tree t = STMT_EXPR_STMT (stmt_expr); if (TREE_CODE (t) == BIND_EXPR) t = BIND_EXPR_BODY (t); if (TREE_CODE (t) == STATEMENT_LIST && STATEMENT_LIST_TAIL (t)) t = STATEMENT_LIST_TAIL (t)->stmt; if (TREE_CODE (t) == EXPR_STMT) t = EXPR_STMT_EXPR (t); return t; } /* Return TRUE iff EXPR_STMT is an empty list of expression statements. */ bool empty_expr_stmt_p (tree expr_stmt) { tree body = NULL_TREE; if (expr_stmt == void_node) return true; if (expr_stmt) { if (TREE_CODE (expr_stmt) == EXPR_STMT) body = EXPR_STMT_EXPR (expr_stmt); else if (TREE_CODE (expr_stmt) == STATEMENT_LIST) body = expr_stmt; } if (body) { if (TREE_CODE (body) == STATEMENT_LIST) return tsi_end_p (tsi_start (body)); else return empty_expr_stmt_p (body); } return false; } /* Perform Koenig lookup. FN_EXPR is the postfix-expression representing the function (or functions) to call; ARGS are the arguments to the call. Returns the functions to be considered by overload resolution. */ cp_expr perform_koenig_lookup (cp_expr fn_expr, vec<tree, va_gc> *args, tsubst_flags_t complain) { tree identifier = NULL_TREE; tree functions = NULL_TREE; tree tmpl_args = NULL_TREE; bool template_id = false; location_t loc = fn_expr.get_location (); tree fn = fn_expr.get_value (); STRIP_ANY_LOCATION_WRAPPER (fn); if (TREE_CODE (fn) == TEMPLATE_ID_EXPR) { /* Use a separate flag to handle null args. */ template_id = true; tmpl_args = TREE_OPERAND (fn, 1); fn = TREE_OPERAND (fn, 0); } /* Find the name of the overloaded function. */ if (identifier_p (fn)) identifier = fn; else { functions = fn; identifier = OVL_NAME (functions); } /* A call to a namespace-scope function using an unqualified name. Do Koenig lookup -- unless any of the arguments are type-dependent. */ if (!any_type_dependent_arguments_p (args) && !any_dependent_template_arguments_p (tmpl_args)) { fn = lookup_arg_dependent (identifier, functions, args); if (!fn) { /* The unqualified name could not be resolved. */ if (complain & tf_error) fn = unqualified_fn_lookup_error (cp_expr (identifier, loc)); else fn = identifier; } } if (fn && template_id && fn != error_mark_node) fn = build2 (TEMPLATE_ID_EXPR, unknown_type_node, fn, tmpl_args); return cp_expr (fn, loc); } /* Generate an expression for `FN (ARGS)'. This may change the contents of ARGS. If DISALLOW_VIRTUAL is true, the call to FN will be not generated as a virtual call, even if FN is virtual. (This flag is set when encountering an expression where the function name is explicitly qualified. For example a call to `X::f' never generates a virtual call.) Returns code for the call. */ tree finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual, bool koenig_p, tsubst_flags_t complain) { tree result; tree orig_fn; vec<tree, va_gc> *orig_args = *args; if (fn == error_mark_node) return error_mark_node; gcc_assert (!TYPE_P (fn)); /* If FN may be a FUNCTION_DECL obfuscated by force_paren_expr, undo it so that we can tell this is a call to a known function. */ fn = maybe_undo_parenthesized_ref (fn); STRIP_ANY_LOCATION_WRAPPER (fn); orig_fn = fn; if (processing_template_decl) { /* If FN is a local extern declaration or set thereof, look them up again at instantiation time. 
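A sketch of that situation (wrap and impl are invented names):

       template <typename T> void wrap ()
       {
         extern void impl (T);  // block-scope ("local extern") declaration
         impl (T ());           // must be looked up again when wrap<T> is instantiated
       }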
*/ if (is_overloaded_fn (fn)) { tree ifn = get_first_fn (fn); if (TREE_CODE (ifn) == FUNCTION_DECL && DECL_LOCAL_FUNCTION_P (ifn)) orig_fn = DECL_NAME (ifn); } /* If the call expression is dependent, build a CALL_EXPR node with no type; type_dependent_expression_p recognizes expressions with no type as being dependent. */ if (type_dependent_expression_p (fn) || any_type_dependent_arguments_p (*args)) { result = build_min_nt_call_vec (orig_fn, *args); SET_EXPR_LOCATION (result, cp_expr_loc_or_input_loc (fn)); KOENIG_LOOKUP_P (result) = koenig_p; if (is_overloaded_fn (fn)) fn = get_fns (fn); if (cfun) { bool abnormal = true; for (lkp_iterator iter (fn); abnormal && iter; ++iter) { tree fndecl = STRIP_TEMPLATE (*iter); if (TREE_CODE (fndecl) != FUNCTION_DECL || !TREE_THIS_VOLATILE (fndecl)) abnormal = false; } /* FIXME: Stop warning about falling off end of non-void function. But this is wrong. Even if we only see no-return fns at this point, we could select a future-defined return fn during instantiation. Or vice-versa. */ if (abnormal) current_function_returns_abnormally = 1; } return result; } orig_args = make_tree_vector_copy (*args); if (!BASELINK_P (fn) && TREE_CODE (fn) != PSEUDO_DTOR_EXPR && TREE_TYPE (fn) != unknown_type_node) fn = build_non_dependent_expr (fn); make_args_non_dependent (*args); } if (TREE_CODE (fn) == COMPONENT_REF) { tree member = TREE_OPERAND (fn, 1); if (BASELINK_P (member)) { tree object = TREE_OPERAND (fn, 0); return build_new_method_call (object, member, args, NULL_TREE, (disallow_virtual ? LOOKUP_NORMAL | LOOKUP_NONVIRTUAL : LOOKUP_NORMAL), /*fn_p=*/NULL, complain); } } /* Per 13.3.1.1, '(&f)(...)' is the same as '(f)(...)'. */ if (TREE_CODE (fn) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (fn, 0)) == OVERLOAD) fn = TREE_OPERAND (fn, 0); if (is_overloaded_fn (fn)) fn = baselink_for_fns (fn); result = NULL_TREE; if (BASELINK_P (fn)) { tree object; /* A call to a member function. From [over.call.func]: If the keyword this is in scope and refers to the class of that member function, or a derived class thereof, then the function call is transformed into a qualified function call using (*this) as the postfix-expression to the left of the . operator.... [Otherwise] a contrived object of type T becomes the implied object argument. In this situation: struct A { void f(); }; struct B : public A {}; struct C : public A { void g() { B::f(); }}; "the class of that member function" refers to `A'. But 11.2 [class.access.base] says that we need to convert 'this' to B* as part of the access, so we pass 'B' to maybe_dummy_object. */ if (DECL_MAYBE_IN_CHARGE_CONSTRUCTOR_P (get_first_fn (fn))) { /* A constructor call always uses a dummy object. (This constructor call which has the form A::A () is actually invalid and we are going to reject it later in build_new_method_call.) */ object = build_dummy_object (BINFO_TYPE (BASELINK_ACCESS_BINFO (fn))); } else object = maybe_dummy_object (BINFO_TYPE (BASELINK_ACCESS_BINFO (fn)), NULL); result = build_new_method_call (object, fn, args, NULL_TREE, (disallow_virtual ? LOOKUP_NORMAL|LOOKUP_NONVIRTUAL : LOOKUP_NORMAL), /*fn_p=*/NULL, complain); } else if (concept_check_p (fn)) { /* FN is actually a template-id referring to a concept definition. 
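For instance (illustrative, assuming C++20 concepts):

       template <typename T> concept Small = sizeof (T) <= sizeof (int);
       static_assert (Small<char>);  // a concept-id used as a boolean concept check
       // but Small<char> () is rejected: a concept cannot be called as a function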
*/ tree id = unpack_concept_check (fn); tree tmpl = TREE_OPERAND (id, 0); tree args = TREE_OPERAND (id, 1); if (!function_concept_p (tmpl)) { error_at (EXPR_LOC_OR_LOC (fn, input_location), "cannot call a concept as a function"); return error_mark_node; } /* Ensure the result is wrapped as a call expression. */ result = build_concept_check (tmpl, args, tf_warning_or_error); } else if (is_overloaded_fn (fn)) { /* If the function is an overloaded builtin, resolve it. */ if (TREE_CODE (fn) == FUNCTION_DECL && (DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL || DECL_BUILT_IN_CLASS (fn) == BUILT_IN_MD)) result = resolve_overloaded_builtin (input_location, fn, *args); if (!result) { if (warn_sizeof_pointer_memaccess && (complain & tf_warning) && !vec_safe_is_empty (*args) && !processing_template_decl) { location_t sizeof_arg_loc[3]; tree sizeof_arg[3]; unsigned int i; for (i = 0; i < 3; i++) { tree t; sizeof_arg_loc[i] = UNKNOWN_LOCATION; sizeof_arg[i] = NULL_TREE; if (i >= (*args)->length ()) continue; t = (**args)[i]; if (TREE_CODE (t) != SIZEOF_EXPR) continue; if (SIZEOF_EXPR_TYPE_P (t)) sizeof_arg[i] = TREE_TYPE (TREE_OPERAND (t, 0)); else sizeof_arg[i] = TREE_OPERAND (t, 0); sizeof_arg_loc[i] = EXPR_LOCATION (t); } sizeof_pointer_memaccess_warning (sizeof_arg_loc, fn, *args, sizeof_arg, same_type_ignoring_top_level_qualifiers_p); } if ((complain & tf_warning) && TREE_CODE (fn) == FUNCTION_DECL && fndecl_built_in_p (fn, BUILT_IN_MEMSET) && vec_safe_length (*args) == 3 && !any_type_dependent_arguments_p (*args)) { tree arg0 = (*orig_args)[0]; tree arg1 = (*orig_args)[1]; tree arg2 = (*orig_args)[2]; int literal_mask = ((literal_integer_zerop (arg1) << 1) | (literal_integer_zerop (arg2) << 2)); warn_for_memset (input_location, arg0, arg2, literal_mask); } /* A call to a namespace-scope function. */ result = build_new_function_call (fn, args, complain); } } else if (TREE_CODE (fn) == PSEUDO_DTOR_EXPR) { if (!vec_safe_is_empty (*args)) error ("arguments to destructor are not allowed"); /* Mark the pseudo-destructor call as having side-effects so that we do not issue warnings about its use. */ result = build1 (NOP_EXPR, void_type_node, TREE_OPERAND (fn, 0)); TREE_SIDE_EFFECTS (result) = 1; } else if (CLASS_TYPE_P (TREE_TYPE (fn))) /* If the "function" is really an object of class type, it might have an overloaded `operator ()'. */ result = build_op_call (fn, args, complain); if (!result) /* A call where the function is unknown. */ result = cp_build_function_call_vec (fn, args, complain); if (processing_template_decl && result != error_mark_node) { if (INDIRECT_REF_P (result)) result = TREE_OPERAND (result, 0); result = build_call_vec (TREE_TYPE (result), orig_fn, orig_args); SET_EXPR_LOCATION (result, input_location); KOENIG_LOOKUP_P (result) = koenig_p; release_tree_vector (orig_args); result = convert_from_reference (result); } return result; } /* Finish a call to a postfix increment or decrement or EXPR. (Which is indicated by CODE, which should be POSTINCREMENT_EXPR or POSTDECREMENT_EXPR.) */ cp_expr finish_increment_expr (cp_expr expr, enum tree_code code) { /* input_location holds the location of the trailing operator token. Build a location of the form: expr++ ~~~~^~ with the caret at the operator token, ranging from the start of EXPR to the end of the operator token. 
*/ location_t combined_loc = make_location (input_location, expr.get_start (), get_finish (input_location)); cp_expr result = build_x_unary_op (combined_loc, code, expr, tf_warning_or_error); /* TODO: build_x_unary_op doesn't honor the location, so set it here. */ result.set_location (combined_loc); return result; } /* Finish a use of `this'. Returns an expression for `this'. */ tree finish_this_expr (void) { tree result = NULL_TREE; if (current_class_ptr) { tree type = TREE_TYPE (current_class_ref); /* In a lambda expression, 'this' refers to the captured 'this'. */ if (LAMBDA_TYPE_P (type)) result = lambda_expr_this_capture (CLASSTYPE_LAMBDA_EXPR (type), true); else result = current_class_ptr; } if (result) /* The keyword 'this' is a prvalue expression. */ return rvalue (result); tree fn = current_nonlambda_function (); if (fn && DECL_STATIC_FUNCTION_P (fn)) error ("%<this%> is unavailable for static member functions"); else if (fn) error ("invalid use of %<this%> in non-member function"); else error ("invalid use of %<this%> at top level"); return error_mark_node; } /* Finish a pseudo-destructor expression. If SCOPE is NULL, the expression was of the form `OBJECT.~DESTRUCTOR' where DESTRUCTOR is the TYPE for the type given. If SCOPE is non-NULL, the expression was of the form `OBJECT.SCOPE::~DESTRUCTOR'. */ tree finish_pseudo_destructor_expr (tree object, tree scope, tree destructor, location_t loc) { if (object == error_mark_node || destructor == error_mark_node) return error_mark_node; gcc_assert (TYPE_P (destructor)); if (!processing_template_decl) { if (scope == error_mark_node) { error_at (loc, "invalid qualifying scope in pseudo-destructor name"); return error_mark_node; } if (is_auto (destructor)) destructor = TREE_TYPE (object); if (scope && TYPE_P (scope) && !check_dtor_name (scope, destructor)) { error_at (loc, "qualified type %qT does not match destructor name ~%qT", scope, destructor); return error_mark_node; } /* [expr.pseudo] says both: The type designated by the pseudo-destructor-name shall be the same as the object type. and: The cv-unqualified versions of the object type and of the type designated by the pseudo-destructor-name shall be the same type. We implement the more generous second sentence, since that is what most other compilers do. */ if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (object), destructor)) { error_at (loc, "%qE is not of type %qT", object, destructor); return error_mark_node; } } return build3_loc (loc, PSEUDO_DTOR_EXPR, void_type_node, object, scope, destructor); } /* Finish an expression of the form CODE EXPR. */ cp_expr finish_unary_op_expr (location_t op_loc, enum tree_code code, cp_expr expr, tsubst_flags_t complain) { /* Build a location of the form: ++expr ^~~~~~ with the caret at the operator token, ranging from the start of the operator token to the end of EXPR. */ location_t combined_loc = make_location (op_loc, op_loc, expr.get_finish ()); cp_expr result = build_x_unary_op (combined_loc, code, expr, complain); /* TODO: build_x_unary_op doesn't always honor the location. 
*/ result.set_location (combined_loc); if (result == error_mark_node) return result; if (!(complain & tf_warning)) return result; tree result_ovl = result; tree expr_ovl = expr; if (!processing_template_decl) expr_ovl = cp_fully_fold (expr_ovl); if (!CONSTANT_CLASS_P (expr_ovl) || TREE_OVERFLOW_P (expr_ovl)) return result; if (!processing_template_decl) result_ovl = cp_fully_fold (result_ovl); if (CONSTANT_CLASS_P (result_ovl) && TREE_OVERFLOW_P (result_ovl)) overflow_warning (combined_loc, result_ovl); return result; } /* Finish a compound-literal expression or C++11 functional cast with aggregate initializer. TYPE is the type to which the CONSTRUCTOR in COMPOUND_LITERAL is being cast. */ tree finish_compound_literal (tree type, tree compound_literal, tsubst_flags_t complain, fcl_t fcl_context) { if (type == error_mark_node) return error_mark_node; if (TYPE_REF_P (type)) { compound_literal = finish_compound_literal (TREE_TYPE (type), compound_literal, complain, fcl_context); /* The prvalue is then used to direct-initialize the reference. */ tree r = (perform_implicit_conversion_flags (type, compound_literal, complain, LOOKUP_NORMAL)); return convert_from_reference (r); } if (!TYPE_OBJ_P (type)) { if (complain & tf_error) error ("compound literal of non-object type %qT", type); return error_mark_node; } if (tree anode = type_uses_auto (type)) if (CLASS_PLACEHOLDER_TEMPLATE (anode)) { type = do_auto_deduction (type, compound_literal, anode, complain, adc_variable_type); if (type == error_mark_node) return error_mark_node; } /* Used to hold a copy of the compound literal in a template. */ tree orig_cl = NULL_TREE; if (processing_template_decl) { const bool dependent_p = (instantiation_dependent_expression_p (compound_literal) || dependent_type_p (type)); if (dependent_p) /* We're about to return, no need to copy. */ orig_cl = compound_literal; else /* We're going to need a copy. */ orig_cl = unshare_constructor (compound_literal); TREE_TYPE (orig_cl) = type; /* Mark the expression as a compound literal. */ TREE_HAS_CONSTRUCTOR (orig_cl) = 1; /* And as instantiation-dependent. */ CONSTRUCTOR_IS_DEPENDENT (orig_cl) = dependent_p; if (fcl_context == fcl_c99) CONSTRUCTOR_C99_COMPOUND_LITERAL (orig_cl) = 1; /* If the compound literal is dependent, we're done for now. */ if (dependent_p) return orig_cl; /* Otherwise, do go on to e.g. check narrowing. */ } type = complete_type (type); if (TYPE_NON_AGGREGATE_CLASS (type)) { /* Trying to deal with a CONSTRUCTOR instead of a TREE_LIST everywhere that deals with function arguments would be a pain, so just wrap it in a TREE_LIST. The parser set a flag so we know that it came from T{} rather than T({}). 
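For example (illustrative):

       struct A { int i; };
       A a = A{1};    // braces directly after the type name, T{...}
       A b = A({1});  // the braced list is an argument inside parentheses, T({...})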
*/ CONSTRUCTOR_IS_DIRECT_INIT (compound_literal) = 1; compound_literal = build_tree_list (NULL_TREE, compound_literal); return build_functional_cast (input_location, type, compound_literal, complain); } if (TREE_CODE (type) == ARRAY_TYPE && check_array_initializer (NULL_TREE, type, compound_literal)) return error_mark_node; compound_literal = reshape_init (type, compound_literal, complain); if (SCALAR_TYPE_P (type) && !BRACE_ENCLOSED_INITIALIZER_P (compound_literal)) { tree t = instantiate_non_dependent_expr_sfinae (compound_literal, complain); if (!check_narrowing (type, t, complain)) return error_mark_node; } if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) { cp_complete_array_type_or_error (&type, compound_literal, false, complain); if (type == error_mark_node) return error_mark_node; } compound_literal = digest_init_flags (type, compound_literal, LOOKUP_NORMAL | LOOKUP_NO_NARROWING, complain); if (compound_literal == error_mark_node) return error_mark_node; /* If we're in a template, return the original compound literal. */ if (orig_cl) { if (!VECTOR_TYPE_P (type)) return get_target_expr_sfinae (orig_cl, complain); else return orig_cl; } if (TREE_CODE (compound_literal) == CONSTRUCTOR) { TREE_HAS_CONSTRUCTOR (compound_literal) = true; if (fcl_context == fcl_c99) CONSTRUCTOR_C99_COMPOUND_LITERAL (compound_literal) = 1; } /* Put static/constant array temporaries in static variables. */ /* FIXME all C99 compound literals should be variables rather than C++ temporaries, unless they are used as an aggregate initializer. */ if ((!at_function_scope_p () || CP_TYPE_CONST_P (type)) && fcl_context == fcl_c99 && TREE_CODE (type) == ARRAY_TYPE && !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type) && initializer_constant_valid_p (compound_literal, type)) { tree decl = create_temporary_var (type); DECL_INITIAL (decl) = compound_literal; TREE_STATIC (decl) = 1; if (literal_type_p (type) && CP_TYPE_CONST_NON_VOLATILE_P (type)) { /* 5.19 says that a constant expression can include an lvalue-rvalue conversion applied to "a glvalue of literal type that refers to a non-volatile temporary object initialized with a constant expression". Rather than try to communicate that this VAR_DECL is a temporary, just mark it constexpr. */ DECL_DECLARED_CONSTEXPR_P (decl) = true; DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = true; TREE_CONSTANT (decl) = true; } cp_apply_type_quals_to_decl (cp_type_quals (type), decl); decl = pushdecl_top_level (decl); DECL_NAME (decl) = make_anon_name (); SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl)); /* Make sure the destructor is callable. */ tree clean = cxx_maybe_build_cleanup (decl, complain); if (clean == error_mark_node) return error_mark_node; return decl; } /* Represent other compound literals with TARGET_EXPR so we produce an lvalue, but can elide copies. */ if (!VECTOR_TYPE_P (type)) compound_literal = get_target_expr_sfinae (compound_literal, complain); return compound_literal; } /* Return the declaration for the function-name variable indicated by ID. */ tree finish_fname (tree id) { tree decl; decl = fname_decl (input_location, C_RID_CODE (id), id); if (processing_template_decl && current_function_decl && decl != error_mark_node) decl = DECL_NAME (decl); return decl; } /* Finish a translation unit. */ void finish_translation_unit (void) { /* In case there were missing closebraces, get us back to the global binding level. */ pop_everything (); while (current_namespace != global_namespace) pop_namespace (); /* Do file scope __FUNCTION__ et al. 
*/ finish_fname_decls (); if (scope_chain->omp_declare_target_attribute) { if (!errorcount) error ("%<#pragma omp declare target%> without corresponding " "%<#pragma omp end declare target%>"); scope_chain->omp_declare_target_attribute = 0; } } /* Finish a template type parameter, specified as AGGR IDENTIFIER. Returns the parameter. */ tree finish_template_type_parm (tree aggr, tree identifier) { if (aggr != class_type_node) { permerror (input_location, "template type parameters must use the keyword %<class%> or %<typename%>"); aggr = class_type_node; } return build_tree_list (aggr, identifier); } /* Finish a template template parameter, specified as AGGR IDENTIFIER. Returns the parameter. */ tree finish_template_template_parm (tree aggr, tree identifier) { tree decl = build_decl (input_location, TYPE_DECL, identifier, NULL_TREE); tree tmpl = build_lang_decl (TEMPLATE_DECL, identifier, NULL_TREE); DECL_TEMPLATE_PARMS (tmpl) = current_template_parms; DECL_TEMPLATE_RESULT (tmpl) = decl; DECL_ARTIFICIAL (decl) = 1; /* Associate the constraints with the underlying declaration, not the template. */ tree reqs = TEMPLATE_PARMS_CONSTRAINTS (current_template_parms); tree constr = build_constraints (reqs, NULL_TREE); set_constraints (decl, constr); end_template_decl (); gcc_assert (DECL_TEMPLATE_PARMS (tmpl)); check_default_tmpl_args (decl, DECL_TEMPLATE_PARMS (tmpl), /*is_primary=*/true, /*is_partial=*/false, /*is_friend=*/0); return finish_template_type_parm (aggr, tmpl); } /* ARGUMENT is the default-argument value for a template template parameter. If ARGUMENT is invalid, issue error messages and return the ERROR_MARK_NODE. Otherwise, ARGUMENT itself is returned. */ tree check_template_template_default_arg (tree argument) { if (TREE_CODE (argument) != TEMPLATE_DECL && TREE_CODE (argument) != TEMPLATE_TEMPLATE_PARM && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE) { if (TREE_CODE (argument) == TYPE_DECL) error ("invalid use of type %qT as a default value for a template " "template-parameter", TREE_TYPE (argument)); else error ("invalid default argument for a template template parameter"); return error_mark_node; } return argument; } /* Begin a class definition, as indicated by T. */ tree begin_class_definition (tree t) { if (error_operand_p (t) || error_operand_p (TYPE_MAIN_DECL (t))) return error_mark_node; if (processing_template_parmlist && !LAMBDA_TYPE_P (t)) { error ("definition of %q#T inside template parameter list", t); return error_mark_node; } /* According to the C++ ABI, decimal classes defined in ISO/IEC TR 24733 are passed the same as decimal scalar types. */ if (TREE_CODE (t) == RECORD_TYPE && !processing_template_decl) { tree ns = TYPE_CONTEXT (t); if (ns && TREE_CODE (ns) == NAMESPACE_DECL && DECL_CONTEXT (ns) == std_node && DECL_NAME (ns) && id_equal (DECL_NAME (ns), "decimal")) { const char *n = TYPE_NAME_STRING (t); if ((strcmp (n, "decimal32") == 0) || (strcmp (n, "decimal64") == 0) || (strcmp (n, "decimal128") == 0)) TYPE_TRANSPARENT_AGGR (t) = 1; } } /* A non-implicit typename comes from code like: template <typename T> struct A { template <typename U> struct A<T>::B ... This is erroneous. */ else if (TREE_CODE (t) == TYPENAME_TYPE) { error ("invalid definition of qualified type %qT", t); t = error_mark_node; } if (t == error_mark_node || ! 
MAYBE_CLASS_TYPE_P (t)) { t = make_class_type (RECORD_TYPE); pushtag (make_anon_name (), t, /*tag_scope=*/ts_current); } if (TYPE_BEING_DEFINED (t)) { t = make_class_type (TREE_CODE (t)); pushtag (TYPE_IDENTIFIER (t), t, /*tag_scope=*/ts_current); } maybe_process_partial_specialization (t); pushclass (t); TYPE_BEING_DEFINED (t) = 1; class_binding_level->defining_class_p = 1; if (flag_pack_struct) { tree v; TYPE_PACKED (t) = 1; /* Even though the type is being defined for the first time here, there might have been a forward declaration, so there might be cv-qualified variants of T. */ for (v = TYPE_NEXT_VARIANT (t); v; v = TYPE_NEXT_VARIANT (v)) TYPE_PACKED (v) = 1; } /* Reset the interface data, at the earliest possible moment, as it might have been set via a class foo; before. */ if (! TYPE_UNNAMED_P (t)) { struct c_fileinfo *finfo = \ get_fileinfo (LOCATION_FILE (input_location)); CLASSTYPE_INTERFACE_ONLY (t) = finfo->interface_only; SET_CLASSTYPE_INTERFACE_UNKNOWN_X (t, finfo->interface_unknown); } reset_specialization(); /* Make a declaration for this class in its own scope. */ build_self_reference (); return t; } /* Finish the member declaration given by DECL. */ void finish_member_declaration (tree decl) { if (decl == error_mark_node || decl == NULL_TREE) return; if (decl == void_type_node) /* The COMPONENT was a friend, not a member, and so there's nothing for us to do. */ return; /* We should see only one DECL at a time. */ gcc_assert (DECL_CHAIN (decl) == NULL_TREE); /* Don't add decls after definition. */ gcc_assert (TYPE_BEING_DEFINED (current_class_type) /* We can add lambda types when late parsing default arguments. */ || LAMBDA_TYPE_P (TREE_TYPE (decl))); /* Set up access control for DECL. */ TREE_PRIVATE (decl) = (current_access_specifier == access_private_node); TREE_PROTECTED (decl) = (current_access_specifier == access_protected_node); if (TREE_CODE (decl) == TEMPLATE_DECL) { TREE_PRIVATE (DECL_TEMPLATE_RESULT (decl)) = TREE_PRIVATE (decl); TREE_PROTECTED (DECL_TEMPLATE_RESULT (decl)) = TREE_PROTECTED (decl); } /* Mark the DECL as a member of the current class, unless it's a member of an enumeration. */ if (TREE_CODE (decl) != CONST_DECL) DECL_CONTEXT (decl) = current_class_type; if (TREE_CODE (decl) == USING_DECL) /* For now, ignore class-scope USING_DECLS, so that debugging backends do not see them. */ DECL_IGNORED_P (decl) = 1; /* Check for bare parameter packs in the non-static data member declaration. */ if (TREE_CODE (decl) == FIELD_DECL) { if (check_for_bare_parameter_packs (TREE_TYPE (decl))) TREE_TYPE (decl) = error_mark_node; if (check_for_bare_parameter_packs (DECL_ATTRIBUTES (decl))) DECL_ATTRIBUTES (decl) = NULL_TREE; } /* [dcl.link] A C language linkage is ignored for the names of class members and the member function type of class member functions. */ if (DECL_LANG_SPECIFIC (decl)) SET_DECL_LANGUAGE (decl, lang_cplusplus); bool add = false; /* Functions and non-functions are added differently. */ if (DECL_DECLARES_FUNCTION_P (decl)) add = add_method (current_class_type, decl, false); /* Enter the DECL into the scope of the class, if the class isn't a closure (whose fields are supposed to be unnamed). */ else if (CLASSTYPE_LAMBDA_EXPR (current_class_type) || pushdecl_class_level (decl)) add = true; if (add) { /* All TYPE_DECLs go at the end of TYPE_FIELDS. Ordinary fields go at the beginning. The reason is that legacy_nonfn_member_lookup searches the list in order, and we want a field name to override a type name so that the "struct stat hack" will work. 
In particular: struct S { enum E { }; static const int E = 5; int ary[S::E]; } s; is valid. */ if (TREE_CODE (decl) == TYPE_DECL) TYPE_FIELDS (current_class_type) = chainon (TYPE_FIELDS (current_class_type), decl); else { DECL_CHAIN (decl) = TYPE_FIELDS (current_class_type); TYPE_FIELDS (current_class_type) = decl; } maybe_add_class_template_decl_list (current_class_type, decl, /*friend_p=*/0); } } /* Finish processing a complete template declaration. The PARMS are the template parameters. */ void finish_template_decl (tree parms) { if (parms) end_template_decl (); else end_specialization (); } // Returns the template type of the class scope being entered. If we're // entering a constrained class scope. TYPE is the class template // scope being entered and we may need to match the intended type with // a constrained specialization. For example: // // template<Object T> // struct S { void f(); }; #1 // // template<Object T> // void S<T>::f() { } #2 // // We check, in #2, that S<T> refers precisely to the type declared by // #1 (i.e., that the constraints match). Note that the following should // be an error since there is no specialization of S<T> that is // unconstrained, but this is not diagnosed here. // // template<typename T> // void S<T>::f() { } // // We cannot diagnose this problem here since this function also matches // qualified template names that are not part of a definition. For example: // // template<Integral T, Floating_point U> // typename pair<T, U>::first_type void f(T, U); // // Here, it is unlikely that there is a partial specialization of // pair constrained for for Integral and Floating_point arguments. // // The general rule is: if a constrained specialization with matching // constraints is found return that type. Also note that if TYPE is not a // class-type (e.g. a typename type), then no fixup is needed. static tree fixup_template_type (tree type) { // Find the template parameter list at the a depth appropriate to // the scope we're trying to enter. tree parms = current_template_parms; int depth = template_class_depth (type); for (int n = processing_template_decl; n > depth && parms; --n) parms = TREE_CHAIN (parms); if (!parms) return type; tree cur_reqs = TEMPLATE_PARMS_CONSTRAINTS (parms); tree cur_constr = build_constraints (cur_reqs, NULL_TREE); // Search for a specialization whose type and constraints match. tree tmpl = CLASSTYPE_TI_TEMPLATE (type); tree specs = DECL_TEMPLATE_SPECIALIZATIONS (tmpl); while (specs) { tree spec_constr = get_constraints (TREE_VALUE (specs)); // If the type and constraints match a specialization, then we // are entering that type. if (same_type_p (type, TREE_TYPE (specs)) && equivalent_constraints (cur_constr, spec_constr)) return TREE_TYPE (specs); specs = TREE_CHAIN (specs); } // If no specialization matches, then must return the type // previously found. return type; } /* Finish processing a template-id (which names a type) of the form NAME < ARGS >. Return the TYPE_DECL for the type named by the template-id. If ENTERING_SCOPE is nonzero we are about to enter the scope of template-id indicated. */ tree finish_template_type (tree name, tree args, int entering_scope) { tree type; type = lookup_template_class (name, args, NULL_TREE, NULL_TREE, entering_scope, tf_warning_or_error | tf_user); /* If we might be entering the scope of a partial specialization, find the one with the right constraints. 
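Sketch (Object is assumed to be some concept defined earlier, as in the comments above):

       template <typename T> struct S { };               // primary template
       template <Object T> struct S<T> { void f (); };   // constrained partial specialization
       template <Object T> void S<T>::f () { }           // must enter the scope of the specialization
                                                          // whose constraints match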
*/ if (flag_concepts && entering_scope && CLASS_TYPE_P (type) && CLASSTYPE_TEMPLATE_INFO (type) && dependent_type_p (type) && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (type))) type = fixup_template_type (type); if (type == error_mark_node) return type; else if (CLASS_TYPE_P (type) && !alias_type_or_template_p (type)) return TYPE_STUB_DECL (type); else return TYPE_NAME (type); } /* Finish processing a BASE_CLASS with the indicated ACCESS_SPECIFIER. Return a TREE_LIST containing the ACCESS_SPECIFIER and the BASE_CLASS, or NULL_TREE if an error occurred. The ACCESS_SPECIFIER is one of access_{default,public,protected_private}_node. For a virtual base we set TREE_TYPE. */ tree finish_base_specifier (tree base, tree access, bool virtual_p) { tree result; if (base == error_mark_node) { error ("invalid base-class specification"); result = NULL_TREE; } else if (! MAYBE_CLASS_TYPE_P (base)) { error ("%qT is not a class type", base); result = NULL_TREE; } else { if (cp_type_quals (base) != 0) { /* DR 484: Can a base-specifier name a cv-qualified class type? */ base = TYPE_MAIN_VARIANT (base); } result = build_tree_list (access, base); if (virtual_p) TREE_TYPE (result) = integer_type_node; } return result; } /* If FNS is a member function, a set of member functions, or a template-id referring to one or more member functions, return a BASELINK for FNS, incorporating the current access context. Otherwise, return FNS unchanged. */ tree baselink_for_fns (tree fns) { tree scope; tree cl; if (BASELINK_P (fns) || error_operand_p (fns)) return fns; scope = ovl_scope (fns); if (!CLASS_TYPE_P (scope)) return fns; cl = currently_open_derived_class (scope); if (!cl) cl = scope; cl = TYPE_BINFO (cl); return build_baselink (cl, cl, fns, /*optype=*/NULL_TREE); } /* Returns true iff DECL is a variable from a function outside the current one. */ static bool outer_var_p (tree decl) { return ((VAR_P (decl) || TREE_CODE (decl) == PARM_DECL) && DECL_FUNCTION_SCOPE_P (decl) /* Don't get confused by temporaries. */ && DECL_NAME (decl) && (DECL_CONTEXT (decl) != current_function_decl || parsing_nsdmi ())); } /* As above, but also checks that DECL is automatic. */ bool outer_automatic_var_p (tree decl) { return (outer_var_p (decl) && !TREE_STATIC (decl)); } /* DECL satisfies outer_automatic_var_p. Possibly complain about it or rewrite it for lambda capture. If ODR_USE is true, we're being called from mark_use, and we complain about use of constant variables. If ODR_USE is false, we're being called for the id-expression, and we do lambda capture. */ tree process_outer_var_ref (tree decl, tsubst_flags_t complain, bool odr_use) { if (cp_unevaluated_operand) { tree type = TREE_TYPE (decl); if (!dependent_type_p (type) && variably_modified_type_p (type, NULL_TREE)) /* VLAs are used even in unevaluated context. */; else /* It's not a use (3.2) if we're in an unevaluated context. */ return decl; } if (decl == error_mark_node) return decl; tree context = DECL_CONTEXT (decl); tree containing_function = current_function_decl; tree lambda_stack = NULL_TREE; tree lambda_expr = NULL_TREE; tree initializer = convert_from_reference (decl); /* Mark it as used now even if the use is ill-formed. */ if (!mark_used (decl, complain)) return error_mark_node; if (parsing_nsdmi ()) containing_function = NULL_TREE; if (containing_function && LAMBDA_FUNCTION_P (containing_function)) { /* Check whether we've already built a proxy. 
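The nested-lambda case this is concerned with looks roughly like (illustrative):

       void f (int x)
       {
         [=] ()
         {                            // the outer lambda captures x via a proxy
           [=] () { return x; } ();   // the inner lambda must capture that proxy, not x itself
         } ();
       }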
*/ tree var = decl; while (is_normal_capture_proxy (var)) var = DECL_CAPTURED_VARIABLE (var); tree d = retrieve_local_specialization (var); if (d && d != decl && is_capture_proxy (d)) { if (DECL_CONTEXT (d) == containing_function) /* We already have an inner proxy. */ return d; else /* We need to capture an outer proxy. */ return process_outer_var_ref (d, complain, odr_use); } } /* If we are in a lambda function, we can move out until we hit 1. the context, 2. a non-lambda function, or 3. a non-default capturing lambda function. */ while (context != containing_function /* containing_function can be null with invalid generic lambdas. */ && containing_function && LAMBDA_FUNCTION_P (containing_function)) { tree closure = DECL_CONTEXT (containing_function); lambda_expr = CLASSTYPE_LAMBDA_EXPR (closure); if (TYPE_CLASS_SCOPE_P (closure)) /* A lambda in an NSDMI (c++/64496). */ break; if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_NONE) break; lambda_stack = tree_cons (NULL_TREE, lambda_expr, lambda_stack); containing_function = decl_function_context (containing_function); } /* In a lambda within a template, wait until instantiation time to implicitly capture a parameter pack. We want to wait because we don't know if we're capturing the whole pack or a single element, and it's OK to wait because find_parameter_packs_r walks into the lambda body. */ if (context == containing_function && DECL_PACK_P (decl)) return decl; if (lambda_expr && VAR_P (decl) && DECL_ANON_UNION_VAR_P (decl)) { if (complain & tf_error) error ("cannot capture member %qD of anonymous union", decl); return error_mark_node; } /* Do lambda capture when processing the id-expression, not when odr-using a variable. */ if (!odr_use && context == containing_function) decl = add_default_capture (lambda_stack, /*id=*/DECL_NAME (decl), initializer); /* Only an odr-use of an outer automatic variable causes an error, and a constant variable can decay to a prvalue constant without odr-use. So don't complain yet. */ else if (!odr_use && decl_constant_var_p (decl)) return decl; else if (lambda_expr) { if (complain & tf_error) { error ("%qD is not captured", decl); tree closure = LAMBDA_EXPR_CLOSURE (lambda_expr); if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_NONE) inform (location_of (closure), "the lambda has no capture-default"); else if (TYPE_CLASS_SCOPE_P (closure)) inform (UNKNOWN_LOCATION, "lambda in local class %q+T cannot " "capture variables from the enclosing context", TYPE_CONTEXT (closure)); inform (DECL_SOURCE_LOCATION (decl), "%q#D declared here", decl); } return error_mark_node; } else { if (complain & tf_error) { error (VAR_P (decl) ? G_("use of local variable with automatic storage from " "containing function") : G_("use of parameter from containing function")); inform (DECL_SOURCE_LOCATION (decl), "%q#D declared here", decl); } return error_mark_node; } return decl; } /* ID_EXPRESSION is a representation of parsed, but unprocessed, id-expression. (See cp_parser_id_expression for details.) SCOPE, if non-NULL, is the type or namespace used to explicitly qualify ID_EXPRESSION. DECL is the entity to which that name has been resolved. *CONSTANT_EXPRESSION_P is true if we are presently parsing a constant-expression. In that case, *NON_CONSTANT_EXPRESSION_P will be set to true if this expression isn't permitted in a constant-expression, but it is otherwise not set by this function. 
*ALLOW_NON_CONSTANT_EXPRESSION_P is true if we are parsing a constant-expression, but a non-constant expression is also permissible. DONE is true if this expression is a complete postfix-expression; it is false if this expression is followed by '->', '[', '(', etc. ADDRESS_P is true iff this expression is the operand of '&'. TEMPLATE_P is true iff the qualified-id was of the form "A::template B". TEMPLATE_ARG_P is true iff this qualified name appears as a template argument. If an error occurs, and it is the kind of error that might cause the parser to abort a tentative parse, *ERROR_MSG is filled in. It is the caller's responsibility to issue the message. *ERROR_MSG will be a string with static storage duration, so the caller need not "free" it. Return an expression for the entity, after issuing appropriate diagnostics. This function is also responsible for transforming a reference to a non-static member into a COMPONENT_REF that makes the use of "this" explicit. Upon return, *IDK will be filled in appropriately. */ static cp_expr finish_id_expression_1 (tree id_expression, tree decl, tree scope, cp_id_kind *idk, bool integral_constant_expression_p, bool allow_non_integral_constant_expression_p, bool *non_integral_constant_expression_p, bool template_p, bool done, bool address_p, bool template_arg_p, const char **error_msg, location_t location) { decl = strip_using_decl (decl); /* Initialize the output parameters. */ *idk = CP_ID_KIND_NONE; *error_msg = NULL; if (id_expression == error_mark_node) return error_mark_node; /* If we have a template-id, then no further lookup is required. If the template-id was for a template-class, we will sometimes have a TYPE_DECL at this point. */ else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR || TREE_CODE (decl) == TYPE_DECL) ; /* Look up the name. */ else { if (decl == error_mark_node) { /* Name lookup failed. */ if (scope && (!TYPE_P (scope) || (!dependent_type_p (scope) && !(identifier_p (id_expression) && IDENTIFIER_CONV_OP_P (id_expression) && dependent_type_p (TREE_TYPE (id_expression)))))) { /* If the qualifying type is non-dependent (and the name does not name a conversion operator to a dependent type), issue an error. */ qualified_name_lookup_error (scope, id_expression, decl, location); return error_mark_node; } else if (!scope) { /* It may be resolved via Koenig lookup. */ *idk = CP_ID_KIND_UNQUALIFIED; return id_expression; } else decl = id_expression; } /* Remember that the name was used in the definition of the current class so that we can check later to see if the meaning would have been different after the class was entirely defined. */ if (!scope && decl != error_mark_node && identifier_p (id_expression)) maybe_note_name_used_in_class (id_expression, decl); /* A use in unevaluated operand might not be instantiated appropriately if tsubst_copy builds a dummy parm, or if we never instantiate a generic lambda, so mark it now. */ if (processing_template_decl && cp_unevaluated_operand) mark_type_use (decl); /* Disallow uses of local variables from containing functions, except within lambda-expressions. */ if (outer_automatic_var_p (decl)) { decl = process_outer_var_ref (decl, tf_warning_or_error); if (decl == error_mark_node) return error_mark_node; } /* Also disallow uses of function parameters outside the function body, except inside an unevaluated context (i.e. decltype). 
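For instance (illustrative declaration), a parameter may be named later in the same parameter list as long as the use is unevaluated:

       void f (int a, decltype (a) *p);  // OK: 'a' appears outside the body, but only inside decltype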
*/ if (TREE_CODE (decl) == PARM_DECL && DECL_CONTEXT (decl) == NULL_TREE && !cp_unevaluated_operand) { *error_msg = G_("use of parameter outside function body"); return error_mark_node; } } /* If we didn't find anything, or what we found was a type, then this wasn't really an id-expression. */ if (TREE_CODE (decl) == TEMPLATE_DECL && !DECL_FUNCTION_TEMPLATE_P (decl)) { *error_msg = G_("missing template arguments"); return error_mark_node; } else if (TREE_CODE (decl) == TYPE_DECL || TREE_CODE (decl) == NAMESPACE_DECL) { *error_msg = G_("expected primary-expression"); return error_mark_node; } /* If the name resolved to a template parameter, there is no need to look it up again later. */ if ((TREE_CODE (decl) == CONST_DECL && DECL_TEMPLATE_PARM_P (decl)) || TREE_CODE (decl) == TEMPLATE_PARM_INDEX) { tree r; *idk = CP_ID_KIND_NONE; if (TREE_CODE (decl) == TEMPLATE_PARM_INDEX) decl = TEMPLATE_PARM_DECL (decl); r = DECL_INITIAL (decl); if (CLASS_TYPE_P (TREE_TYPE (r)) && !CP_TYPE_CONST_P (TREE_TYPE (r))) { /* If the entity is a template parameter object for a template parameter of type T, the type of the expression is const T. */ tree ctype = TREE_TYPE (r); ctype = cp_build_qualified_type (ctype, (cp_type_quals (ctype) | TYPE_QUAL_CONST)); r = build1 (VIEW_CONVERT_EXPR, ctype, r); } r = convert_from_reference (r); if (integral_constant_expression_p && !dependent_type_p (TREE_TYPE (decl)) && !(INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (r)))) { if (!allow_non_integral_constant_expression_p) error ("template parameter %qD of type %qT is not allowed in " "an integral constant expression because it is not of " "integral or enumeration type", decl, TREE_TYPE (decl)); *non_integral_constant_expression_p = true; } return r; } else { bool dependent_p = type_dependent_expression_p (decl); /* If the declaration was explicitly qualified indicate that. The semantics of `A::f(3)' are different than `f(3)' if `f' is virtual. */ *idk = (scope ? CP_ID_KIND_QUALIFIED : (TREE_CODE (decl) == TEMPLATE_ID_EXPR ? CP_ID_KIND_TEMPLATE_ID : (dependent_p ? CP_ID_KIND_UNQUALIFIED_DEPENDENT : CP_ID_KIND_UNQUALIFIED))); if (dependent_p && DECL_P (decl) && any_dependent_type_attributes_p (DECL_ATTRIBUTES (decl))) /* Dependent type attributes on the decl mean that the TREE_TYPE is wrong, so just return the identifier. */ return id_expression; if (DECL_CLASS_TEMPLATE_P (decl)) { error ("use of class template %qT as expression", decl); return error_mark_node; } if (TREE_CODE (decl) == TREE_LIST) { /* Ambiguous reference to base members. */ error ("request for member %qD is ambiguous in " "multiple inheritance lattice", id_expression); print_candidates (decl); return error_mark_node; } /* Mark variable-like entities as used. Functions are similarly marked either below or after overload resolution. */ if ((VAR_P (decl) || TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == CONST_DECL || TREE_CODE (decl) == RESULT_DECL) && !mark_used (decl)) return error_mark_node; /* Only certain kinds of names are allowed in constant expression. Template parameters have already been handled above. */ if (! 
error_operand_p (decl) && !dependent_p && integral_constant_expression_p && !decl_constant_var_p (decl) && TREE_CODE (decl) != CONST_DECL && !builtin_valid_in_constant_expr_p (decl) && !concept_check_p (decl)) { if (!allow_non_integral_constant_expression_p) { error ("%qD cannot appear in a constant-expression", decl); return error_mark_node; } *non_integral_constant_expression_p = true; } if (tree wrap = maybe_get_tls_wrapper_call (decl)) /* Replace an evaluated use of the thread_local variable with a call to its wrapper. */ decl = wrap; else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR && !dependent_p && variable_template_p (TREE_OPERAND (decl, 0)) && !concept_check_p (decl)) { decl = finish_template_variable (decl); mark_used (decl); decl = convert_from_reference (decl); } else if (concept_check_p (decl)) { /* Nothing more to do. All of the analysis for concept checks is done by build_conept_id, called from the parser. */ } else if (scope) { if (TREE_CODE (decl) == SCOPE_REF) { gcc_assert (same_type_p (scope, TREE_OPERAND (decl, 0))); decl = TREE_OPERAND (decl, 1); } decl = (adjust_result_of_qualified_name_lookup (decl, scope, current_nonlambda_class_type())); if (TREE_CODE (decl) == FUNCTION_DECL) mark_used (decl); cp_warn_deprecated_use_scopes (scope); if (TYPE_P (scope)) decl = finish_qualified_id_expr (scope, decl, done, address_p, template_p, template_arg_p, tf_warning_or_error); else decl = convert_from_reference (decl); } else if (TREE_CODE (decl) == FIELD_DECL) { /* Since SCOPE is NULL here, this is an unqualified name. Access checking has been performed during name lookup already. Turn off checking to avoid duplicate errors. */ push_deferring_access_checks (dk_no_check); decl = finish_non_static_data_member (decl, NULL_TREE, /*qualifying_scope=*/NULL_TREE); pop_deferring_access_checks (); } else if (is_overloaded_fn (decl)) { /* We only need to look at the first function, because all the fns share the attribute we're concerned with (all member fns or all non-members). */ tree first_fn = get_first_fn (decl); first_fn = STRIP_TEMPLATE (first_fn); /* [basic.def.odr]: "A function whose name appears as a potentially-evaluated expression is odr-used if it is the unique lookup result". But only mark it if it's a complete postfix-expression; in a call, ADL might select a different function, and we'll call mark_used in build_over_call. */ if (done && !really_overloaded_fn (decl) && !mark_used (first_fn)) return error_mark_node; if (!template_arg_p && (TREE_CODE (first_fn) == USING_DECL || (TREE_CODE (first_fn) == FUNCTION_DECL && DECL_FUNCTION_MEMBER_P (first_fn) && !shared_member_p (decl)))) { /* A set of member functions. */ decl = maybe_dummy_object (DECL_CONTEXT (first_fn), 0); return finish_class_member_access_expr (decl, id_expression, /*template_p=*/false, tf_warning_or_error); } decl = baselink_for_fns (decl); } else { if (DECL_P (decl) && DECL_NONLOCAL (decl) && DECL_CLASS_SCOPE_P (decl)) { tree context = context_for_name_lookup (decl); if (context != current_class_type) { tree path = currently_open_derived_class (context); perform_or_defer_access_check (TYPE_BINFO (path), decl, decl, tf_warning_or_error); } } decl = convert_from_reference (decl); } } return cp_expr (decl, location); } /* As per finish_id_expression_1, but adding a wrapper node around the result if needed to express LOCATION. 
*/ cp_expr finish_id_expression (tree id_expression, tree decl, tree scope, cp_id_kind *idk, bool integral_constant_expression_p, bool allow_non_integral_constant_expression_p, bool *non_integral_constant_expression_p, bool template_p, bool done, bool address_p, bool template_arg_p, const char **error_msg, location_t location) { cp_expr result = finish_id_expression_1 (id_expression, decl, scope, idk, integral_constant_expression_p, allow_non_integral_constant_expression_p, non_integral_constant_expression_p, template_p, done, address_p, template_arg_p, error_msg, location); return result.maybe_add_location_wrapper (); } /* Implement the __typeof keyword: Return the type of EXPR, suitable for use as a type-specifier. */ tree finish_typeof (tree expr) { tree type; if (type_dependent_expression_p (expr)) { type = cxx_make_type (TYPEOF_TYPE); TYPEOF_TYPE_EXPR (type) = expr; SET_TYPE_STRUCTURAL_EQUALITY (type); return type; } expr = mark_type_use (expr); type = unlowered_expr_type (expr); if (!type || type == unknown_type_node) { error ("type of %qE is unknown", expr); return error_mark_node; } return type; } /* Implement the __underlying_type keyword: Return the underlying type of TYPE, suitable for use as a type-specifier. */ tree finish_underlying_type (tree type) { tree underlying_type; if (processing_template_decl) { underlying_type = cxx_make_type (UNDERLYING_TYPE); UNDERLYING_TYPE_TYPE (underlying_type) = type; SET_TYPE_STRUCTURAL_EQUALITY (underlying_type); return underlying_type; } if (!complete_type_or_else (type, NULL_TREE)) return error_mark_node; if (TREE_CODE (type) != ENUMERAL_TYPE) { error ("%qT is not an enumeration type", type); return error_mark_node; } underlying_type = ENUM_UNDERLYING_TYPE (type); /* Fixup necessary in this case because ENUM_UNDERLYING_TYPE includes TYPE_MIN_VALUE and TYPE_MAX_VALUE information. See finish_enum_value_list for details. */ if (!ENUM_FIXED_UNDERLYING_TYPE_P (type)) underlying_type = c_common_type_for_mode (TYPE_MODE (underlying_type), TYPE_UNSIGNED (underlying_type)); return underlying_type; } /* Implement the __direct_bases keyword: Return the direct base classes of type. */ tree calculate_direct_bases (tree type, tsubst_flags_t complain) { if (!complete_type_or_maybe_complain (type, NULL_TREE, complain) || !NON_UNION_CLASS_TYPE_P (type)) return make_tree_vec (0); releasing_vec vector; vec<tree, va_gc> *base_binfos = BINFO_BASE_BINFOS (TYPE_BINFO (type)); tree binfo; unsigned i; /* Virtual bases are initialized first */ for (i = 0; base_binfos->iterate (i, &binfo); i++) if (BINFO_VIRTUAL_P (binfo)) vec_safe_push (vector, binfo); /* Now non-virtuals */ for (i = 0; base_binfos->iterate (i, &binfo); i++) if (!BINFO_VIRTUAL_P (binfo)) vec_safe_push (vector, binfo); tree bases_vec = make_tree_vec (vector->length ()); for (i = 0; i < vector->length (); ++i) TREE_VEC_ELT (bases_vec, i) = BINFO_TYPE ((*vector)[i]); return bases_vec; } /* Implement the __bases keyword: Return the base classes of type */ /* Find morally non-virtual base classes by walking binfo hierarchy */ /* Virtual base classes are handled separately in finish_bases */ static tree dfs_calculate_bases_pre (tree binfo, void * /*data_*/) { /* Don't walk bases of virtual bases */ return BINFO_VIRTUAL_P (binfo) ? 
dfs_skip_bases : NULL_TREE; } static tree dfs_calculate_bases_post (tree binfo, void *data_) { vec<tree, va_gc> **data = ((vec<tree, va_gc> **) data_); if (!BINFO_VIRTUAL_P (binfo)) vec_safe_push (*data, BINFO_TYPE (binfo)); return NULL_TREE; } /* Calculates the morally non-virtual base classes of a class */ static vec<tree, va_gc> * calculate_bases_helper (tree type) { vec<tree, va_gc> *vector = make_tree_vector (); /* Now add non-virtual base classes in order of construction */ if (TYPE_BINFO (type)) dfs_walk_all (TYPE_BINFO (type), dfs_calculate_bases_pre, dfs_calculate_bases_post, &vector); return vector; } tree calculate_bases (tree type, tsubst_flags_t complain) { if (!complete_type_or_maybe_complain (type, NULL_TREE, complain) || !NON_UNION_CLASS_TYPE_P (type)) return make_tree_vec (0); releasing_vec vector; tree bases_vec = NULL_TREE; unsigned i; vec<tree, va_gc> *vbases; tree binfo; /* First go through virtual base classes */ for (vbases = CLASSTYPE_VBASECLASSES (type), i = 0; vec_safe_iterate (vbases, i, &binfo); i++) { releasing_vec vbase_bases = calculate_bases_helper (BINFO_TYPE (binfo)); vec_safe_splice (vector, vbase_bases); } /* Now for the non-virtual bases */ releasing_vec nonvbases = calculate_bases_helper (type); vec_safe_splice (vector, nonvbases); /* Note that during error recovery vector->length can even be zero. */ if (vector->length () > 1) { /* Last element is entire class, so don't copy */ bases_vec = make_tree_vec (vector->length () - 1); for (i = 0; i < vector->length () - 1; ++i) TREE_VEC_ELT (bases_vec, i) = (*vector)[i]; } else bases_vec = make_tree_vec (0); return bases_vec; } tree finish_bases (tree type, bool direct) { tree bases = NULL_TREE; if (!processing_template_decl) { /* Parameter packs can only be used in templates */ error ("parameter pack %<__bases%> only valid in template declaration"); return error_mark_node; } bases = cxx_make_type (BASES); BASES_TYPE (bases) = type; BASES_DIRECT (bases) = direct; SET_TYPE_STRUCTURAL_EQUALITY (bases); return bases; } /* Perform C++-specific checks for __builtin_offsetof before calling fold_offsetof. */ tree finish_offsetof (tree object_ptr, tree expr, location_t loc) { /* If we're processing a template, we can't finish the semantics yet. Otherwise we can fold the entire expression now. 
*/ if (processing_template_decl) { expr = build2 (OFFSETOF_EXPR, size_type_node, expr, object_ptr); SET_EXPR_LOCATION (expr, loc); return expr; } if (expr == error_mark_node) return error_mark_node; if (TREE_CODE (expr) == PSEUDO_DTOR_EXPR) { error ("cannot apply %<offsetof%> to destructor %<~%T%>", TREE_OPERAND (expr, 2)); return error_mark_node; } if (FUNC_OR_METHOD_TYPE_P (TREE_TYPE (expr)) || TREE_TYPE (expr) == unknown_type_node) { while (TREE_CODE (expr) == COMPONENT_REF || TREE_CODE (expr) == COMPOUND_EXPR) expr = TREE_OPERAND (expr, 1); if (DECL_P (expr)) { error ("cannot apply %<offsetof%> to member function %qD", expr); inform (DECL_SOURCE_LOCATION (expr), "declared here"); } else error ("cannot apply %<offsetof%> to member function"); return error_mark_node; } if (TREE_CODE (expr) == CONST_DECL) { error ("cannot apply %<offsetof%> to an enumerator %qD", expr); return error_mark_node; } if (REFERENCE_REF_P (expr)) expr = TREE_OPERAND (expr, 0); if (!complete_type_or_else (TREE_TYPE (TREE_TYPE (object_ptr)), object_ptr)) return error_mark_node; if (warn_invalid_offsetof && CLASS_TYPE_P (TREE_TYPE (TREE_TYPE (object_ptr))) && CLASSTYPE_NON_STD_LAYOUT (TREE_TYPE (TREE_TYPE (object_ptr))) && cp_unevaluated_operand == 0) warning_at (loc, OPT_Winvalid_offsetof, "%<offsetof%> within " "non-standard-layout type %qT is conditionally-supported", TREE_TYPE (TREE_TYPE (object_ptr))); return fold_offsetof (expr); } /* Replace the AGGR_INIT_EXPR at *TP with an equivalent CALL_EXPR. This function is broken out from the above for the benefit of the tree-ssa project. */ void simplify_aggr_init_expr (tree *tp) { tree aggr_init_expr = *tp; /* Form an appropriate CALL_EXPR. */ tree fn = AGGR_INIT_EXPR_FN (aggr_init_expr); tree slot = AGGR_INIT_EXPR_SLOT (aggr_init_expr); tree type = TREE_TYPE (slot); tree call_expr; enum style_t { ctor, arg, pcc } style; if (AGGR_INIT_VIA_CTOR_P (aggr_init_expr)) style = ctor; #ifdef PCC_STATIC_STRUCT_RETURN else if (1) style = pcc; #endif else { gcc_assert (TREE_ADDRESSABLE (type)); style = arg; } call_expr = build_call_array_loc (input_location, TREE_TYPE (TREE_TYPE (TREE_TYPE (fn))), fn, aggr_init_expr_nargs (aggr_init_expr), AGGR_INIT_EXPR_ARGP (aggr_init_expr)); TREE_NOTHROW (call_expr) = TREE_NOTHROW (aggr_init_expr); CALL_FROM_THUNK_P (call_expr) = AGGR_INIT_FROM_THUNK_P (aggr_init_expr); CALL_EXPR_OPERATOR_SYNTAX (call_expr) = CALL_EXPR_OPERATOR_SYNTAX (aggr_init_expr); CALL_EXPR_ORDERED_ARGS (call_expr) = CALL_EXPR_ORDERED_ARGS (aggr_init_expr); CALL_EXPR_REVERSE_ARGS (call_expr) = CALL_EXPR_REVERSE_ARGS (aggr_init_expr); if (style == ctor) { /* Replace the first argument to the ctor with the address of the slot. */ cxx_mark_addressable (slot); CALL_EXPR_ARG (call_expr, 0) = build1 (ADDR_EXPR, build_pointer_type (type), slot); } else if (style == arg) { /* Just mark it addressable here, and leave the rest to expand_call{,_inline}. */ cxx_mark_addressable (slot); CALL_EXPR_RETURN_SLOT_OPT (call_expr) = true; call_expr = build2 (INIT_EXPR, TREE_TYPE (call_expr), slot, call_expr); } else if (style == pcc) { /* If we're using the non-reentrant PCC calling convention, then we need to copy the returned value out of the static buffer into the SLOT. 
*/ push_deferring_access_checks (dk_no_check); call_expr = build_aggr_init (slot, call_expr, DIRECT_BIND | LOOKUP_ONLYCONVERTING, tf_warning_or_error); pop_deferring_access_checks (); call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (slot), call_expr, slot); } if (AGGR_INIT_ZERO_FIRST (aggr_init_expr)) { tree init = build_zero_init (type, NULL_TREE, /*static_storage_p=*/false); init = build2 (INIT_EXPR, void_type_node, slot, init); call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (call_expr), init, call_expr); } *tp = call_expr; } /* Emit all thunks to FN that should be emitted when FN is emitted. */ void emit_associated_thunks (tree fn) { /* When we use vcall offsets, we emit thunks with the virtual functions to which they thunk. The whole point of vcall offsets is so that you can know statically the entire set of thunks that will ever be needed for a given virtual function, thereby enabling you to output all the thunks with the function itself. */ if (DECL_VIRTUAL_P (fn) /* Do not emit thunks for extern template instantiations. */ && ! DECL_REALLY_EXTERN (fn)) { tree thunk; for (thunk = DECL_THUNKS (fn); thunk; thunk = DECL_CHAIN (thunk)) { if (!THUNK_ALIAS (thunk)) { use_thunk (thunk, /*emit_p=*/1); if (DECL_RESULT_THUNK_P (thunk)) { tree probe; for (probe = DECL_THUNKS (thunk); probe; probe = DECL_CHAIN (probe)) use_thunk (probe, /*emit_p=*/1); } } else gcc_assert (!DECL_THUNKS (thunk)); } } } /* Generate RTL for FN. */ bool expand_or_defer_fn_1 (tree fn) { /* When the parser calls us after finishing the body of a template function, we don't really want to expand the body. */ if (processing_template_decl) { /* Normally, collection only occurs in rest_of_compilation. So, if we don't collect here, we never collect junk generated during the processing of templates until we hit a non-template function. It's not safe to do this inside a nested class, though, as the parser may have local state that is not a GC root. */ if (!function_depth) ggc_collect (); return false; } gcc_assert (DECL_SAVED_TREE (fn)); /* We make a decision about linkage for these functions at the end of the compilation. Until that point, we do not want the back end to output them -- but we do want it to see the bodies of these functions so that it can inline them as appropriate. */ if (DECL_DECLARED_INLINE_P (fn) || DECL_IMPLICIT_INSTANTIATION (fn)) { if (DECL_INTERFACE_KNOWN (fn)) /* We've already made a decision as to how this function will be handled. */; else if (!at_eof || DECL_IMMEDIATE_FUNCTION_P (fn) || DECL_OMP_DECLARE_REDUCTION_P (fn)) tentative_decl_linkage (fn); else import_export_decl (fn); /* If the user wants us to keep all inline functions, then mark this function as needed so that finish_file will make sure to output it later. Similarly, all dllexport'd functions must be emitted; there may be callers in other DLLs. */ if (DECL_DECLARED_INLINE_P (fn) && !DECL_REALLY_EXTERN (fn) && !DECL_IMMEDIATE_FUNCTION_P (fn) && !DECL_OMP_DECLARE_REDUCTION_P (fn) && (flag_keep_inline_functions || (flag_keep_inline_dllexport && lookup_attribute ("dllexport", DECL_ATTRIBUTES (fn))))) { mark_needed (fn); DECL_EXTERNAL (fn) = 0; } } /* If this is a constructor or destructor body, we have to clone it. */ if (maybe_clone_body (fn)) { /* We don't want to process FN again, so pretend we've written it out, even though we haven't. */ TREE_ASM_WRITTEN (fn) = 1; /* If this is a constexpr function, keep DECL_SAVED_TREE. 
*/ if (!DECL_DECLARED_CONSTEXPR_P (fn)) DECL_SAVED_TREE (fn) = NULL_TREE; return false; } /* There's no reason to do any of the work here if we're only doing semantic analysis; this code just generates RTL. */ if (flag_syntax_only) { /* Pretend that this function has been written out so that we don't try to expand it again. */ TREE_ASM_WRITTEN (fn) = 1; return false; } if (DECL_OMP_DECLARE_REDUCTION_P (fn)) return false; return true; } void expand_or_defer_fn (tree fn) { if (expand_or_defer_fn_1 (fn)) { function_depth++; /* Expand or defer, at the whim of the compilation unit manager. */ cgraph_node::finalize_function (fn, function_depth > 1); emit_associated_thunks (fn); function_depth--; } } class nrv_data { public: nrv_data () : visited (37) {} tree var; tree result; hash_table<nofree_ptr_hash <tree_node> > visited; }; /* Helper function for walk_tree, used by finalize_nrv below. */ static tree finalize_nrv_r (tree* tp, int* walk_subtrees, void* data) { class nrv_data *dp = (class nrv_data *)data; tree_node **slot; /* No need to walk into types. There wouldn't be any need to walk into non-statements, except that we have to consider STMT_EXPRs. */ if (TYPE_P (*tp)) *walk_subtrees = 0; /* Change all returns to just refer to the RESULT_DECL; this is a nop, but differs from using NULL_TREE in that it indicates that we care about the value of the RESULT_DECL. */ else if (TREE_CODE (*tp) == RETURN_EXPR) TREE_OPERAND (*tp, 0) = dp->result; /* Change all cleanups for the NRV to only run when an exception is thrown. */ else if (TREE_CODE (*tp) == CLEANUP_STMT && CLEANUP_DECL (*tp) == dp->var) CLEANUP_EH_ONLY (*tp) = 1; /* Replace the DECL_EXPR for the NRV with an initialization of the RESULT_DECL, if needed. */ else if (TREE_CODE (*tp) == DECL_EXPR && DECL_EXPR_DECL (*tp) == dp->var) { tree init; if (DECL_INITIAL (dp->var) && DECL_INITIAL (dp->var) != error_mark_node) init = build2 (INIT_EXPR, void_type_node, dp->result, DECL_INITIAL (dp->var)); else init = build_empty_stmt (EXPR_LOCATION (*tp)); DECL_INITIAL (dp->var) = NULL_TREE; SET_EXPR_LOCATION (init, EXPR_LOCATION (*tp)); *tp = init; } /* And replace all uses of the NRV with the RESULT_DECL. */ else if (*tp == dp->var) *tp = dp->result; /* Avoid walking into the same tree more than once. Unfortunately, we can't just use walk_tree_without duplicates because it would only call us for the first occurrence of dp->var in the function body. */ slot = dp->visited.find_slot (*tp, INSERT); if (*slot) *walk_subtrees = 0; else *slot = *tp; /* Keep iterating. */ return NULL_TREE; } /* Called from finish_function to implement the named return value optimization by overriding all the RETURN_EXPRs and pertinent CLEANUP_STMTs and replacing all occurrences of VAR with RESULT, the RESULT_DECL for the function. */ void finalize_nrv (tree *tp, tree var, tree result) { class nrv_data data; /* Copy name from VAR to RESULT. */ DECL_NAME (result) = DECL_NAME (var); /* Don't forget that we take its address. */ TREE_ADDRESSABLE (result) = TREE_ADDRESSABLE (var); /* Finally set DECL_VALUE_EXPR to avoid assigning a stack slot at -O0 for the original var and debug info uses RESULT location for VAR. */ SET_DECL_VALUE_EXPR (var, result); DECL_HAS_VALUE_EXPR_P (var) = 1; data.var = var; data.result = result; cp_walk_tree (tp, finalize_nrv_r, &data, 0); } /* Create CP_OMP_CLAUSE_INFO for clause C. Returns true if it is invalid. 
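   A true result means that looking up one of the required special member
   functions (constructor, destructor or copy assignment) emitted an error.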
*/ bool cxx_omp_create_clause_info (tree c, tree type, bool need_default_ctor, bool need_copy_ctor, bool need_copy_assignment, bool need_dtor) { int save_errorcount = errorcount; tree info, t; /* Always allocate 3 elements for simplicity. These are the function decls for the ctor, dtor, and assignment op. This layout is known to the three lang hooks, cxx_omp_clause_default_init, cxx_omp_clause_copy_init, and cxx_omp_clause_assign_op. */ info = make_tree_vec (3); CP_OMP_CLAUSE_INFO (c) = info; if (need_default_ctor || need_copy_ctor) { if (need_default_ctor) t = get_default_ctor (type); else t = get_copy_ctor (type, tf_warning_or_error); if (t && !trivial_fn_p (t)) TREE_VEC_ELT (info, 0) = t; } if (need_dtor && TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)) TREE_VEC_ELT (info, 1) = get_dtor (type, tf_warning_or_error); if (need_copy_assignment) { t = get_copy_assign (type); if (t && !trivial_fn_p (t)) TREE_VEC_ELT (info, 2) = t; } return errorcount != save_errorcount; } /* If DECL is DECL_OMP_PRIVATIZED_MEMBER, return corresponding FIELD_DECL, otherwise return DECL itself. */ static tree omp_clause_decl_field (tree decl) { if (VAR_P (decl) && DECL_HAS_VALUE_EXPR_P (decl) && DECL_ARTIFICIAL (decl) && DECL_LANG_SPECIFIC (decl) && DECL_OMP_PRIVATIZED_MEMBER (decl)) { tree f = DECL_VALUE_EXPR (decl); if (INDIRECT_REF_P (f)) f = TREE_OPERAND (f, 0); if (TREE_CODE (f) == COMPONENT_REF) { f = TREE_OPERAND (f, 1); gcc_assert (TREE_CODE (f) == FIELD_DECL); return f; } } return NULL_TREE; } /* Adjust DECL if needed for printing using %qE. */ static tree omp_clause_printable_decl (tree decl) { tree t = omp_clause_decl_field (decl); if (t) return t; return decl; } /* For a FIELD_DECL F and corresponding DECL_OMP_PRIVATIZED_MEMBER VAR_DECL T that doesn't need a DECL_EXPR added, record it for privatization. */ static void omp_note_field_privatization (tree f, tree t) { if (!omp_private_member_map) omp_private_member_map = new hash_map<tree, tree>; tree &v = omp_private_member_map->get_or_insert (f); if (v == NULL_TREE) { v = t; omp_private_member_vec.safe_push (f); /* Signal that we don't want to create DECL_EXPR for this dummy var. */ omp_private_member_vec.safe_push (integer_zero_node); } } /* Privatize FIELD_DECL T, return corresponding DECL_OMP_PRIVATIZED_MEMBER dummy VAR_DECL. */ tree omp_privatize_field (tree t, bool shared) { tree m = finish_non_static_data_member (t, NULL_TREE, NULL_TREE); if (m == error_mark_node) return error_mark_node; if (!omp_private_member_map && !shared) omp_private_member_map = new hash_map<tree, tree>; if (TYPE_REF_P (TREE_TYPE (t))) { gcc_assert (INDIRECT_REF_P (m)); m = TREE_OPERAND (m, 0); } tree vb = NULL_TREE; tree &v = shared ? vb : omp_private_member_map->get_or_insert (t); if (v == NULL_TREE) { v = create_temporary_var (TREE_TYPE (m)); retrofit_lang_decl (v); DECL_OMP_PRIVATIZED_MEMBER (v) = 1; SET_DECL_VALUE_EXPR (v, m); DECL_HAS_VALUE_EXPR_P (v) = 1; if (!shared) omp_private_member_vec.safe_push (t); } return v; } /* Helper function for handle_omp_array_sections. Called recursively to handle multiple array-section-subscripts. C is the clause, T current expression (initially OMP_CLAUSE_DECL), which is either a TREE_LIST for array-section-subscript (TREE_PURPOSE is low-bound expression if specified, TREE_VALUE length expression if specified, TREE_CHAIN is what it has been specified after, or some decl. 
TYPES vector is populated with array section types, MAYBE_ZERO_LEN set to true if any of the array-section-subscript could have length of zero (explicit or implicit), FIRST_NON_ONE is the index of the first array-section-subscript which is known not to have length of one. Given say: map(a[:b][2:1][:c][:2][:d][e:f][2:5]) FIRST_NON_ONE will be 3, array-section-subscript [:b], [2:1] and [:c] all are or may have length of 1, array-section-subscript [:2] is the first one known not to have length 1. For array-section-subscript <= FIRST_NON_ONE we diagnose non-contiguous arrays if low bound isn't 0 or length isn't the array domain max + 1, for > FIRST_NON_ONE we can if MAYBE_ZERO_LEN is false. MAYBE_ZERO_LEN will be true in the above case though, as some lengths could be zero. */ static tree handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types, bool &maybe_zero_len, unsigned int &first_non_one, enum c_omp_region_type ort) { tree ret, low_bound, length, type; if (TREE_CODE (t) != TREE_LIST) { if (error_operand_p (t)) return error_mark_node; if (REFERENCE_REF_P (t) && TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF) t = TREE_OPERAND (t, 0); ret = t; if (TREE_CODE (t) == COMPONENT_REF && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FROM) && !type_dependent_expression_p (t)) { if (TREE_CODE (TREE_OPERAND (t, 1)) == FIELD_DECL && DECL_BIT_FIELD (TREE_OPERAND (t, 1))) { error_at (OMP_CLAUSE_LOCATION (c), "bit-field %qE in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } while (TREE_CODE (t) == COMPONENT_REF) { if (TREE_TYPE (TREE_OPERAND (t, 0)) && TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is a member of a union", t); return error_mark_node; } t = TREE_OPERAND (t, 0); if (ort == C_ORT_ACC && TREE_CODE (t) == INDIRECT_REF) t = TREE_OPERAND (t, 0); } if (REFERENCE_REF_P (t)) t = TREE_OPERAND (t, 0); } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) return NULL_TREE; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } else if (ort == C_ORT_OMP && TREE_CODE (t) == PARM_DECL && DECL_ARTIFICIAL (t) && DECL_NAME (t) == this_identifier) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); return error_mark_node; } else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (type_dependent_expression_p (ret)) return NULL_TREE; ret = convert_from_reference (ret); return ret; } if (ort == C_ORT_OMP && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION) && TREE_CODE (TREE_CHAIN (t)) == FIELD_DECL) TREE_CHAIN (t) = omp_privatize_field (TREE_CHAIN (t), false); ret = handle_omp_array_sections_1 (c, TREE_CHAIN (t), types, maybe_zero_len, first_non_one, ort); if (ret == error_mark_node || ret == NULL_TREE) return ret; type = TREE_TYPE (ret); low_bound = TREE_PURPOSE (t); length = TREE_VALUE (t); if ((low_bound && 
type_dependent_expression_p (low_bound)) || (length && type_dependent_expression_p (length))) return NULL_TREE; if (low_bound == error_mark_node || length == error_mark_node) return error_mark_node; if (low_bound && !INTEGRAL_TYPE_P (TREE_TYPE (low_bound))) { error_at (OMP_CLAUSE_LOCATION (c), "low bound %qE of array section does not have integral type", low_bound); return error_mark_node; } if (length && !INTEGRAL_TYPE_P (TREE_TYPE (length))) { error_at (OMP_CLAUSE_LOCATION (c), "length %qE of array section does not have integral type", length); return error_mark_node; } if (low_bound) low_bound = mark_rvalue_use (low_bound); if (length) length = mark_rvalue_use (length); /* We need to reduce to real constant-values for checks below. */ if (length) length = fold_simple (length); if (low_bound) low_bound = fold_simple (low_bound); if (low_bound && TREE_CODE (low_bound) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (low_bound)) > TYPE_PRECISION (sizetype)) low_bound = fold_convert (sizetype, low_bound); if (length && TREE_CODE (length) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (length)) > TYPE_PRECISION (sizetype)) length = fold_convert (sizetype, length); if (low_bound == NULL_TREE) low_bound = integer_zero_node; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH)) { if (length != integer_one_node) { error_at (OMP_CLAUSE_LOCATION (c), "expected single pointer in %qs clause", c_omp_map_clause_name (c, ort == C_ORT_ACC)); return error_mark_node; } } if (length != NULL_TREE) { if (!integer_nonzerop (length)) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION) { if (integer_zerop (length)) { error_at (OMP_CLAUSE_LOCATION (c), "zero length array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } } else maybe_zero_len = true; } if (first_non_one == types.length () && (TREE_CODE (length) != INTEGER_CST || integer_onep (length))) first_non_one++; } if (TREE_CODE (type) == ARRAY_TYPE) { if (length == NULL_TREE && (TYPE_DOMAIN (type) == NULL_TREE || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE)) { error_at (OMP_CLAUSE_LOCATION (c), "for unknown bound array type length expression must " "be specified"); return error_mark_node; } if (TREE_CODE (low_bound) == INTEGER_CST && tree_int_cst_sgn (low_bound) == -1) { error_at (OMP_CLAUSE_LOCATION (c), "negative low bound in array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (length != NULL_TREE && TREE_CODE (length) == INTEGER_CST && tree_int_cst_sgn (length) == -1) { error_at (OMP_CLAUSE_LOCATION (c), "negative length in array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (TYPE_DOMAIN (type) && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type))) == INTEGER_CST) { tree size = fold_convert (sizetype, TYPE_MAX_VALUE (TYPE_DOMAIN (type))); size = size_binop (PLUS_EXPR, size, size_one_node); if (TREE_CODE (low_bound) == INTEGER_CST) { if (tree_int_cst_lt (size, low_bound)) { error_at (OMP_CLAUSE_LOCATION (c), "low bound %qE above array section size " "in %qs clause", low_bound, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (tree_int_cst_equal (size, low_bound)) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND || OMP_CLAUSE_CODE (c) 
== OMP_CLAUSE_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION) { error_at (OMP_CLAUSE_LOCATION (c), "zero length array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } maybe_zero_len = true; } else if (length == NULL_TREE && first_non_one == types.length () && tree_int_cst_equal (TYPE_MAX_VALUE (TYPE_DOMAIN (type)), low_bound)) first_non_one++; } else if (length == NULL_TREE) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IN_REDUCTION && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_TASK_REDUCTION) maybe_zero_len = true; if (first_non_one == types.length ()) first_non_one++; } if (length && TREE_CODE (length) == INTEGER_CST) { if (tree_int_cst_lt (size, length)) { error_at (OMP_CLAUSE_LOCATION (c), "length %qE above array section size " "in %qs clause", length, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } if (TREE_CODE (low_bound) == INTEGER_CST) { tree lbpluslen = size_binop (PLUS_EXPR, fold_convert (sizetype, low_bound), fold_convert (sizetype, length)); if (TREE_CODE (lbpluslen) == INTEGER_CST && tree_int_cst_lt (size, lbpluslen)) { error_at (OMP_CLAUSE_LOCATION (c), "high bound %qE above array section size " "in %qs clause", lbpluslen, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } } } } else if (length == NULL_TREE) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_REDUCTION && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_IN_REDUCTION && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_TASK_REDUCTION) maybe_zero_len = true; if (first_non_one == types.length ()) first_non_one++; } /* For [lb:] we will need to evaluate lb more than once. */ if (length == NULL_TREE && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND) { tree lb = cp_save_expr (low_bound); if (lb != low_bound) { TREE_PURPOSE (t) = lb; low_bound = lb; } } } else if (TYPE_PTR_P (type)) { if (length == NULL_TREE) { error_at (OMP_CLAUSE_LOCATION (c), "for pointer type length expression must be specified"); return error_mark_node; } if (length != NULL_TREE && TREE_CODE (length) == INTEGER_CST && tree_int_cst_sgn (length) == -1) { error_at (OMP_CLAUSE_LOCATION (c), "negative length in array section in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } /* If there is a pointer type anywhere but in the very first array-section-subscript, the array section can't be contiguous. */ if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND && TREE_CODE (TREE_CHAIN (t)) == TREE_LIST) { error_at (OMP_CLAUSE_LOCATION (c), "array section is not contiguous in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return error_mark_node; } } else { error_at (OMP_CLAUSE_LOCATION (c), "%qE does not have pointer or array type", ret); return error_mark_node; } if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND) types.safe_push (TREE_TYPE (ret)); /* We will need to evaluate lb more than once. */ tree lb = cp_save_expr (low_bound); if (lb != low_bound) { TREE_PURPOSE (t) = lb; low_bound = lb; } /* Temporarily disable -fstrong-eval-order for array reductions. The SAVE_EXPR and COMPOUND_EXPR added if low_bound has side-effects is something the middle-end can't cope with and more importantly, it needs to be the actual base variable that is privatized, not some temporary assigned previous value of it. 
That, together with OpenMP saying how many times the side-effects are evaluated is unspecified, makes int *a, *b; ... reduction(+:a[a = b, 3:10]) really unspecified. */ warning_sentinel s (flag_strong_eval_order, OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION); ret = grok_array_decl (OMP_CLAUSE_LOCATION (c), ret, low_bound, false); return ret; } /* Handle array sections for clause C. */ static bool handle_omp_array_sections (tree c, enum c_omp_region_type ort) { bool maybe_zero_len = false; unsigned int first_non_one = 0; auto_vec<tree, 10> types; tree *tp = &OMP_CLAUSE_DECL (c); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND && TREE_CODE (*tp) == TREE_LIST && TREE_PURPOSE (*tp) && TREE_CODE (TREE_PURPOSE (*tp)) == TREE_VEC) tp = &TREE_VALUE (*tp); tree first = handle_omp_array_sections_1 (c, *tp, types, maybe_zero_len, first_non_one, ort); if (first == error_mark_node) return true; if (first == NULL_TREE) return false; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND) { tree t = *tp; tree tem = NULL_TREE; if (processing_template_decl) return false; /* Need to evaluate side effects in the length expressions if any. */ while (TREE_CODE (t) == TREE_LIST) { if (TREE_VALUE (t) && TREE_SIDE_EFFECTS (TREE_VALUE (t))) { if (tem == NULL_TREE) tem = TREE_VALUE (t); else tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem), TREE_VALUE (t), tem); } t = TREE_CHAIN (t); } if (tem) first = build2 (COMPOUND_EXPR, TREE_TYPE (first), tem, first); *tp = first; } else { unsigned int num = types.length (), i; tree t, side_effects = NULL_TREE, size = NULL_TREE; tree condition = NULL_TREE; if (int_size_in_bytes (TREE_TYPE (first)) <= 0) maybe_zero_len = true; if (processing_template_decl && maybe_zero_len) return false; for (i = num, t = OMP_CLAUSE_DECL (c); i > 0; t = TREE_CHAIN (t)) { tree low_bound = TREE_PURPOSE (t); tree length = TREE_VALUE (t); i--; if (low_bound && TREE_CODE (low_bound) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (low_bound)) > TYPE_PRECISION (sizetype)) low_bound = fold_convert (sizetype, low_bound); if (length && TREE_CODE (length) == INTEGER_CST && TYPE_PRECISION (TREE_TYPE (length)) > TYPE_PRECISION (sizetype)) length = fold_convert (sizetype, length); if (low_bound == NULL_TREE) low_bound = integer_zero_node; if (!maybe_zero_len && i > first_non_one) { if (integer_nonzerop (low_bound)) goto do_warn_noncontiguous; if (length != NULL_TREE && TREE_CODE (length) == INTEGER_CST && TYPE_DOMAIN (types[i]) && TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])) && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (types[i]))) == INTEGER_CST) { tree size; size = size_binop (PLUS_EXPR, TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])), size_one_node); if (!tree_int_cst_equal (length, size)) { do_warn_noncontiguous: error_at (OMP_CLAUSE_LOCATION (c), "array section is not contiguous in %qs " "clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); return true; } } if (!processing_template_decl && length != NULL_TREE && TREE_SIDE_EFFECTS (length)) { if (side_effects == NULL_TREE) side_effects = length; else side_effects = build2 (COMPOUND_EXPR, TREE_TYPE (side_effects), length, side_effects); } } else if (processing_template_decl) continue; else { tree l; if (i > first_non_one && ((length && integer_nonzerop (length)) || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION)) continue; if (length) l = fold_convert (sizetype, length); else { l = size_binop 
(PLUS_EXPR, TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])), size_one_node); l = size_binop (MINUS_EXPR, l, fold_convert (sizetype, low_bound)); } if (i > first_non_one) { l = fold_build2 (NE_EXPR, boolean_type_node, l, size_zero_node); if (condition == NULL_TREE) condition = l; else condition = fold_build2 (BIT_AND_EXPR, boolean_type_node, l, condition); } else if (size == NULL_TREE) { size = size_in_bytes (TREE_TYPE (types[i])); tree eltype = TREE_TYPE (types[num - 1]); while (TREE_CODE (eltype) == ARRAY_TYPE) eltype = TREE_TYPE (eltype); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION) size = size_binop (EXACT_DIV_EXPR, size, size_in_bytes (eltype)); size = size_binop (MULT_EXPR, size, l); if (condition) size = fold_build3 (COND_EXPR, sizetype, condition, size, size_zero_node); } else size = size_binop (MULT_EXPR, size, l); } } if (!processing_template_decl) { if (side_effects) size = build2 (COMPOUND_EXPR, sizetype, side_effects, size); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION) { size = size_binop (MINUS_EXPR, size, size_one_node); size = save_expr (size); tree index_type = build_index_type (size); tree eltype = TREE_TYPE (first); while (TREE_CODE (eltype) == ARRAY_TYPE) eltype = TREE_TYPE (eltype); tree type = build_array_type (eltype, index_type); tree ptype = build_pointer_type (eltype); if (TYPE_REF_P (TREE_TYPE (t)) && INDIRECT_TYPE_P (TREE_TYPE (TREE_TYPE (t)))) t = convert_from_reference (t); else if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) t = build_fold_addr_expr (t); tree t2 = build_fold_addr_expr (first); t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, t2); t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, ptrdiff_type_node, t2, fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, t)); if (tree_fits_shwi_p (t2)) t = build2 (MEM_REF, type, t, build_int_cst (ptype, tree_to_shwi (t2))); else { t2 = fold_convert_loc (OMP_CLAUSE_LOCATION (c), sizetype, t2); t = build2_loc (OMP_CLAUSE_LOCATION (c), POINTER_PLUS_EXPR, TREE_TYPE (t), t, t2); t = build2 (MEM_REF, type, t, build_int_cst (ptype, 0)); } OMP_CLAUSE_DECL (c) = t; return false; } OMP_CLAUSE_DECL (c) = first; OMP_CLAUSE_SIZE (c) = size; if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP || (TREE_CODE (t) == COMPONENT_REF && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE)) return false; if (ort == C_ORT_OMP || ort == C_ORT_ACC) switch (OMP_CLAUSE_MAP_KIND (c)) { case GOMP_MAP_ALLOC: case GOMP_MAP_IF_PRESENT: case GOMP_MAP_TO: case GOMP_MAP_FROM: case GOMP_MAP_TOFROM: case GOMP_MAP_ALWAYS_TO: case GOMP_MAP_ALWAYS_FROM: case GOMP_MAP_ALWAYS_TOFROM: case GOMP_MAP_RELEASE: case GOMP_MAP_DELETE: case GOMP_MAP_FORCE_TO: case GOMP_MAP_FORCE_FROM: case GOMP_MAP_FORCE_TOFROM: case GOMP_MAP_FORCE_PRESENT: OMP_CLAUSE_MAP_MAYBE_ZERO_LENGTH_ARRAY_SECTION (c) = 1; break; default: break; } tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); if ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP && ort != C_ORT_ACC) OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_POINTER); else if (TREE_CODE (t) == COMPONENT_REF) { gomp_map_kind k = (ort == C_ORT_ACC) ? GOMP_MAP_ATTACH_DETACH : GOMP_MAP_ALWAYS_POINTER; OMP_CLAUSE_SET_MAP_KIND (c2, k); } else if (REFERENCE_REF_P (t) && TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF) { t = TREE_OPERAND (t, 0); gomp_map_kind k = (ort == C_ORT_ACC) ? 
GOMP_MAP_ATTACH_DETACH : GOMP_MAP_ALWAYS_POINTER; OMP_CLAUSE_SET_MAP_KIND (c2, k); } else OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FIRSTPRIVATE_POINTER); if (OMP_CLAUSE_MAP_KIND (c2) != GOMP_MAP_FIRSTPRIVATE_POINTER && !cxx_mark_addressable (t)) return false; OMP_CLAUSE_DECL (c2) = t; t = build_fold_addr_expr (first); t = fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, t); tree ptr = OMP_CLAUSE_DECL (c2); ptr = convert_from_reference (ptr); if (!INDIRECT_TYPE_P (TREE_TYPE (ptr))) ptr = build_fold_addr_expr (ptr); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, ptrdiff_type_node, t, fold_convert_loc (OMP_CLAUSE_LOCATION (c), ptrdiff_type_node, ptr)); OMP_CLAUSE_SIZE (c2) = t; OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c); OMP_CLAUSE_CHAIN (c) = c2; ptr = OMP_CLAUSE_DECL (c2); if (OMP_CLAUSE_MAP_KIND (c2) != GOMP_MAP_FIRSTPRIVATE_POINTER && TYPE_REF_P (TREE_TYPE (ptr)) && INDIRECT_TYPE_P (TREE_TYPE (TREE_TYPE (ptr)))) { tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); OMP_CLAUSE_SET_MAP_KIND (c3, OMP_CLAUSE_MAP_KIND (c2)); OMP_CLAUSE_DECL (c3) = ptr; if (OMP_CLAUSE_MAP_KIND (c2) == GOMP_MAP_ALWAYS_POINTER) OMP_CLAUSE_DECL (c2) = build_simple_mem_ref (ptr); else OMP_CLAUSE_DECL (c2) = convert_from_reference (ptr); OMP_CLAUSE_SIZE (c3) = size_zero_node; OMP_CLAUSE_CHAIN (c3) = OMP_CLAUSE_CHAIN (c2); OMP_CLAUSE_CHAIN (c2) = c3; } } } return false; } /* Return identifier to look up for omp declare reduction. */ tree omp_reduction_id (enum tree_code reduction_code, tree reduction_id, tree type) { const char *p = NULL; const char *m = NULL; switch (reduction_code) { case PLUS_EXPR: case MULT_EXPR: case MINUS_EXPR: case BIT_AND_EXPR: case BIT_XOR_EXPR: case BIT_IOR_EXPR: case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: reduction_id = ovl_op_identifier (false, reduction_code); break; case MIN_EXPR: p = "min"; break; case MAX_EXPR: p = "max"; break; default: break; } if (p == NULL) { if (TREE_CODE (reduction_id) != IDENTIFIER_NODE) return error_mark_node; p = IDENTIFIER_POINTER (reduction_id); } if (type != NULL_TREE) m = mangle_type_string (TYPE_MAIN_VARIANT (type)); const char prefix[] = "omp declare reduction "; size_t lenp = sizeof (prefix); if (strncmp (p, prefix, lenp - 1) == 0) lenp = 1; size_t len = strlen (p); size_t lenm = m ? strlen (m) + 1 : 0; char *name = XALLOCAVEC (char, lenp + len + lenm); if (lenp > 1) memcpy (name, prefix, lenp - 1); memcpy (name + lenp - 1, p, len + 1); if (m) { name[lenp + len - 1] = '~'; memcpy (name + lenp + len, m, lenm); } return get_identifier (name); } /* Lookup OpenMP UDR ID for TYPE, return the corresponding artificial FUNCTION_DECL or NULL_TREE if not found. 
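   BASELINKP, if non-NULL, is filled with the BASELINK when the UDR is found
   through member lookup so that access checking can be performed; AMBIGUOUSP
   collects candidates found in base classes so that an ambiguous lookup can
   be diagnosed in the outermost recursive call.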
*/ static tree omp_reduction_lookup (location_t loc, tree id, tree type, tree *baselinkp, vec<tree> *ambiguousp) { tree orig_id = id; tree baselink = NULL_TREE; if (identifier_p (id)) { cp_id_kind idk; bool nonint_cst_expression_p; const char *error_msg; id = omp_reduction_id (ERROR_MARK, id, type); tree decl = lookup_name (id); if (decl == NULL_TREE) decl = error_mark_node; id = finish_id_expression (id, decl, NULL_TREE, &idk, false, true, &nonint_cst_expression_p, false, true, false, false, &error_msg, loc); if (idk == CP_ID_KIND_UNQUALIFIED && identifier_p (id)) { vec<tree, va_gc> *args = NULL; vec_safe_push (args, build_reference_type (type)); id = perform_koenig_lookup (id, args, tf_none); } } else if (TREE_CODE (id) == SCOPE_REF) id = lookup_qualified_name (TREE_OPERAND (id, 0), omp_reduction_id (ERROR_MARK, TREE_OPERAND (id, 1), type), false, false); tree fns = id; id = NULL_TREE; if (fns && is_overloaded_fn (fns)) { for (lkp_iterator iter (get_fns (fns)); iter; ++iter) { tree fndecl = *iter; if (TREE_CODE (fndecl) == FUNCTION_DECL) { tree argtype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); if (same_type_p (TREE_TYPE (argtype), type)) { id = fndecl; break; } } } if (id && BASELINK_P (fns)) { if (baselinkp) *baselinkp = fns; else baselink = fns; } } if (!id && CLASS_TYPE_P (type) && TYPE_BINFO (type)) { vec<tree> ambiguous = vNULL; tree binfo = TYPE_BINFO (type), base_binfo, ret = NULL_TREE; unsigned int ix; if (ambiguousp == NULL) ambiguousp = &ambiguous; for (ix = 0; BINFO_BASE_ITERATE (binfo, ix, base_binfo); ix++) { id = omp_reduction_lookup (loc, orig_id, BINFO_TYPE (base_binfo), baselinkp ? baselinkp : &baselink, ambiguousp); if (id == NULL_TREE) continue; if (!ambiguousp->is_empty ()) ambiguousp->safe_push (id); else if (ret != NULL_TREE) { ambiguousp->safe_push (ret); ambiguousp->safe_push (id); ret = NULL_TREE; } else ret = id; } if (ambiguousp != &ambiguous) return ret; if (!ambiguous.is_empty ()) { const char *str = _("candidates are:"); unsigned int idx; tree udr; error_at (loc, "user defined reduction lookup is ambiguous"); FOR_EACH_VEC_ELT (ambiguous, idx, udr) { inform (DECL_SOURCE_LOCATION (udr), "%s %#qD", str, udr); if (idx == 0) str = get_spaces (str); } ambiguous.release (); ret = error_mark_node; baselink = NULL_TREE; } id = ret; } if (id && baselink) perform_or_defer_access_check (BASELINK_BINFO (baselink), id, id, tf_warning_or_error); return id; } /* Helper function for cp_parser_omp_declare_reduction_exprs and tsubst_omp_udr. Remove CLEANUP_STMT for data (omp_priv variable). Also append INIT_EXPR for DECL_INITIAL of omp_priv after its DECL_EXPR. */ tree cp_remove_omp_priv_cleanup_stmt (tree *tp, int *walk_subtrees, void *data) { if (TYPE_P (*tp)) *walk_subtrees = 0; else if (TREE_CODE (*tp) == CLEANUP_STMT && CLEANUP_DECL (*tp) == (tree) data) *tp = CLEANUP_BODY (*tp); else if (TREE_CODE (*tp) == DECL_EXPR) { tree decl = DECL_EXPR_DECL (*tp); if (!processing_template_decl && decl == (tree) data && DECL_INITIAL (decl) && DECL_INITIAL (decl) != error_mark_node) { tree list = NULL_TREE; append_to_statement_list_force (*tp, &list); tree init_expr = build2 (INIT_EXPR, void_type_node, decl, DECL_INITIAL (decl)); DECL_INITIAL (decl) = NULL_TREE; append_to_statement_list_force (init_expr, &list); *tp = list; } } return NULL_TREE; } /* Data passed from cp_check_omp_declare_reduction to cp_check_omp_declare_reduction_r. 
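   LOC is the location used for diagnostics, STMTS holds the statements of
   the combiner and initializer bodies (the omp_out/omp_in and
   omp_priv/omp_orig DECL_EXPRs and the expressions using them), and
   COMBINER_P is true while the combiner is being walked.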
*/ struct cp_check_omp_declare_reduction_data { location_t loc; tree stmts[7]; bool combiner_p; }; /* Helper function for cp_check_omp_declare_reduction, called via cp_walk_tree. */ static tree cp_check_omp_declare_reduction_r (tree *tp, int *, void *data) { struct cp_check_omp_declare_reduction_data *udr_data = (struct cp_check_omp_declare_reduction_data *) data; if (SSA_VAR_P (*tp) && !DECL_ARTIFICIAL (*tp) && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 0 : 3]) && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 1 : 4])) { location_t loc = udr_data->loc; if (udr_data->combiner_p) error_at (loc, "%<#pragma omp declare reduction%> combiner refers to " "variable %qD which is not %<omp_out%> nor %<omp_in%>", *tp); else error_at (loc, "%<#pragma omp declare reduction%> initializer refers " "to variable %qD which is not %<omp_priv%> nor " "%<omp_orig%>", *tp); return *tp; } return NULL_TREE; } /* Diagnose violation of OpenMP #pragma omp declare reduction restrictions. */ void cp_check_omp_declare_reduction (tree udr) { tree type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (udr))); gcc_assert (TYPE_REF_P (type)); type = TREE_TYPE (type); int i; location_t loc = DECL_SOURCE_LOCATION (udr); if (type == error_mark_node) return; if (ARITHMETIC_TYPE_P (type)) { static enum tree_code predef_codes[] = { PLUS_EXPR, MULT_EXPR, MINUS_EXPR, BIT_AND_EXPR, BIT_XOR_EXPR, BIT_IOR_EXPR, TRUTH_ANDIF_EXPR, TRUTH_ORIF_EXPR }; for (i = 0; i < 8; i++) { tree id = omp_reduction_id (predef_codes[i], NULL_TREE, NULL_TREE); const char *n1 = IDENTIFIER_POINTER (DECL_NAME (udr)); const char *n2 = IDENTIFIER_POINTER (id); if (strncmp (n1, n2, IDENTIFIER_LENGTH (id)) == 0 && (n1[IDENTIFIER_LENGTH (id)] == '~' || n1[IDENTIFIER_LENGTH (id)] == '\0')) break; } if (i == 8 && TREE_CODE (type) != COMPLEX_EXPR) { const char prefix_minmax[] = "omp declare reduction m"; size_t prefix_size = sizeof (prefix_minmax) - 1; const char *n = IDENTIFIER_POINTER (DECL_NAME (udr)); if (strncmp (IDENTIFIER_POINTER (DECL_NAME (udr)), prefix_minmax, prefix_size) == 0 && ((n[prefix_size] == 'i' && n[prefix_size + 1] == 'n') || (n[prefix_size] == 'a' && n[prefix_size + 1] == 'x')) && (n[prefix_size + 2] == '~' || n[prefix_size + 2] == '\0')) i = 0; } if (i < 8) { error_at (loc, "predeclared arithmetic type %qT in " "%<#pragma omp declare reduction%>", type); return; } } else if (FUNC_OR_METHOD_TYPE_P (type) || TREE_CODE (type) == ARRAY_TYPE) { error_at (loc, "function or array type %qT in " "%<#pragma omp declare reduction%>", type); return; } else if (TYPE_REF_P (type)) { error_at (loc, "reference type %qT in %<#pragma omp declare reduction%>", type); return; } else if (TYPE_QUALS_NO_ADDR_SPACE (type)) { error_at (loc, "%<const%>, %<volatile%> or %<__restrict%>-qualified " "type %qT in %<#pragma omp declare reduction%>", type); return; } tree body = DECL_SAVED_TREE (udr); if (body == NULL_TREE || TREE_CODE (body) != STATEMENT_LIST) return; tree_stmt_iterator tsi; struct cp_check_omp_declare_reduction_data data; memset (data.stmts, 0, sizeof data.stmts); for (i = 0, tsi = tsi_start (body); i < 7 && !tsi_end_p (tsi); i++, tsi_next (&tsi)) data.stmts[i] = tsi_stmt (tsi); data.loc = loc; gcc_assert (tsi_end_p (tsi)); if (i >= 3) { gcc_assert (TREE_CODE (data.stmts[0]) == DECL_EXPR && TREE_CODE (data.stmts[1]) == DECL_EXPR); if (TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0]))) return; data.combiner_p = true; if (cp_walk_tree (&data.stmts[2], cp_check_omp_declare_reduction_r, &data, NULL)) TREE_NO_WARNING (DECL_EXPR_DECL 
(data.stmts[0])) = 1; } if (i >= 6) { gcc_assert (TREE_CODE (data.stmts[3]) == DECL_EXPR && TREE_CODE (data.stmts[4]) == DECL_EXPR); data.combiner_p = false; if (cp_walk_tree (&data.stmts[5], cp_check_omp_declare_reduction_r, &data, NULL) || cp_walk_tree (&DECL_INITIAL (DECL_EXPR_DECL (data.stmts[3])), cp_check_omp_declare_reduction_r, &data, NULL)) TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1; if (i == 7) gcc_assert (TREE_CODE (data.stmts[6]) == DECL_EXPR); } } /* Helper function of finish_omp_clauses. Clone STMT as if we were making an inline call. But, remap the OMP_DECL1 VAR_DECL (omp_out resp. omp_orig) to PLACEHOLDER and OMP_DECL2 VAR_DECL (omp_in resp. omp_priv) to DECL. */ static tree clone_omp_udr (tree stmt, tree omp_decl1, tree omp_decl2, tree decl, tree placeholder) { copy_body_data id; hash_map<tree, tree> decl_map; decl_map.put (omp_decl1, placeholder); decl_map.put (omp_decl2, decl); memset (&id, 0, sizeof (id)); id.src_fn = DECL_CONTEXT (omp_decl1); id.dst_fn = current_function_decl; id.src_cfun = DECL_STRUCT_FUNCTION (id.src_fn); id.decl_map = &decl_map; id.copy_decl = copy_decl_no_change; id.transform_call_graph_edges = CB_CGE_DUPLICATE; id.transform_new_cfg = true; id.transform_return_to_modify = false; id.transform_lang_insert_block = NULL; id.eh_lp_nr = 0; walk_tree (&stmt, copy_tree_body_r, &id, NULL); return stmt; } /* Helper function of finish_omp_clauses, called via cp_walk_tree. Find OMP_CLAUSE_PLACEHOLDER (passed in DATA) in *TP. */ static tree find_omp_placeholder_r (tree *tp, int *, void *data) { if (*tp == (tree) data) return *tp; return NULL_TREE; } /* Helper function of finish_omp_clauses. Handle OMP_CLAUSE_REDUCTION C. Return true if there is some error and the clause should be removed. */ static bool finish_omp_reduction_clause (tree c, bool *need_default_ctor, bool *need_dtor) { tree t = OMP_CLAUSE_DECL (c); bool predefined = false; if (TREE_CODE (t) == TREE_LIST) { gcc_assert (processing_template_decl); return false; } tree type = TREE_TYPE (t); if (TREE_CODE (t) == MEM_REF) type = TREE_TYPE (type); if (TYPE_REF_P (type)) type = TREE_TYPE (type); if (TREE_CODE (type) == ARRAY_TYPE) { tree oatype = type; gcc_assert (TREE_CODE (t) != MEM_REF); while (TREE_CODE (type) == ARRAY_TYPE) type = TREE_TYPE (type); if (!processing_template_decl) { t = require_complete_type (t); if (t == error_mark_node) return true; tree size = size_binop (EXACT_DIV_EXPR, TYPE_SIZE_UNIT (oatype), TYPE_SIZE_UNIT (type)); if (integer_zerop (size)) { error_at (OMP_CLAUSE_LOCATION (c), "%qE in %<reduction%> clause is a zero size array", omp_clause_printable_decl (t)); return true; } size = size_binop (MINUS_EXPR, size, size_one_node); size = save_expr (size); tree index_type = build_index_type (size); tree atype = build_array_type (type, index_type); tree ptype = build_pointer_type (type); if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) t = build_fold_addr_expr (t); t = build2 (MEM_REF, atype, t, build_int_cst (ptype, 0)); OMP_CLAUSE_DECL (c) = t; } } if (type == error_mark_node) return true; else if (ARITHMETIC_TYPE_P (type)) switch (OMP_CLAUSE_REDUCTION_CODE (c)) { case PLUS_EXPR: case MULT_EXPR: case MINUS_EXPR: predefined = true; break; case MIN_EXPR: case MAX_EXPR: if (TREE_CODE (type) == COMPLEX_TYPE) break; predefined = true; break; case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: if (FLOAT_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE) break; predefined = true; break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: if (FLOAT_TYPE_P (type)) break; predefined = 
true; break; default: break; } else if (TYPE_READONLY (type)) { error_at (OMP_CLAUSE_LOCATION (c), "%qE has const type for %<reduction%>", omp_clause_printable_decl (t)); return true; } else if (!processing_template_decl) { t = require_complete_type (t); if (t == error_mark_node) return true; OMP_CLAUSE_DECL (c) = t; } if (predefined) { OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE; return false; } else if (processing_template_decl) { if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) == error_mark_node) return true; return false; } tree id = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c); type = TYPE_MAIN_VARIANT (type); OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE; if (id == NULL_TREE) id = omp_reduction_id (OMP_CLAUSE_REDUCTION_CODE (c), NULL_TREE, NULL_TREE); id = omp_reduction_lookup (OMP_CLAUSE_LOCATION (c), id, type, NULL, NULL); if (id) { if (id == error_mark_node) return true; mark_used (id); tree body = DECL_SAVED_TREE (id); if (!body) return true; if (TREE_CODE (body) == STATEMENT_LIST) { tree_stmt_iterator tsi; tree placeholder = NULL_TREE, decl_placeholder = NULL_TREE; int i; tree stmts[7]; tree atype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (id))); atype = TREE_TYPE (atype); bool need_static_cast = !same_type_p (type, atype); memset (stmts, 0, sizeof stmts); for (i = 0, tsi = tsi_start (body); i < 7 && !tsi_end_p (tsi); i++, tsi_next (&tsi)) stmts[i] = tsi_stmt (tsi); gcc_assert (tsi_end_p (tsi)); if (i >= 3) { gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR && TREE_CODE (stmts[1]) == DECL_EXPR); placeholder = build_lang_decl (VAR_DECL, NULL_TREE, type); DECL_ARTIFICIAL (placeholder) = 1; DECL_IGNORED_P (placeholder) = 1; OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = placeholder; if (TREE_CODE (t) == MEM_REF) { decl_placeholder = build_lang_decl (VAR_DECL, NULL_TREE, type); DECL_ARTIFICIAL (decl_placeholder) = 1; DECL_IGNORED_P (decl_placeholder) = 1; OMP_CLAUSE_REDUCTION_DECL_PLACEHOLDER (c) = decl_placeholder; } if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[0]))) cxx_mark_addressable (placeholder); if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[1])) && (decl_placeholder || !TYPE_REF_P (TREE_TYPE (OMP_CLAUSE_DECL (c))))) cxx_mark_addressable (decl_placeholder ? decl_placeholder : OMP_CLAUSE_DECL (c)); tree omp_out = placeholder; tree omp_in = decl_placeholder ? decl_placeholder : convert_from_reference (OMP_CLAUSE_DECL (c)); if (need_static_cast) { tree rtype = build_reference_type (atype); omp_out = build_static_cast (input_location, rtype, omp_out, tf_warning_or_error); omp_in = build_static_cast (input_location, rtype, omp_in, tf_warning_or_error); if (omp_out == error_mark_node || omp_in == error_mark_node) return true; omp_out = convert_from_reference (omp_out); omp_in = convert_from_reference (omp_in); } OMP_CLAUSE_REDUCTION_MERGE (c) = clone_omp_udr (stmts[2], DECL_EXPR_DECL (stmts[0]), DECL_EXPR_DECL (stmts[1]), omp_in, omp_out); } if (i >= 6) { gcc_assert (TREE_CODE (stmts[3]) == DECL_EXPR && TREE_CODE (stmts[4]) == DECL_EXPR); if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[3])) && (decl_placeholder || !TYPE_REF_P (TREE_TYPE (OMP_CLAUSE_DECL (c))))) cxx_mark_addressable (decl_placeholder ? decl_placeholder : OMP_CLAUSE_DECL (c)); if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[4]))) cxx_mark_addressable (placeholder); tree omp_priv = decl_placeholder ? 
decl_placeholder : convert_from_reference (OMP_CLAUSE_DECL (c)); tree omp_orig = placeholder; if (need_static_cast) { if (i == 7) { error_at (OMP_CLAUSE_LOCATION (c), "user defined reduction with constructor " "initializer for base class %qT", atype); return true; } tree rtype = build_reference_type (atype); omp_priv = build_static_cast (input_location, rtype, omp_priv, tf_warning_or_error); omp_orig = build_static_cast (input_location, rtype, omp_orig, tf_warning_or_error); if (omp_priv == error_mark_node || omp_orig == error_mark_node) return true; omp_priv = convert_from_reference (omp_priv); omp_orig = convert_from_reference (omp_orig); } if (i == 6) *need_default_ctor = true; OMP_CLAUSE_REDUCTION_INIT (c) = clone_omp_udr (stmts[5], DECL_EXPR_DECL (stmts[4]), DECL_EXPR_DECL (stmts[3]), omp_priv, omp_orig); if (cp_walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c), find_omp_placeholder_r, placeholder, NULL)) OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c) = 1; } else if (i >= 3) { if (CLASS_TYPE_P (type) && !pod_type_p (type)) *need_default_ctor = true; else { tree init; tree v = decl_placeholder ? decl_placeholder : convert_from_reference (t); if (AGGREGATE_TYPE_P (TREE_TYPE (v))) init = build_constructor (TREE_TYPE (v), NULL); else init = fold_convert (TREE_TYPE (v), integer_zero_node); OMP_CLAUSE_REDUCTION_INIT (c) = build2 (INIT_EXPR, TREE_TYPE (v), v, init); } } } } if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) *need_dtor = true; else { error_at (OMP_CLAUSE_LOCATION (c), "user defined reduction not found for %qE", omp_clause_printable_decl (t)); return true; } if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF) gcc_assert (TYPE_SIZE_UNIT (type) && TREE_CODE (TYPE_SIZE_UNIT (type)) == INTEGER_CST); return false; } /* Called from finish_struct_1. linear(this) or linear(this:step) clauses might not be finalized yet because the class has been incomplete when parsing #pragma omp declare simd methods. Fix those up now. */ void finish_omp_declare_simd_methods (tree t) { if (processing_template_decl) return; for (tree x = TYPE_FIELDS (t); x; x = DECL_CHAIN (x)) { if (TREE_CODE (x) == USING_DECL || !DECL_NONSTATIC_MEMBER_FUNCTION_P (x)) continue; tree ods = lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (x)); if (!ods || !TREE_VALUE (ods)) continue; for (tree c = TREE_VALUE (TREE_VALUE (ods)); c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR && integer_zerop (OMP_CLAUSE_DECL (c)) && OMP_CLAUSE_LINEAR_STEP (c) && TYPE_PTR_P (TREE_TYPE (OMP_CLAUSE_LINEAR_STEP (c)))) { tree s = OMP_CLAUSE_LINEAR_STEP (c); s = fold_convert_loc (OMP_CLAUSE_LOCATION (c), sizetype, s); s = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MULT_EXPR, sizetype, s, TYPE_SIZE_UNIT (t)); OMP_CLAUSE_LINEAR_STEP (c) = s; } } } /* Adjust sink depend clause to take into account pointer offsets. Return TRUE if there was a problem processing the offset, and the whole clause should be removed. */ static bool cp_finish_omp_clause_depend_sink (tree sink_clause) { tree t = OMP_CLAUSE_DECL (sink_clause); gcc_assert (TREE_CODE (t) == TREE_LIST); /* Make sure we don't adjust things twice for templates. */ if (processing_template_decl) return false; for (; t; t = TREE_CHAIN (t)) { tree decl = TREE_VALUE (t); if (TYPE_PTR_P (TREE_TYPE (decl))) { tree offset = TREE_PURPOSE (t); bool neg = wi::neg_p (wi::to_wide (offset)); offset = fold_unary (ABS_EXPR, TREE_TYPE (offset), offset); decl = mark_rvalue_use (decl); decl = convert_from_reference (decl); tree t2 = pointer_int_sum (OMP_CLAUSE_LOCATION (sink_clause), neg ? 
MINUS_EXPR : PLUS_EXPR,
					  decl, offset);
	  t2 = fold_build2_loc (OMP_CLAUSE_LOCATION (sink_clause),
				MINUS_EXPR, sizetype,
				fold_convert (sizetype, t2),
				fold_convert (sizetype, decl));
	  if (t2 == error_mark_node)
	    return true;
	  TREE_PURPOSE (t) = t2;
	}
    }
  return false;
}

/* Finish OpenMP iterators ITER.  Return true if they are erroneous
   and clauses containing them should be removed.  */

static bool
cp_omp_finish_iterators (tree iter)
{
  bool ret = false;
  for (tree it = iter; it; it = TREE_CHAIN (it))
    {
      tree var = TREE_VEC_ELT (it, 0);
      tree begin = TREE_VEC_ELT (it, 1);
      tree end = TREE_VEC_ELT (it, 2);
      tree step = TREE_VEC_ELT (it, 3);
      tree orig_step;
      tree type = TREE_TYPE (var);
      location_t loc = DECL_SOURCE_LOCATION (var);
      if (type == error_mark_node)
	{
	  ret = true;
	  continue;
	}
      if (type_dependent_expression_p (var))
	continue;
      if (!INTEGRAL_TYPE_P (type) && !POINTER_TYPE_P (type))
	{
	  error_at (loc, "iterator %qD has neither integral nor pointer type",
		    var);
	  ret = true;
	  continue;
	}
      else if (TYPE_READONLY (type))
	{
	  error_at (loc, "iterator %qD has const qualified type", var);
	  ret = true;
	  continue;
	}
      if (type_dependent_expression_p (begin)
	  || type_dependent_expression_p (end)
	  || type_dependent_expression_p (step))
	continue;
      else if (error_operand_p (step))
	{
	  ret = true;
	  continue;
	}
      else if (!INTEGRAL_TYPE_P (TREE_TYPE (step)))
	{
	  error_at (EXPR_LOC_OR_LOC (step, loc),
		    "iterator step with non-integral type");
	  ret = true;
	  continue;
	}
      begin = mark_rvalue_use (begin);
      end = mark_rvalue_use (end);
      step = mark_rvalue_use (step);
      begin = cp_build_c_cast (input_location, type, begin,
			       tf_warning_or_error);
      end = cp_build_c_cast (input_location, type, end, tf_warning_or_error);
      orig_step = step;
      if (!processing_template_decl)
	step = orig_step = save_expr (step);
      tree stype = POINTER_TYPE_P (type) ?
sizetype : type; step = cp_build_c_cast (input_location, stype, step, tf_warning_or_error); if (POINTER_TYPE_P (type) && !processing_template_decl) { begin = save_expr (begin); step = pointer_int_sum (loc, PLUS_EXPR, begin, step); step = fold_build2_loc (loc, MINUS_EXPR, sizetype, fold_convert (sizetype, step), fold_convert (sizetype, begin)); step = fold_convert (ssizetype, step); } if (!processing_template_decl) { begin = maybe_constant_value (begin); end = maybe_constant_value (end); step = maybe_constant_value (step); orig_step = maybe_constant_value (orig_step); } if (integer_zerop (step)) { error_at (loc, "iterator %qD has zero step", var); ret = true; continue; } if (begin == error_mark_node || end == error_mark_node || step == error_mark_node || orig_step == error_mark_node) { ret = true; continue; } if (!processing_template_decl) { begin = fold_build_cleanup_point_expr (TREE_TYPE (begin), begin); end = fold_build_cleanup_point_expr (TREE_TYPE (end), end); step = fold_build_cleanup_point_expr (TREE_TYPE (step), step); orig_step = fold_build_cleanup_point_expr (TREE_TYPE (orig_step), orig_step); } hash_set<tree> pset; tree it2; for (it2 = TREE_CHAIN (it); it2; it2 = TREE_CHAIN (it2)) { tree var2 = TREE_VEC_ELT (it2, 0); tree begin2 = TREE_VEC_ELT (it2, 1); tree end2 = TREE_VEC_ELT (it2, 2); tree step2 = TREE_VEC_ELT (it2, 3); location_t loc2 = DECL_SOURCE_LOCATION (var2); if (cp_walk_tree (&begin2, find_omp_placeholder_r, var, &pset)) { error_at (EXPR_LOC_OR_LOC (begin2, loc2), "begin expression refers to outer iterator %qD", var); break; } else if (cp_walk_tree (&end2, find_omp_placeholder_r, var, &pset)) { error_at (EXPR_LOC_OR_LOC (end2, loc2), "end expression refers to outer iterator %qD", var); break; } else if (cp_walk_tree (&step2, find_omp_placeholder_r, var, &pset)) { error_at (EXPR_LOC_OR_LOC (step2, loc2), "step expression refers to outer iterator %qD", var); break; } } if (it2) { ret = true; continue; } TREE_VEC_ELT (it, 1) = begin; TREE_VEC_ELT (it, 2) = end; if (processing_template_decl) TREE_VEC_ELT (it, 3) = orig_step; else { TREE_VEC_ELT (it, 3) = step; TREE_VEC_ELT (it, 4) = orig_step; } } return ret; } /* Ensure that pointers are used in OpenACC attach and detach clauses. Return true if an error has been detected. */ static bool cp_oacc_check_attachments (tree c) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) return false; /* OpenACC attach / detach clauses must be pointers. */ if (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH) { tree t = OMP_CLAUSE_DECL (c); tree type; while (TREE_CODE (t) == TREE_LIST) t = TREE_CHAIN (t); type = TREE_TYPE (t); if (TREE_CODE (type) == REFERENCE_TYPE) type = TREE_TYPE (type); if (TREE_CODE (type) != POINTER_TYPE) { error_at (OMP_CLAUSE_LOCATION (c), "expected pointer in %qs clause", c_omp_map_clause_name (c, true)); return true; } } return false; } /* For all elements of CLAUSES, validate them vs OpenMP constraints. Remove any elements from the list that are invalid. 
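   ORT identifies the kind of construct the clauses appear on (OpenMP,
   OpenACC or declare simd variants), which determines which of the checks
   below apply.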
*/ tree finish_omp_clauses (tree clauses, enum c_omp_region_type ort) { bitmap_head generic_head, firstprivate_head, lastprivate_head; bitmap_head aligned_head, map_head, map_field_head, oacc_reduction_head; tree c, t, *pc; tree safelen = NULL_TREE; bool branch_seen = false; bool copyprivate_seen = false; bool ordered_seen = false; bool order_seen = false; bool schedule_seen = false; bool oacc_async = false; tree last_iterators = NULL_TREE; bool last_iterators_remove = false; /* 1 if normal/task reduction has been seen, -1 if inscan reduction has been seen, -2 if mixed inscan/normal reduction diagnosed. */ int reduction_seen = 0; bitmap_obstack_initialize (NULL); bitmap_initialize (&generic_head, &bitmap_default_obstack); bitmap_initialize (&firstprivate_head, &bitmap_default_obstack); bitmap_initialize (&lastprivate_head, &bitmap_default_obstack); bitmap_initialize (&aligned_head, &bitmap_default_obstack); /* If ort == C_ORT_OMP_DECLARE_SIMD used as uniform_head instead. */ bitmap_initialize (&map_head, &bitmap_default_obstack); bitmap_initialize (&map_field_head, &bitmap_default_obstack); /* If ort == C_ORT_OMP used as nontemporal_head or use_device_xxx_head instead. */ bitmap_initialize (&oacc_reduction_head, &bitmap_default_obstack); if (ort & C_ORT_ACC) for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_ASYNC) { oacc_async = true; break; } for (pc = &clauses, c = clauses; c ; c = *pc) { bool remove = false; bool field_ok = false; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_SHARED: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); goto check_dup_generic; case OMP_CLAUSE_PRIVATE: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); goto check_dup_generic; case OMP_CLAUSE_REDUCTION: if (reduction_seen == 0) reduction_seen = OMP_CLAUSE_REDUCTION_INSCAN (c) ? -1 : 1; else if (reduction_seen != -2 && reduction_seen != (OMP_CLAUSE_REDUCTION_INSCAN (c) ? 
-1 : 1)) { error_at (OMP_CLAUSE_LOCATION (c), "%<inscan%> and non-%<inscan%> %<reduction%> clauses " "on the same construct"); reduction_seen = -2; } /* FALLTHRU */ case OMP_CLAUSE_IN_REDUCTION: case OMP_CLAUSE_TASK_REDUCTION: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) { remove = true; break; } if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_REDUCTION_INSCAN (c)) { error_at (OMP_CLAUSE_LOCATION (c), "%<inscan%> %<reduction%> clause with array " "section"); remove = true; break; } if (TREE_CODE (t) == TREE_LIST) { while (TREE_CODE (t) == TREE_LIST) t = TREE_CHAIN (t); } else { gcc_assert (TREE_CODE (t) == MEM_REF); t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == POINTER_PLUS_EXPR) t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == ADDR_EXPR || INDIRECT_REF_P (t)) t = TREE_OPERAND (t, 0); } tree n = omp_clause_decl_field (t); if (n) t = n; goto check_dup_generic_t; } if (oacc_async) cxx_mark_addressable (t); goto check_dup_generic; case OMP_CLAUSE_COPYPRIVATE: copyprivate_seen = true; field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); goto check_dup_generic; case OMP_CLAUSE_COPYIN: goto check_dup_generic; case OMP_CLAUSE_LINEAR: field_ok = ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP); t = OMP_CLAUSE_DECL (c); if (ort != C_ORT_OMP_DECLARE_SIMD && OMP_CLAUSE_LINEAR_KIND (c) != OMP_CLAUSE_LINEAR_DEFAULT) { error_at (OMP_CLAUSE_LOCATION (c), "modifier should not be specified in %<linear%> " "clause on %<simd%> or %<for%> constructs"); OMP_CLAUSE_LINEAR_KIND (c) = OMP_CLAUSE_LINEAR_DEFAULT; } if ((VAR_P (t) || TREE_CODE (t) == PARM_DECL) && !type_dependent_expression_p (t)) { tree type = TREE_TYPE (t); if ((OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF || OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_UVAL) && !TYPE_REF_P (type)) { error_at (OMP_CLAUSE_LOCATION (c), "linear clause with %qs modifier applied to " "non-reference variable with %qT type", OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF ? 
"ref" : "uval", TREE_TYPE (t)); remove = true; break; } if (TYPE_REF_P (type)) type = TREE_TYPE (type); if (OMP_CLAUSE_LINEAR_KIND (c) != OMP_CLAUSE_LINEAR_REF) { if (!INTEGRAL_TYPE_P (type) && !TYPE_PTR_P (type)) { error_at (OMP_CLAUSE_LOCATION (c), "linear clause applied to non-integral " "non-pointer variable with %qT type", TREE_TYPE (t)); remove = true; break; } } } t = OMP_CLAUSE_LINEAR_STEP (c); if (t == NULL_TREE) t = integer_one_node; if (t == error_mark_node) { remove = true; break; } else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t)) && (ort != C_ORT_OMP_DECLARE_SIMD || TREE_CODE (t) != PARM_DECL || !TYPE_REF_P (TREE_TYPE (t)) || !INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (t))))) { error_at (OMP_CLAUSE_LOCATION (c), "linear step expression must be integral"); remove = true; break; } else { t = mark_rvalue_use (t); if (ort == C_ORT_OMP_DECLARE_SIMD && TREE_CODE (t) == PARM_DECL) { OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c) = 1; goto check_dup_generic; } if (!processing_template_decl && (VAR_P (OMP_CLAUSE_DECL (c)) || TREE_CODE (OMP_CLAUSE_DECL (c)) == PARM_DECL)) { if (ort == C_ORT_OMP_DECLARE_SIMD) { t = maybe_constant_value (t); if (TREE_CODE (t) != INTEGER_CST) { error_at (OMP_CLAUSE_LOCATION (c), "%<linear%> clause step %qE is neither " "constant nor a parameter", t); remove = true; break; } } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); tree type = TREE_TYPE (OMP_CLAUSE_DECL (c)); if (TYPE_REF_P (type)) type = TREE_TYPE (type); if (OMP_CLAUSE_LINEAR_KIND (c) == OMP_CLAUSE_LINEAR_REF) { type = build_pointer_type (type); tree d = fold_convert (type, OMP_CLAUSE_DECL (c)); t = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR, d, t); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, sizetype, fold_convert (sizetype, t), fold_convert (sizetype, d)); if (t == error_mark_node) { remove = true; break; } } else if (TYPE_PTR_P (type) /* Can't multiply the step yet if *this is still incomplete type. 
*/ && (ort != C_ORT_OMP_DECLARE_SIMD || TREE_CODE (OMP_CLAUSE_DECL (c)) != PARM_DECL || !DECL_ARTIFICIAL (OMP_CLAUSE_DECL (c)) || DECL_NAME (OMP_CLAUSE_DECL (c)) != this_identifier || !TYPE_BEING_DEFINED (TREE_TYPE (type)))) { tree d = convert_from_reference (OMP_CLAUSE_DECL (c)); t = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR, d, t); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, sizetype, fold_convert (sizetype, t), fold_convert (sizetype, d)); if (t == error_mark_node) { remove = true; break; } } else t = fold_convert (type, t); } OMP_CLAUSE_LINEAR_STEP (c) = t; } goto check_dup_generic; check_dup_generic: t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (t) { if (!remove && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SHARED) omp_note_field_privatization (t, OMP_CLAUSE_DECL (c)); } else t = OMP_CLAUSE_DECL (c); check_dup_generic_t: if (t == current_class_ptr && ((ort != C_ORT_OMP_DECLARE_SIMD && ort != C_ORT_ACC) || (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_LINEAR && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_UNIFORM))) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL && (!field_ok || TREE_CODE (t) != FIELD_DECL)) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if ((ort == C_ORT_ACC && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION) || (ort == C_ORT_OMP && (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR || (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_ADDR)))) { if (bitmap_bit_p (&oacc_reduction_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), ort == C_ORT_ACC ? 
"%qD appears more than once in reduction clauses" : "%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&oacc_reduction_head, DECL_UID (t)); } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); remove = true; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE && bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears both in data and map clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); if (!field_ok) break; handle_field_decl: if (!remove && TREE_CODE (t) == FIELD_DECL && t == OMP_CLAUSE_DECL (c)) { OMP_CLAUSE_DECL (c) = omp_privatize_field (t, (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED)); if (OMP_CLAUSE_DECL (c) == error_mark_node) remove = true; } break; case OMP_CLAUSE_FIRSTPRIVATE: t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (t) omp_note_field_privatization (t, OMP_CLAUSE_DECL (c)); else t = OMP_CLAUSE_DECL (c); if (ort != C_ORT_ACC && t == current_class_ptr) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL && ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP || TREE_CODE (t) != FIELD_DECL)) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in clause %<firstprivate%>", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %<firstprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); remove = true; } else if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears both in data and map clauses", t); remove = true; } else bitmap_set_bit (&firstprivate_head, DECL_UID (t)); goto handle_field_decl; case OMP_CLAUSE_LASTPRIVATE: t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (t) omp_note_field_privatization (t, OMP_CLAUSE_DECL (c)); else t = OMP_CLAUSE_DECL (c); if (ort != C_ORT_ACC && t == current_class_ptr) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL && ((ort & C_ORT_OMP_DECLARE_SIMD) != C_ORT_OMP || TREE_CODE (t) != FIELD_DECL)) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in clause %<lastprivate%>", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %<lastprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&lastprivate_head, DECL_UID (t)); goto handle_field_decl; case OMP_CLAUSE_IF: t = OMP_CLAUSE_IF_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if 
(!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_IF_EXPR (c) = t; break; case OMP_CLAUSE_FINAL: t = OMP_CLAUSE_FINAL_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_FINAL_EXPR (c) = t; break; case OMP_CLAUSE_GANG: /* Operand 1 is the gang static: argument. */ t = OMP_CLAUSE_OPERAND (c, 1); if (t != NULL_TREE) { if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<gang%> static expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1 && t != integer_minus_one_node) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<gang%> static value must be " "positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } } OMP_CLAUSE_OPERAND (c, 1) = t; } /* Check operand 0, the num argument. */ /* FALLTHRU */ case OMP_CLAUSE_WORKER: case OMP_CLAUSE_VECTOR: if (OMP_CLAUSE_OPERAND (c, 0) == NULL_TREE) break; /* FALLTHRU */ case OMP_CLAUSE_NUM_TASKS: case OMP_CLAUSE_NUM_TEAMS: case OMP_CLAUSE_NUM_THREADS: case OMP_CLAUSE_NUM_GANGS: case OMP_CLAUSE_NUM_WORKERS: case OMP_CLAUSE_VECTOR_LENGTH: t = OMP_CLAUSE_OPERAND (c, 0); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_GANG: error_at (OMP_CLAUSE_LOCATION (c), "%<gang%> num expression must be integral"); break; case OMP_CLAUSE_VECTOR: error_at (OMP_CLAUSE_LOCATION (c), "%<vector%> length expression must be integral"); break; case OMP_CLAUSE_WORKER: error_at (OMP_CLAUSE_LOCATION (c), "%<worker%> num expression must be integral"); break; default: error_at (OMP_CLAUSE_LOCATION (c), "%qs expression must be integral", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); } remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_GANG: warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<gang%> num value must be positive"); break; case OMP_CLAUSE_VECTOR: warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<vector%> length value must be " "positive"); break; case OMP_CLAUSE_WORKER: warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<worker%> num value must be " "positive"); break; default: warning_at (OMP_CLAUSE_LOCATION (c), 0, "%qs value must be positive", omp_clause_code_name [OMP_CLAUSE_CODE (c)]); } t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_OPERAND (c, 0) = t; } break; case OMP_CLAUSE_SCHEDULE: t = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "schedule chunk size expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "chunk size value must be positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) 
= t; } if (!remove) schedule_seen = true; break; case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_SAFELEN: t = OMP_CLAUSE_OPERAND (c, 0); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qs length expression must be integral", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t) != 1) { error_at (OMP_CLAUSE_LOCATION (c), "%qs length expression must be positive " "constant integer expression", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } OMP_CLAUSE_OPERAND (c, 0) = t; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SAFELEN) safelen = c; } break; case OMP_CLAUSE_ASYNC: t = OMP_CLAUSE_ASYNC_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<async%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_ASYNC_EXPR (c) = t; } break; case OMP_CLAUSE_WAIT: t = OMP_CLAUSE_WAIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_WAIT_EXPR (c) = t; break; case OMP_CLAUSE_THREAD_LIMIT: t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<thread_limit%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<thread_limit%> value must be positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t; } break; case OMP_CLAUSE_DEVICE: t = OMP_CLAUSE_DEVICE_ID (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<device%> id must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DEVICE_ID (c) = t; } break; case OMP_CLAUSE_DIST_SCHEDULE: t = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<dist_schedule%> chunk size expression must be " "integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t; } break; case OMP_CLAUSE_ALIGNED: t = OMP_CLAUSE_DECL (c); if (t == current_class_ptr && ort != C_ORT_OMP_DECLARE_SIMD) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in %<aligned%> clause", t); else error_at (OMP_CLAUSE_LOCATION (c), 
"%qE is not a variable in %<aligned%> clause", t); remove = true; } else if (!type_dependent_expression_p (t) && !TYPE_PTR_P (TREE_TYPE (t)) && TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE && (!TYPE_REF_P (TREE_TYPE (t)) || (!INDIRECT_TYPE_P (TREE_TYPE (TREE_TYPE (t))) && (TREE_CODE (TREE_TYPE (TREE_TYPE (t))) != ARRAY_TYPE)))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE in %<aligned%> clause is neither a pointer nor " "an array nor a reference to pointer or array", t); remove = true; } else if (bitmap_bit_p (&aligned_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in %<aligned%> clauses", t); remove = true; } else bitmap_set_bit (&aligned_head, DECL_UID (t)); t = OMP_CLAUSE_ALIGNED_ALIGNMENT (c); if (t == error_mark_node) remove = true; else if (t == NULL_TREE) break; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<aligned%> clause alignment expression must " "be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t) != 1) { error_at (OMP_CLAUSE_LOCATION (c), "%<aligned%> clause alignment expression must " "be positive constant integer expression"); remove = true; } else t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = t; } break; case OMP_CLAUSE_NONTEMPORAL: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in %<nontemporal%> clause", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in %<nontemporal%> clause", t); remove = true; } else if (bitmap_bit_p (&oacc_reduction_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in %<nontemporal%> " "clauses", t); remove = true; } else bitmap_set_bit (&oacc_reduction_head, DECL_UID (t)); break; case OMP_CLAUSE_DEPEND: t = OMP_CLAUSE_DECL (c); if (t == NULL_TREE) { gcc_assert (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE); break; } if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK) { if (cp_finish_omp_clause_depend_sink (c)) remove = true; break; } if (TREE_CODE (t) == TREE_LIST && TREE_PURPOSE (t) && TREE_CODE (TREE_PURPOSE (t)) == TREE_VEC) { if (TREE_PURPOSE (t) != last_iterators) last_iterators_remove = cp_omp_finish_iterators (TREE_PURPOSE (t)); last_iterators = TREE_PURPOSE (t); t = TREE_VALUE (t); if (last_iterators_remove) t = error_mark_node; } else last_iterators = NULL_TREE; if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) remove = true; else if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_DEPOBJ) { error_at (OMP_CLAUSE_LOCATION (c), "%<depend%> clause with %<depobj%> dependence " "type on array section"); remove = true; } break; } if (t == error_mark_node) remove = true; else if (ort != C_ORT_ACC && t == current_class_ptr) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; } else if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; else if (!lvalue_p (t)) { if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not lvalue expression nor array section " "in %<depend%> clause", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not lvalue expression nor array section " "in %<depend%> clause", t); remove = true; } else if (TREE_CODE 
(t) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (t, 1)) == FIELD_DECL && DECL_BIT_FIELD (TREE_OPERAND (t, 1))) { error_at (OMP_CLAUSE_LOCATION (c), "bit-field %qE in %qs clause", t, "depend"); remove = true; } else if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_DEPOBJ) { if (!c_omp_depend_t_p (TYPE_REF_P (TREE_TYPE (t)) ? TREE_TYPE (TREE_TYPE (t)) : TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE does not have %<omp_depend_t%> type in " "%<depend%> clause with %<depobj%> dependence " "type", t); remove = true; } } else if (c_omp_depend_t_p (TYPE_REF_P (TREE_TYPE (t)) ? TREE_TYPE (TREE_TYPE (t)) : TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE should not have %<omp_depend_t%> type in " "%<depend%> clause with dependence type other than " "%<depobj%>", t); remove = true; } if (!remove) { tree addr = cp_build_addr_expr (t, tf_warning_or_error); if (addr == error_mark_node) remove = true; else { t = cp_build_indirect_ref (OMP_CLAUSE_LOCATION (c), addr, RO_UNARY_STAR, tf_warning_or_error); if (t == error_mark_node) remove = true; else if (TREE_CODE (OMP_CLAUSE_DECL (c)) == TREE_LIST && TREE_PURPOSE (OMP_CLAUSE_DECL (c)) && (TREE_CODE (TREE_PURPOSE (OMP_CLAUSE_DECL (c))) == TREE_VEC)) TREE_VALUE (OMP_CLAUSE_DECL (c)) = t; else OMP_CLAUSE_DECL (c) = t; } } break; case OMP_CLAUSE_MAP: case OMP_CLAUSE_TO: case OMP_CLAUSE_FROM: case OMP_CLAUSE__CACHE_: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c, ort)) remove = true; else { t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != TREE_LIST && !type_dependent_expression_p (t) && !cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "array section does not have mappable type " "in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); cp_omp_emit_unmappable_type_notes (TREE_TYPE (t)); remove = true; } while (TREE_CODE (t) == ARRAY_REF) t = TREE_OPERAND (t, 0); if (TREE_CODE (t) == COMPONENT_REF && TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) { while (TREE_CODE (t) == COMPONENT_REF) t = TREE_OPERAND (t, 0); if (REFERENCE_REF_P (t)) t = TREE_OPERAND (t, 0); if (bitmap_bit_p (&map_field_head, DECL_UID (t))) break; if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in motion" " clauses", t); else if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data" " clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in map" " clauses", t); remove = true; } else { bitmap_set_bit (&map_head, DECL_UID (t)); bitmap_set_bit (&map_field_head, DECL_UID (t)); } } } if (cp_oacc_check_attachments (c)) remove = true; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH)) /* In this case, we have a single array element which is a pointer, and we already set OMP_CLAUSE_SIZE in handle_omp_array_sections above. For attach/detach clauses, reset the OMP_CLAUSE_SIZE (representing a bias) to zero here. */ OMP_CLAUSE_SIZE (c) = size_zero_node; break; } if (t == error_mark_node) { remove = true; break; } /* OpenACC attach / detach clauses must be pointers. 
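   cp_oacc_check_attachments reports a non-pointer operand and returns true,
   in which case the clause is removed.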
*/ if (cp_oacc_check_attachments (c)) { remove = true; break; } if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_DETACH)) /* For attach/detach clauses, set OMP_CLAUSE_SIZE (representing a bias) to zero here, so it is not set erroneously to the pointer size later on in gimplify.c. */ OMP_CLAUSE_SIZE (c) = size_zero_node; if (REFERENCE_REF_P (t) && TREE_CODE (TREE_OPERAND (t, 0)) == COMPONENT_REF) { t = TREE_OPERAND (t, 0); OMP_CLAUSE_DECL (c) = t; } if (ort == C_ORT_ACC && TREE_CODE (t) == COMPONENT_REF && TREE_CODE (TREE_OPERAND (t, 0)) == INDIRECT_REF) t = TREE_OPERAND (TREE_OPERAND (t, 0), 0); if (TREE_CODE (t) == COMPONENT_REF && ((ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP || ort == C_ORT_ACC) && OMP_CLAUSE_CODE (c) != OMP_CLAUSE__CACHE_) { if (type_dependent_expression_p (t)) break; if (TREE_CODE (TREE_OPERAND (t, 1)) == FIELD_DECL && DECL_BIT_FIELD (TREE_OPERAND (t, 1))) { error_at (OMP_CLAUSE_LOCATION (c), "bit-field %qE in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (!cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); cp_omp_emit_unmappable_type_notes (TREE_TYPE (t)); remove = true; } while (TREE_CODE (t) == COMPONENT_REF) { if (TREE_TYPE (TREE_OPERAND (t, 0)) && (TREE_CODE (TREE_TYPE (TREE_OPERAND (t, 0))) == UNION_TYPE)) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is a member of a union", t); remove = true; break; } t = TREE_OPERAND (t, 0); } if (remove) break; if (REFERENCE_REF_P (t)) t = TREE_OPERAND (t, 0); if (VAR_P (t) || TREE_CODE (t) == PARM_DECL) { if (bitmap_bit_p (&map_field_head, DECL_UID (t))) goto handle_map_references; } } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ALWAYS_POINTER || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_ATTACH_DETACH)) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (ort != C_ORT_ACC && t == current_class_ptr) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } else if (!processing_template_decl && !TYPE_REF_P (TREE_TYPE (t)) && (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP || (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER)) && !cxx_mark_addressable (t)) remove = true; else if (!(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER || (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER))) && t == OMP_CLAUSE_DECL (c) && !type_dependent_expression_p (t) && !cp_omp_mappable_type (TYPE_REF_P (TREE_TYPE (t)) ? 
TREE_TYPE (TREE_TYPE (t)) : TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); cp_omp_emit_unmappable_type_notes (TREE_TYPE (t)); remove = true; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FORCE_DEVICEPTR && !type_dependent_expression_p (t) && !INDIRECT_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a pointer variable", t); remove = true; } else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER) { if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); remove = true; } else if (bitmap_bit_p (&map_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears both in data and map clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); } else if (bitmap_bit_p (&map_head, DECL_UID (t)) && (ort != C_ORT_ACC || !bitmap_bit_p (&map_field_head, DECL_UID (t)))) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in motion clauses", t); if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in map clauses", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { if (ort == C_ORT_ACC) error_at (OMP_CLAUSE_LOCATION (c), "%qD appears more than once in data clauses", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qD appears both in data and map clauses", t); remove = true; } else { bitmap_set_bit (&map_head, DECL_UID (t)); if (t != OMP_CLAUSE_DECL (c) && TREE_CODE (OMP_CLAUSE_DECL (c)) == COMPONENT_REF) bitmap_set_bit (&map_field_head, DECL_UID (t)); } handle_map_references: if (!remove && !processing_template_decl && ort != C_ORT_DECLARE_SIMD && TYPE_REF_P (TREE_TYPE (OMP_CLAUSE_DECL (c)))) { t = OMP_CLAUSE_DECL (c); if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) { OMP_CLAUSE_DECL (c) = build_simple_mem_ref (t); if (OMP_CLAUSE_SIZE (c) == NULL_TREE) OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t))); } else if (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER && (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_REFERENCE) && (OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_ALWAYS_POINTER)) { tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c), OMP_CLAUSE_MAP); if (TREE_CODE (t) == COMPONENT_REF) { gomp_map_kind k = (ort == C_ORT_ACC) ? 
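		  /* For a COMPONENT_REF the extra map clause is an
		     attach/detach operation on OpenACC and an always-pointer
		     map on OpenMP; otherwise it is a firstprivate
		     reference.  */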
GOMP_MAP_ATTACH_DETACH : GOMP_MAP_ALWAYS_POINTER; OMP_CLAUSE_SET_MAP_KIND (c2, k); } else OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_FIRSTPRIVATE_REFERENCE); OMP_CLAUSE_DECL (c2) = t; OMP_CLAUSE_SIZE (c2) = size_zero_node; OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c); OMP_CLAUSE_CHAIN (c) = c2; OMP_CLAUSE_DECL (c) = build_simple_mem_ref (t); if (OMP_CLAUSE_SIZE (c) == NULL_TREE) OMP_CLAUSE_SIZE (c) = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (t))); c = c2; } } break; case OMP_CLAUSE_TO_DECLARE: case OMP_CLAUSE_LINK: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == FUNCTION_DECL && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE) ; else if (!VAR_P (t)) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TO_DECLARE) { if (TREE_CODE (t) == TEMPLATE_ID_EXPR) error_at (OMP_CLAUSE_LOCATION (c), "template %qE in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else if (really_overloaded_fn (t)) error_at (OMP_CLAUSE_LOCATION (c), "overloaded function name %qE in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is neither a variable nor a function name " "in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); } else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (!cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); cp_omp_emit_unmappable_type_notes (TREE_TYPE (t)); remove = true; } if (remove) break; if (bitmap_bit_p (&generic_head, DECL_UID (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE appears more than once on the same " "%<declare target%> directive", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); break; case OMP_CLAUSE_UNIFORM: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not an argument in %<uniform%> clause", t); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not an argument in %<uniform%> clause", t); remove = true; break; } /* map_head bitmap is used as uniform_head if declare_simd. 
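	     Recording the parameter here lets the linear clause handling
	     below verify that a variable step was listed in a uniform
	     clause.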
*/ bitmap_set_bit (&map_head, DECL_UID (t)); goto check_dup_generic; case OMP_CLAUSE_GRAINSIZE: t = OMP_CLAUSE_GRAINSIZE_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<grainsize%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) != 1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<grainsize%> value must be positive"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_GRAINSIZE_EXPR (c) = t; } break; case OMP_CLAUSE_PRIORITY: t = OMP_CLAUSE_PRIORITY_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<priority%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); if (TREE_CODE (t) == INTEGER_CST && tree_int_cst_sgn (t) == -1) { warning_at (OMP_CLAUSE_LOCATION (c), 0, "%<priority%> value must be non-negative"); t = integer_one_node; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_PRIORITY_EXPR (c) = t; } break; case OMP_CLAUSE_HINT: t = OMP_CLAUSE_HINT_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<hint%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { t = maybe_constant_value (t); t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); if (TREE_CODE (t) != INTEGER_CST) { error_at (OMP_CLAUSE_LOCATION (c), "%<hint%> expression must be constant integer " "expression"); remove = true; } } OMP_CLAUSE_HINT_EXPR (c) = t; } break; case OMP_CLAUSE_IS_DEVICE_PTR: case OMP_CLAUSE_USE_DEVICE_PTR: field_ok = (ort & C_ORT_OMP_DECLARE_SIMD) == C_ORT_OMP; t = OMP_CLAUSE_DECL (c); if (!type_dependent_expression_p (t)) { tree type = TREE_TYPE (t); if (!TYPE_PTR_P (type) && (!TYPE_REF_P (type) || !TYPE_PTR_P (TREE_TYPE (type)))) { if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_USE_DEVICE_PTR && ort == C_ORT_OMP) { error_at (OMP_CLAUSE_LOCATION (c), "%qs variable is neither a pointer " "nor reference to pointer", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (TREE_CODE (type) != ARRAY_TYPE && (!TYPE_REF_P (type) || TREE_CODE (TREE_TYPE (type)) != ARRAY_TYPE)) { error_at (OMP_CLAUSE_LOCATION (c), "%qs variable is neither a pointer, nor an " "array nor reference to pointer or array", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } } goto check_dup_generic; case OMP_CLAUSE_USE_DEVICE_ADDR: field_ok = true; t = OMP_CLAUSE_DECL (c); if (!processing_template_decl && (VAR_P (t) || TREE_CODE (t) == PARM_DECL) && !TYPE_REF_P (TREE_TYPE (t)) && !cxx_mark_addressable (t)) remove = true; goto check_dup_generic; case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_PARALLEL: case OMP_CLAUSE_FOR: case OMP_CLAUSE_SECTIONS: case OMP_CLAUSE_TASKGROUP: case OMP_CLAUSE_PROC_BIND: case OMP_CLAUSE_DEVICE_TYPE: case OMP_CLAUSE_NOGROUP: case OMP_CLAUSE_THREADS: case OMP_CLAUSE_SIMD: case OMP_CLAUSE_DEFAULTMAP: case OMP_CLAUSE_BIND: case OMP_CLAUSE_AUTO: case OMP_CLAUSE_INDEPENDENT: case OMP_CLAUSE_SEQ: 
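	/* None of these clauses carry operands that need checking here.  */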
case OMP_CLAUSE_IF_PRESENT: case OMP_CLAUSE_FINALIZE: break; case OMP_CLAUSE_TILE: for (tree list = OMP_CLAUSE_TILE_LIST (c); !remove && list; list = TREE_CHAIN (list)) { t = TREE_VALUE (list); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%<tile%> argument needs integral type"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { /* Zero is used to indicate '*', we permit you to get there via an ICE of value zero. */ t = maybe_constant_value (t); if (!tree_fits_shwi_p (t) || tree_to_shwi (t) < 0) { error_at (OMP_CLAUSE_LOCATION (c), "%<tile%> argument needs positive " "integral constant"); remove = true; } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } } /* Update list item. */ TREE_VALUE (list) = t; } break; case OMP_CLAUSE_ORDERED: ordered_seen = true; break; case OMP_CLAUSE_ORDER: if (order_seen) remove = true; else order_seen = true; break; case OMP_CLAUSE_INBRANCH: case OMP_CLAUSE_NOTINBRANCH: if (branch_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<inbranch%> clause is incompatible with " "%<notinbranch%>"); remove = true; } branch_seen = true; break; case OMP_CLAUSE_INCLUSIVE: case OMP_CLAUSE_EXCLUSIVE: t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (!t) t = OMP_CLAUSE_DECL (c); if (t == current_class_ptr) { error_at (OMP_CLAUSE_LOCATION (c), "%<this%> allowed in OpenMP only in %<declare simd%>" " clauses"); remove = true; break; } if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL && TREE_CODE (t) != FIELD_DECL) { if (processing_template_decl && TREE_CODE (t) != OVERLOAD) break; if (DECL_P (t)) error_at (OMP_CLAUSE_LOCATION (c), "%qD is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error_at (OMP_CLAUSE_LOCATION (c), "%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } break; default: gcc_unreachable (); } if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); } if (reduction_seen < 0 && (ordered_seen || schedule_seen)) reduction_seen = -2; for (pc = &clauses, c = clauses; c ; c = *pc) { enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c); bool remove = false; bool need_complete_type = false; bool need_default_ctor = false; bool need_copy_ctor = false; bool need_copy_assignment = false; bool need_implicitly_determined = false; bool need_dtor = false; tree type, inner_type; switch (c_kind) { case OMP_CLAUSE_SHARED: need_implicitly_determined = true; break; case OMP_CLAUSE_PRIVATE: need_complete_type = true; need_default_ctor = true; need_dtor = true; need_implicitly_determined = true; break; case OMP_CLAUSE_FIRSTPRIVATE: need_complete_type = true; need_copy_ctor = true; need_dtor = true; need_implicitly_determined = true; break; case OMP_CLAUSE_LASTPRIVATE: need_complete_type = true; need_copy_assignment = true; need_implicitly_determined = true; break; case OMP_CLAUSE_REDUCTION: if (reduction_seen == -2) OMP_CLAUSE_REDUCTION_INSCAN (c) = 0; if (OMP_CLAUSE_REDUCTION_INSCAN (c)) need_copy_assignment = true; need_implicitly_determined = true; break; case OMP_CLAUSE_IN_REDUCTION: case OMP_CLAUSE_TASK_REDUCTION: case OMP_CLAUSE_INCLUSIVE: case OMP_CLAUSE_EXCLUSIVE: need_implicitly_determined = true; break; case OMP_CLAUSE_LINEAR: if (ort != C_ORT_OMP_DECLARE_SIMD) need_implicitly_determined = true; else if (OMP_CLAUSE_LINEAR_VARIABLE_STRIDE (c) && !bitmap_bit_p (&map_head, DECL_UID (OMP_CLAUSE_LINEAR_STEP (c)))) { error_at 
(OMP_CLAUSE_LOCATION (c), "%<linear%> clause step is a parameter %qD not " "specified in %<uniform%> clause", OMP_CLAUSE_LINEAR_STEP (c)); *pc = OMP_CLAUSE_CHAIN (c); continue; } break; case OMP_CLAUSE_COPYPRIVATE: need_copy_assignment = true; break; case OMP_CLAUSE_COPYIN: need_copy_assignment = true; break; case OMP_CLAUSE_SIMDLEN: if (safelen && !processing_template_decl && tree_int_cst_lt (OMP_CLAUSE_SAFELEN_EXPR (safelen), OMP_CLAUSE_SIMDLEN_EXPR (c))) { error_at (OMP_CLAUSE_LOCATION (c), "%<simdlen%> clause value is bigger than " "%<safelen%> clause value"); OMP_CLAUSE_SIMDLEN_EXPR (c) = OMP_CLAUSE_SAFELEN_EXPR (safelen); } pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_SCHEDULE: if (ordered_seen && (OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)) { error_at (OMP_CLAUSE_LOCATION (c), "%<nonmonotonic%> schedule modifier specified " "together with %<ordered%> clause"); OMP_CLAUSE_SCHEDULE_KIND (c) = (enum omp_clause_schedule_kind) (OMP_CLAUSE_SCHEDULE_KIND (c) & ~OMP_CLAUSE_SCHEDULE_NONMONOTONIC); } if (reduction_seen == -2) error_at (OMP_CLAUSE_LOCATION (c), "%qs clause specified together with %<inscan%> " "%<reduction%> clause", "schedule"); pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_NOGROUP: if (reduction_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<nogroup%> clause must not be used together with " "%<reduction%> clause"); *pc = OMP_CLAUSE_CHAIN (c); continue; } pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_ORDERED: if (reduction_seen == -2) error_at (OMP_CLAUSE_LOCATION (c), "%qs clause specified together with %<inscan%> " "%<reduction%> clause", "ordered"); pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_ORDER: if (ordered_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<order%> clause must not be used together " "with %<ordered%>"); *pc = OMP_CLAUSE_CHAIN (c); continue; } pc = &OMP_CLAUSE_CHAIN (c); continue; case OMP_CLAUSE_NOWAIT: if (copyprivate_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<nowait%> clause must not be used together " "with %<copyprivate%>"); *pc = OMP_CLAUSE_CHAIN (c); continue; } /* FALLTHRU */ default: pc = &OMP_CLAUSE_CHAIN (c); continue; } t = OMP_CLAUSE_DECL (c); if (processing_template_decl && !VAR_P (t) && TREE_CODE (t) != PARM_DECL) { pc = &OMP_CLAUSE_CHAIN (c); continue; } switch (c_kind) { case OMP_CLAUSE_LASTPRIVATE: if (!bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { need_default_ctor = true; need_dtor = true; } break; case OMP_CLAUSE_REDUCTION: case OMP_CLAUSE_IN_REDUCTION: case OMP_CLAUSE_TASK_REDUCTION: if (finish_omp_reduction_clause (c, &need_default_ctor, &need_dtor)) remove = true; else t = OMP_CLAUSE_DECL (c); break; case OMP_CLAUSE_COPYIN: if (!VAR_P (t) || !CP_DECL_THREAD_LOCAL_P (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%qE must be %<threadprivate%> for %<copyin%>", t); remove = true; } break; default: break; } if (need_complete_type || need_copy_assignment) { t = require_complete_type (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl && TYPE_REF_P (TREE_TYPE (t)) && !complete_type_or_else (TREE_TYPE (TREE_TYPE (t)), t)) remove = true; } if (need_implicitly_determined) { const char *share_name = NULL; if (VAR_P (t) && CP_DECL_THREAD_LOCAL_P (t)) share_name = "threadprivate"; else switch (cxx_omp_predetermined_sharing_1 (t)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE) && c_omp_predefined_variable (t)) /* The __func__ variable 
and similar function-local predefined variables may be listed in a shared or firstprivate clause. */ break; if (VAR_P (t) && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE && TREE_STATIC (t) && cxx_omp_const_qual_no_mutable (t)) { tree ctx = CP_DECL_CONTEXT (t); /* const qualified static data members without mutable member may be specified in firstprivate clause. */ if (TYPE_P (ctx) && MAYBE_CLASS_TYPE_P (ctx)) break; } share_name = "shared"; break; case OMP_CLAUSE_DEFAULT_PRIVATE: share_name = "private"; break; default: gcc_unreachable (); } if (share_name) { error_at (OMP_CLAUSE_LOCATION (c), "%qE is predetermined %qs for %qs", omp_clause_printable_decl (t), share_name, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_SHARED && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_FIRSTPRIVATE && cxx_omp_const_qual_no_mutable (t)) { error_at (OMP_CLAUSE_LOCATION (c), "%<const%> qualified %qE without %<mutable%> member " "may appear only in %<shared%> or %<firstprivate%> " "clauses", omp_clause_printable_decl (t)); remove = true; } } /* We're interested in the base element, not arrays. */ inner_type = type = TREE_TYPE (t); if ((need_complete_type || need_copy_assignment || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IN_REDUCTION || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_TASK_REDUCTION) && TYPE_REF_P (inner_type)) inner_type = TREE_TYPE (inner_type); while (TREE_CODE (inner_type) == ARRAY_TYPE) inner_type = TREE_TYPE (inner_type); /* Check for special function availability by building a call to one. Save the results, because later we won't be in the right context for making these queries. */ if (CLASS_TYPE_P (inner_type) && COMPLETE_TYPE_P (inner_type) && (need_default_ctor || need_copy_ctor || need_copy_assignment || need_dtor) && !type_dependent_expression_p (t) && cxx_omp_create_clause_info (c, inner_type, need_default_ctor, need_copy_ctor, need_copy_assignment, need_dtor)) remove = true; if (!remove && c_kind == OMP_CLAUSE_SHARED && processing_template_decl) { t = omp_clause_decl_field (OMP_CLAUSE_DECL (c)); if (t) OMP_CLAUSE_DECL (c) = t; } if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); } bitmap_obstack_release (NULL); return clauses; } /* Start processing OpenMP clauses that can include any privatization clauses for non-static data members. */ tree push_omp_privatization_clauses (bool ignore_next) { if (omp_private_member_ignore_next) { omp_private_member_ignore_next = ignore_next; return NULL_TREE; } omp_private_member_ignore_next = ignore_next; if (omp_private_member_map) omp_private_member_vec.safe_push (error_mark_node); return push_stmt_list (); } /* Revert remapping of any non-static data members since the last push_omp_privatization_clauses () call. */ void pop_omp_privatization_clauses (tree stmt) { if (stmt == NULL_TREE) return; stmt = pop_stmt_list (stmt); if (omp_private_member_map) { while (!omp_private_member_vec.is_empty ()) { tree t = omp_private_member_vec.pop (); if (t == error_mark_node) { add_stmt (stmt); return; } bool no_decl_expr = t == integer_zero_node; if (no_decl_expr) t = omp_private_member_vec.pop (); tree *v = omp_private_member_map->get (t); gcc_assert (v); if (!no_decl_expr) add_decl_expr (*v); omp_private_member_map->remove (t); } delete omp_private_member_map; omp_private_member_map = NULL; } add_stmt (stmt); } /* Remember OpenMP privatization clauses mapping and clear it. Used for lambdas. 
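   The mapping is reinstated later by restore_omp_privatization_clauses.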
*/ void save_omp_privatization_clauses (vec<tree> &save) { save = vNULL; if (omp_private_member_ignore_next) save.safe_push (integer_one_node); omp_private_member_ignore_next = false; if (!omp_private_member_map) return; while (!omp_private_member_vec.is_empty ()) { tree t = omp_private_member_vec.pop (); if (t == error_mark_node) { save.safe_push (t); continue; } tree n = t; if (t == integer_zero_node) t = omp_private_member_vec.pop (); tree *v = omp_private_member_map->get (t); gcc_assert (v); save.safe_push (*v); save.safe_push (t); if (n != t) save.safe_push (n); } delete omp_private_member_map; omp_private_member_map = NULL; } /* Restore OpenMP privatization clauses mapping saved by the above function. */ void restore_omp_privatization_clauses (vec<tree> &save) { gcc_assert (omp_private_member_vec.is_empty ()); omp_private_member_ignore_next = false; if (save.is_empty ()) return; if (save.length () == 1 && save[0] == integer_one_node) { omp_private_member_ignore_next = true; save.release (); return; } omp_private_member_map = new hash_map <tree, tree>; while (!save.is_empty ()) { tree t = save.pop (); tree n = t; if (t != error_mark_node) { if (t == integer_one_node) { omp_private_member_ignore_next = true; gcc_assert (save.is_empty ()); break; } if (t == integer_zero_node) t = save.pop (); tree &v = omp_private_member_map->get_or_insert (t); v = save.pop (); } omp_private_member_vec.safe_push (t); if (n != t) omp_private_member_vec.safe_push (n); } save.release (); } /* For all variables in the tree_list VARS, mark them as thread local. */ void finish_omp_threadprivate (tree vars) { tree t; /* Mark every variable in VARS to be assigned thread local storage. */ for (t = vars; t; t = TREE_CHAIN (t)) { tree v = TREE_PURPOSE (t); if (error_operand_p (v)) ; else if (!VAR_P (v)) error ("%<threadprivate%> %qD is not file, namespace " "or block scope variable", v); /* If V had already been marked threadprivate, it doesn't matter whether it had been used prior to this point. */ else if (TREE_USED (v) && (DECL_LANG_SPECIFIC (v) == NULL || !CP_DECL_THREADPRIVATE_P (v))) error ("%qE declared %<threadprivate%> after first use", v); else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v)) error ("automatic variable %qE cannot be %<threadprivate%>", v); else if (! COMPLETE_TYPE_P (complete_type (TREE_TYPE (v)))) error ("%<threadprivate%> %qE has incomplete type", v); else if (TREE_STATIC (v) && TYPE_P (CP_DECL_CONTEXT (v)) && CP_DECL_CONTEXT (v) != current_class_type) error ("%<threadprivate%> %qE directive not " "in %qT definition", v, CP_DECL_CONTEXT (v)); else { /* Allocate a LANG_SPECIFIC structure for V, if needed. */ if (DECL_LANG_SPECIFIC (v) == NULL) retrofit_lang_decl (v); if (! CP_DECL_THREAD_LOCAL_P (v)) { CP_DECL_THREAD_LOCAL_P (v) = true; set_decl_tls_model (v, decl_default_tls_model (v)); /* If rtl has been already set for this var, call make_decl_rtl once again, so that encode_section_info has a chance to look at the new decl flags. */ if (DECL_RTL_SET_P (v)) make_decl_rtl (v); } CP_DECL_THREADPRIVATE_P (v) = 1; } } } /* Build an OpenMP structured block. */ tree begin_omp_structured_block (void) { return do_pushlevel (sk_omp); } tree finish_omp_structured_block (tree block) { return do_poplevel (block); } /* Similarly, except force the retention of the BLOCK. */ tree begin_omp_parallel (void) { keep_next_level (true); return begin_omp_structured_block (); } /* Generate OACC_DATA, with CLAUSES and BLOCK as its compound statement. 
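   The statement is appended to the current statement list and returned.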
*/ tree finish_oacc_data (tree clauses, tree block) { tree stmt; block = finish_omp_structured_block (block); stmt = make_node (OACC_DATA); TREE_TYPE (stmt) = void_type_node; OACC_DATA_CLAUSES (stmt) = clauses; OACC_DATA_BODY (stmt) = block; return add_stmt (stmt); } /* Generate OACC_HOST_DATA, with CLAUSES and BLOCK as its compound statement. */ tree finish_oacc_host_data (tree clauses, tree block) { tree stmt; block = finish_omp_structured_block (block); stmt = make_node (OACC_HOST_DATA); TREE_TYPE (stmt) = void_type_node; OACC_HOST_DATA_CLAUSES (stmt) = clauses; OACC_HOST_DATA_BODY (stmt) = block; return add_stmt (stmt); } /* Generate OMP construct CODE, with BODY and CLAUSES as its compound statement. */ tree finish_omp_construct (enum tree_code code, tree body, tree clauses) { body = finish_omp_structured_block (body); tree stmt = make_node (code); TREE_TYPE (stmt) = void_type_node; OMP_BODY (stmt) = body; OMP_CLAUSES (stmt) = clauses; return add_stmt (stmt); } tree finish_omp_parallel (tree clauses, tree body) { tree stmt; body = finish_omp_structured_block (body); stmt = make_node (OMP_PARALLEL); TREE_TYPE (stmt) = void_type_node; OMP_PARALLEL_CLAUSES (stmt) = clauses; OMP_PARALLEL_BODY (stmt) = body; return add_stmt (stmt); } tree begin_omp_task (void) { keep_next_level (true); return begin_omp_structured_block (); } tree finish_omp_task (tree clauses, tree body) { tree stmt; body = finish_omp_structured_block (body); stmt = make_node (OMP_TASK); TREE_TYPE (stmt) = void_type_node; OMP_TASK_CLAUSES (stmt) = clauses; OMP_TASK_BODY (stmt) = body; return add_stmt (stmt); } /* Helper function for finish_omp_for. Convert Ith random access iterator into integral iterator. Return FALSE if successful. */ static bool handle_omp_for_class_iterator (int i, location_t locus, enum tree_code code, tree declv, tree orig_declv, tree initv, tree condv, tree incrv, tree *body, tree *pre_body, tree &clauses, int collapse, int ordered) { tree diff, iter_init, iter_incr = NULL, last; tree incr_var = NULL, orig_pre_body, orig_body, c; tree decl = TREE_VEC_ELT (declv, i); tree init = TREE_VEC_ELT (initv, i); tree cond = TREE_VEC_ELT (condv, i); tree incr = TREE_VEC_ELT (incrv, i); tree iter = decl; location_t elocus = locus; if (init && EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); switch (TREE_CODE (cond)) { case GT_EXPR: case GE_EXPR: case LT_EXPR: case LE_EXPR: case NE_EXPR: if (TREE_OPERAND (cond, 1) == iter) cond = build2 (swap_tree_comparison (TREE_CODE (cond)), TREE_TYPE (cond), iter, TREE_OPERAND (cond, 0)); if (TREE_OPERAND (cond, 0) != iter) cond = error_mark_node; else { tree tem = build_x_binary_op (EXPR_LOCATION (cond), TREE_CODE (cond), iter, ERROR_MARK, TREE_OPERAND (cond, 1), ERROR_MARK, NULL, tf_warning_or_error); if (error_operand_p (tem)) return true; } break; default: cond = error_mark_node; break; } if (cond == error_mark_node) { error_at (elocus, "invalid controlling predicate"); return true; } diff = build_x_binary_op (elocus, MINUS_EXPR, TREE_OPERAND (cond, 1), ERROR_MARK, iter, ERROR_MARK, NULL, tf_warning_or_error); diff = cp_fully_fold (diff); if (error_operand_p (diff)) return true; if (TREE_CODE (TREE_TYPE (diff)) != INTEGER_TYPE) { error_at (elocus, "difference between %qE and %qD does not have integer type", TREE_OPERAND (cond, 1), iter); return true; } if (!c_omp_check_loop_iv_exprs (locus, orig_declv, TREE_VEC_ELT (declv, i), NULL_TREE, cond, cp_walk_subtrees)) return true; switch (TREE_CODE (incr)) { case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: case 
POSTINCREMENT_EXPR: case POSTDECREMENT_EXPR: if (TREE_OPERAND (incr, 0) != iter) { incr = error_mark_node; break; } iter_incr = build_x_unary_op (EXPR_LOCATION (incr), TREE_CODE (incr), iter, tf_warning_or_error); if (error_operand_p (iter_incr)) return true; else if (TREE_CODE (incr) == PREINCREMENT_EXPR || TREE_CODE (incr) == POSTINCREMENT_EXPR) incr = integer_one_node; else incr = integer_minus_one_node; break; case MODIFY_EXPR: if (TREE_OPERAND (incr, 0) != iter) incr = error_mark_node; else if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR) { tree rhs = TREE_OPERAND (incr, 1); if (TREE_OPERAND (rhs, 0) == iter) { if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 1))) != INTEGER_TYPE) incr = error_mark_node; else { iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs), iter, TREE_CODE (rhs), TREE_OPERAND (rhs, 1), tf_warning_or_error); if (error_operand_p (iter_incr)) return true; incr = TREE_OPERAND (rhs, 1); incr = cp_convert (TREE_TYPE (diff), incr, tf_warning_or_error); if (TREE_CODE (rhs) == MINUS_EXPR) { incr = build1 (NEGATE_EXPR, TREE_TYPE (diff), incr); incr = fold_simple (incr); } if (TREE_CODE (incr) != INTEGER_CST && (TREE_CODE (incr) != NOP_EXPR || (TREE_CODE (TREE_OPERAND (incr, 0)) != INTEGER_CST))) iter_incr = NULL; } } else if (TREE_OPERAND (rhs, 1) == iter) { if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 0))) != INTEGER_TYPE || TREE_CODE (rhs) != PLUS_EXPR) incr = error_mark_node; else { iter_incr = build_x_binary_op (EXPR_LOCATION (rhs), PLUS_EXPR, TREE_OPERAND (rhs, 0), ERROR_MARK, iter, ERROR_MARK, NULL, tf_warning_or_error); if (error_operand_p (iter_incr)) return true; iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs), iter, NOP_EXPR, iter_incr, tf_warning_or_error); if (error_operand_p (iter_incr)) return true; incr = TREE_OPERAND (rhs, 0); iter_incr = NULL; } } else incr = error_mark_node; } else incr = error_mark_node; break; default: incr = error_mark_node; break; } if (incr == error_mark_node) { error_at (elocus, "invalid increment expression"); return true; } incr = cp_convert (TREE_TYPE (diff), incr, tf_warning_or_error); incr = cp_fully_fold (incr); tree loop_iv_seen = NULL_TREE; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_DECL (c) == iter) { if (code == OMP_TASKLOOP || code == OMP_LOOP) { loop_iv_seen = c; OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c) = 1; } break; } else if ((code == OMP_TASKLOOP || code == OMP_LOOP) && OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE && OMP_CLAUSE_DECL (c) == iter) { loop_iv_seen = c; if (code == OMP_TASKLOOP) OMP_CLAUSE_PRIVATE_TASKLOOP_IV (c) = 1; } decl = create_temporary_var (TREE_TYPE (diff)); pushdecl (decl); add_decl_expr (decl); last = create_temporary_var (TREE_TYPE (diff)); pushdecl (last); add_decl_expr (last); if (c && iter_incr == NULL && TREE_CODE (incr) != INTEGER_CST && (!ordered || (i < collapse && collapse > 1))) { incr_var = create_temporary_var (TREE_TYPE (diff)); pushdecl (incr_var); add_decl_expr (incr_var); } gcc_assert (stmts_are_full_exprs_p ()); tree diffvar = NULL_TREE; if (code == OMP_TASKLOOP) { if (!loop_iv_seen) { tree ivc = build_omp_clause (locus, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (ivc) = iter; cxx_omp_finish_clause (ivc, NULL); OMP_CLAUSE_CHAIN (ivc) = clauses; clauses = ivc; } tree lvc = build_omp_clause (locus, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (lvc) = last; OMP_CLAUSE_CHAIN (lvc) = clauses; clauses = lvc; diffvar = create_temporary_var (TREE_TYPE (diff)); pushdecl 
(diffvar); add_decl_expr (diffvar); } else if (code == OMP_LOOP) { if (!loop_iv_seen) { /* While iterators on the loop construct are predetermined lastprivate, if the decl is not declared inside of the loop, OMP_CLAUSE_LASTPRIVATE should have been added already. */ loop_iv_seen = build_omp_clause (locus, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (loop_iv_seen) = iter; OMP_CLAUSE_CHAIN (loop_iv_seen) = clauses; clauses = loop_iv_seen; } else if (OMP_CLAUSE_CODE (loop_iv_seen) == OMP_CLAUSE_PRIVATE) { OMP_CLAUSE_PRIVATE_DEBUG (loop_iv_seen) = 0; OMP_CLAUSE_PRIVATE_OUTER_REF (loop_iv_seen) = 0; OMP_CLAUSE_CODE (loop_iv_seen) = OMP_CLAUSE_FIRSTPRIVATE; } if (OMP_CLAUSE_CODE (loop_iv_seen) == OMP_CLAUSE_FIRSTPRIVATE) cxx_omp_finish_clause (loop_iv_seen, NULL); } orig_pre_body = *pre_body; *pre_body = push_stmt_list (); if (orig_pre_body) add_stmt (orig_pre_body); if (init != NULL) finish_expr_stmt (build_x_modify_expr (elocus, iter, NOP_EXPR, init, tf_warning_or_error)); init = build_int_cst (TREE_TYPE (diff), 0); if (c && iter_incr == NULL && (!ordered || (i < collapse && collapse > 1))) { if (incr_var) { finish_expr_stmt (build_x_modify_expr (elocus, incr_var, NOP_EXPR, incr, tf_warning_or_error)); incr = incr_var; } iter_incr = build_x_modify_expr (elocus, iter, PLUS_EXPR, incr, tf_warning_or_error); } if (c && ordered && i < collapse && collapse > 1) iter_incr = incr; finish_expr_stmt (build_x_modify_expr (elocus, last, NOP_EXPR, init, tf_warning_or_error)); if (diffvar) { finish_expr_stmt (build_x_modify_expr (elocus, diffvar, NOP_EXPR, diff, tf_warning_or_error)); diff = diffvar; } *pre_body = pop_stmt_list (*pre_body); cond = cp_build_binary_op (elocus, TREE_CODE (cond), decl, diff, tf_warning_or_error); incr = build_modify_expr (elocus, decl, NULL_TREE, PLUS_EXPR, elocus, incr, NULL_TREE); orig_body = *body; *body = push_stmt_list (); iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), decl, last); iter_init = build_x_modify_expr (elocus, iter, PLUS_EXPR, iter_init, tf_warning_or_error); if (iter_init != error_mark_node) iter_init = build1 (NOP_EXPR, void_type_node, iter_init); finish_expr_stmt (iter_init); finish_expr_stmt (build_x_modify_expr (elocus, last, NOP_EXPR, decl, tf_warning_or_error)); add_stmt (orig_body); *body = pop_stmt_list (*body); if (c) { OMP_CLAUSE_LASTPRIVATE_STMT (c) = push_stmt_list (); if (!ordered) finish_expr_stmt (iter_incr); else { iter_init = decl; if (i < collapse && collapse > 1 && !error_operand_p (iter_incr)) iter_init = build2 (PLUS_EXPR, TREE_TYPE (diff), iter_init, iter_incr); iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), iter_init, last); iter_init = build_x_modify_expr (elocus, iter, PLUS_EXPR, iter_init, tf_warning_or_error); if (iter_init != error_mark_node) iter_init = build1 (NOP_EXPR, void_type_node, iter_init); finish_expr_stmt (iter_init); } OMP_CLAUSE_LASTPRIVATE_STMT (c) = pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (c)); } if (TREE_CODE (TREE_VEC_ELT (orig_declv, i)) == TREE_LIST) { tree t = TREE_VEC_ELT (orig_declv, i); gcc_assert (TREE_PURPOSE (t) == NULL_TREE && TREE_VALUE (t) == NULL_TREE && TREE_CODE (TREE_CHAIN (t)) == TREE_VEC); TREE_PURPOSE (t) = TREE_VEC_ELT (declv, i); TREE_VALUE (t) = last; } else TREE_VEC_ELT (orig_declv, i) = tree_cons (TREE_VEC_ELT (declv, i), last, NULL_TREE); TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; return false; } /* Build and validate an OMP_FOR statement. 
CLAUSES, BODY, COND, INCR are directly for their associated operands in the statement. DECL and INIT are a combo; if DECL is NULL then INIT ought to be a MODIFY_EXPR, and the DECL should be extracted. PRE_BODY are optional statements that need to go before the loop into its sk_omp scope. */ tree finish_omp_for (location_t locus, enum tree_code code, tree declv, tree orig_declv, tree initv, tree condv, tree incrv, tree body, tree pre_body, vec<tree> *orig_inits, tree clauses) { tree omp_for = NULL, orig_incr = NULL; tree decl = NULL, init, cond, incr; location_t elocus; int i; int collapse = 1; int ordered = 0; gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv)); if (TREE_VEC_LENGTH (declv) > 1) { tree c; c = omp_find_clause (clauses, OMP_CLAUSE_TILE); if (c) collapse = list_length (OMP_CLAUSE_TILE_LIST (c)); else { c = omp_find_clause (clauses, OMP_CLAUSE_COLLAPSE); if (c) collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (c)); if (collapse != TREE_VEC_LENGTH (declv)) ordered = TREE_VEC_LENGTH (declv); } } for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { decl = TREE_VEC_ELT (declv, i); init = TREE_VEC_ELT (initv, i); cond = TREE_VEC_ELT (condv, i); incr = TREE_VEC_ELT (incrv, i); elocus = locus; if (decl == NULL) { if (init != NULL) switch (TREE_CODE (init)) { case MODIFY_EXPR: decl = TREE_OPERAND (init, 0); init = TREE_OPERAND (init, 1); break; case MODOP_EXPR: if (TREE_CODE (TREE_OPERAND (init, 1)) == NOP_EXPR) { decl = TREE_OPERAND (init, 0); init = TREE_OPERAND (init, 2); } break; default: break; } if (decl == NULL) { error_at (locus, "expected iteration declaration or initialization"); return NULL; } } if (init && EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); if (cond == global_namespace) continue; if (cond == NULL) { error_at (elocus, "missing controlling predicate"); return NULL; } if (incr == NULL) { error_at (elocus, "missing increment expression"); return NULL; } TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; } if (orig_inits) { bool fail = false; tree orig_init; FOR_EACH_VEC_ELT (*orig_inits, i, orig_init) if (orig_init && !c_omp_check_loop_iv_exprs (locus, orig_declv ? orig_declv : declv, TREE_VEC_ELT (declv, i), orig_init, NULL_TREE, cp_walk_subtrees)) fail = true; if (fail) return NULL; } if (dependent_omp_for_p (declv, initv, condv, incrv)) { tree stmt; stmt = make_node (code); for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { /* This is really just a place-holder. We'll be decomposing this again and going through the cp_build_modify_expr path below when we instantiate the thing. 
*/ TREE_VEC_ELT (initv, i) = build2 (MODIFY_EXPR, void_type_node, TREE_VEC_ELT (declv, i), TREE_VEC_ELT (initv, i)); } TREE_TYPE (stmt) = void_type_node; OMP_FOR_INIT (stmt) = initv; OMP_FOR_COND (stmt) = condv; OMP_FOR_INCR (stmt) = incrv; OMP_FOR_BODY (stmt) = body; OMP_FOR_PRE_BODY (stmt) = pre_body; OMP_FOR_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, locus); return add_stmt (stmt); } if (!orig_declv) orig_declv = copy_node (declv); if (processing_template_decl) orig_incr = make_tree_vec (TREE_VEC_LENGTH (incrv)); for (i = 0; i < TREE_VEC_LENGTH (declv); ) { decl = TREE_VEC_ELT (declv, i); init = TREE_VEC_ELT (initv, i); cond = TREE_VEC_ELT (condv, i); incr = TREE_VEC_ELT (incrv, i); if (orig_incr) TREE_VEC_ELT (orig_incr, i) = incr; elocus = locus; if (init && EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); if (!DECL_P (decl)) { error_at (elocus, "expected iteration declaration or initialization"); return NULL; } if (incr && TREE_CODE (incr) == MODOP_EXPR) { if (orig_incr) TREE_VEC_ELT (orig_incr, i) = incr; incr = cp_build_modify_expr (elocus, TREE_OPERAND (incr, 0), TREE_CODE (TREE_OPERAND (incr, 1)), TREE_OPERAND (incr, 2), tf_warning_or_error); } if (CLASS_TYPE_P (TREE_TYPE (decl))) { if (code == OMP_SIMD) { error_at (elocus, "%<#pragma omp simd%> used with class " "iteration variable %qE", decl); return NULL; } if (handle_omp_for_class_iterator (i, locus, code, declv, orig_declv, initv, condv, incrv, &body, &pre_body, clauses, collapse, ordered)) return NULL; continue; } if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)) && !TYPE_PTR_P (TREE_TYPE (decl))) { error_at (elocus, "invalid type for iteration variable %qE", decl); return NULL; } if (!processing_template_decl) { init = fold_build_cleanup_point_expr (TREE_TYPE (init), init); init = cp_build_modify_expr (elocus, decl, NOP_EXPR, init, tf_warning_or_error); } else init = build2 (MODIFY_EXPR, void_type_node, decl, init); if (cond && TREE_SIDE_EFFECTS (cond) && COMPARISON_CLASS_P (cond) && !processing_template_decl) { tree t = TREE_OPERAND (cond, 0); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (cond, 0) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); t = TREE_OPERAND (cond, 1); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (cond, 1) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } if (decl == error_mark_node || init == error_mark_node) return NULL; TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; i++; } if (pre_body && IS_EMPTY_STMT (pre_body)) pre_body = NULL; omp_for = c_finish_omp_for (locus, code, declv, orig_declv, initv, condv, incrv, body, pre_body, !processing_template_decl); /* Check for iterators appearing in lb, b or incr expressions. 
*/ if (omp_for && !c_omp_check_loop_iv (omp_for, orig_declv, cp_walk_subtrees)) omp_for = NULL_TREE; if (omp_for == NULL) return NULL; add_stmt (omp_for); for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)); i++) { decl = TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (omp_for), i), 0); incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i); if (TREE_CODE (incr) != MODIFY_EXPR) continue; if (TREE_SIDE_EFFECTS (TREE_OPERAND (incr, 1)) && BINARY_CLASS_P (TREE_OPERAND (incr, 1)) && !processing_template_decl) { tree t = TREE_OPERAND (TREE_OPERAND (incr, 1), 0); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (TREE_OPERAND (incr, 1), 0) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (TREE_OPERAND (incr, 1), 1) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } if (orig_incr) TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i) = TREE_VEC_ELT (orig_incr, i); } OMP_FOR_CLAUSES (omp_for) = clauses; /* For simd loops with non-static data member iterators, we could have added OMP_CLAUSE_LINEAR clauses without OMP_CLAUSE_LINEAR_STEP. As we know the step at this point, fill it in. */ if (code == OMP_SIMD && !processing_template_decl && TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)) == 1) for (tree c = omp_find_clause (clauses, OMP_CLAUSE_LINEAR); c; c = omp_find_clause (OMP_CLAUSE_CHAIN (c), OMP_CLAUSE_LINEAR)) if (OMP_CLAUSE_LINEAR_STEP (c) == NULL_TREE) { decl = TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (omp_for), 0), 0); gcc_assert (decl == OMP_CLAUSE_DECL (c)); incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), 0); tree step, stept; switch (TREE_CODE (incr)) { case PREINCREMENT_EXPR: case POSTINCREMENT_EXPR: /* c_omp_for_incr_canonicalize_ptr() should have been called to massage things appropriately. */ gcc_assert (!INDIRECT_TYPE_P (TREE_TYPE (decl))); OMP_CLAUSE_LINEAR_STEP (c) = build_int_cst (TREE_TYPE (decl), 1); break; case PREDECREMENT_EXPR: case POSTDECREMENT_EXPR: /* c_omp_for_incr_canonicalize_ptr() should have been called to massage things appropriately. */ gcc_assert (!INDIRECT_TYPE_P (TREE_TYPE (decl))); OMP_CLAUSE_LINEAR_STEP (c) = build_int_cst (TREE_TYPE (decl), -1); break; case MODIFY_EXPR: gcc_assert (TREE_OPERAND (incr, 0) == decl); incr = TREE_OPERAND (incr, 1); switch (TREE_CODE (incr)) { case PLUS_EXPR: if (TREE_OPERAND (incr, 1) == decl) step = TREE_OPERAND (incr, 0); else step = TREE_OPERAND (incr, 1); break; case MINUS_EXPR: case POINTER_PLUS_EXPR: gcc_assert (TREE_OPERAND (incr, 0) == decl); step = TREE_OPERAND (incr, 1); break; default: gcc_unreachable (); } stept = TREE_TYPE (decl); if (INDIRECT_TYPE_P (stept)) stept = sizetype; step = fold_convert (stept, step); if (TREE_CODE (incr) == MINUS_EXPR) step = fold_build1 (NEGATE_EXPR, stept, step); OMP_CLAUSE_LINEAR_STEP (c) = step; break; default: gcc_unreachable (); } } /* Override saved methods on OMP_LOOP's OMP_CLAUSE_LASTPRIVATE_LOOP_IV clauses, we need copy ctor for those rather than default ctor, plus as for other lastprivates assignment op and dtor. */ if (code == OMP_LOOP && !processing_template_decl) for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_LASTPRIVATE_LOOP_IV (c) && cxx_omp_create_clause_info (c, TREE_TYPE (OMP_CLAUSE_DECL (c)), false, true, true, true)) CP_OMP_CLAUSE_INFO (c) = NULL_TREE; return omp_for; } /* Fix up range for decls. 
Those decls were pushed into BIND's BIND_EXPR_VARS and need to be moved into the BIND_EXPR inside of the OMP_FOR's body. */ tree finish_omp_for_block (tree bind, tree omp_for) { if (omp_for == NULL_TREE || !OMP_FOR_ORIG_DECLS (omp_for) || bind == NULL_TREE || TREE_CODE (bind) != BIND_EXPR) return bind; tree b = NULL_TREE; for (int i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INIT (omp_for)); i++) if (TREE_CODE (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (omp_for), i)) == TREE_LIST && TREE_CHAIN (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (omp_for), i))) { tree v = TREE_CHAIN (TREE_VEC_ELT (OMP_FOR_ORIG_DECLS (omp_for), i)); gcc_assert (BIND_EXPR_BLOCK (bind) && (BIND_EXPR_VARS (bind) == BLOCK_VARS (BIND_EXPR_BLOCK (bind)))); for (int j = 2; j < TREE_VEC_LENGTH (v); j++) for (tree *p = &BIND_EXPR_VARS (bind); *p; p = &DECL_CHAIN (*p)) { if (*p == TREE_VEC_ELT (v, j)) { tree var = *p; *p = DECL_CHAIN (*p); if (b == NULL_TREE) { b = make_node (BLOCK); b = build3 (BIND_EXPR, void_type_node, NULL_TREE, OMP_FOR_BODY (omp_for), b); TREE_SIDE_EFFECTS (b) = 1; OMP_FOR_BODY (omp_for) = b; } DECL_CHAIN (var) = BIND_EXPR_VARS (b); BIND_EXPR_VARS (b) = var; BLOCK_VARS (BIND_EXPR_BLOCK (b)) = var; } } BLOCK_VARS (BIND_EXPR_BLOCK (bind)) = BIND_EXPR_VARS (bind); } return bind; } void finish_omp_atomic (location_t loc, enum tree_code code, enum tree_code opcode, tree lhs, tree rhs, tree v, tree lhs1, tree rhs1, tree clauses, enum omp_memory_order mo) { tree orig_lhs; tree orig_rhs; tree orig_v; tree orig_lhs1; tree orig_rhs1; bool dependent_p; tree stmt; orig_lhs = lhs; orig_rhs = rhs; orig_v = v; orig_lhs1 = lhs1; orig_rhs1 = rhs1; dependent_p = false; stmt = NULL_TREE; /* Even in a template, we can detect invalid uses of the atomic pragma if neither LHS nor RHS is type-dependent. */ if (processing_template_decl) { dependent_p = (type_dependent_expression_p (lhs) || (rhs && type_dependent_expression_p (rhs)) || (v && type_dependent_expression_p (v)) || (lhs1 && type_dependent_expression_p (lhs1)) || (rhs1 && type_dependent_expression_p (rhs1))); if (clauses) { gcc_assert (TREE_CODE (clauses) == OMP_CLAUSE && OMP_CLAUSE_CODE (clauses) == OMP_CLAUSE_HINT && OMP_CLAUSE_CHAIN (clauses) == NULL_TREE); if (type_dependent_expression_p (OMP_CLAUSE_HINT_EXPR (clauses)) || TREE_CODE (OMP_CLAUSE_HINT_EXPR (clauses)) != INTEGER_CST) dependent_p = true; } if (!dependent_p) { lhs = build_non_dependent_expr (lhs); if (rhs) rhs = build_non_dependent_expr (rhs); if (v) v = build_non_dependent_expr (v); if (lhs1) lhs1 = build_non_dependent_expr (lhs1); if (rhs1) rhs1 = build_non_dependent_expr (rhs1); } } if (!dependent_p) { bool swapped = false; if (rhs1 && cp_tree_equal (lhs, rhs)) { std::swap (rhs, rhs1); swapped = !commutative_tree_code (opcode); } if (rhs1 && !cp_tree_equal (lhs, rhs1)) { if (code == OMP_ATOMIC) error ("%<#pragma omp atomic update%> uses two different " "expressions for memory"); else error ("%<#pragma omp atomic capture%> uses two different " "expressions for memory"); return; } if (lhs1 && !cp_tree_equal (lhs, lhs1)) { if (code == OMP_ATOMIC) error ("%<#pragma omp atomic update%> uses two different " "expressions for memory"); else error ("%<#pragma omp atomic capture%> uses two different " "expressions for memory"); return; } stmt = c_finish_omp_atomic (loc, code, opcode, lhs, rhs, v, lhs1, rhs1, swapped, mo, processing_template_decl != 0); if (stmt == error_mark_node) return; } if (processing_template_decl) { if (code == OMP_ATOMIC_READ) { stmt = build_min_nt_loc (loc, OMP_ATOMIC_READ, orig_lhs); OMP_ATOMIC_MEMORY_ORDER 
(stmt) = mo; stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt); } else { if (opcode == NOP_EXPR) stmt = build2 (MODIFY_EXPR, void_type_node, orig_lhs, orig_rhs); else stmt = build2 (opcode, void_type_node, orig_lhs, orig_rhs); if (orig_rhs1) stmt = build_min_nt_loc (EXPR_LOCATION (orig_rhs1), COMPOUND_EXPR, orig_rhs1, stmt); if (code != OMP_ATOMIC) { stmt = build_min_nt_loc (loc, code, orig_lhs1, stmt); OMP_ATOMIC_MEMORY_ORDER (stmt) = mo; stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt); } } stmt = build2 (OMP_ATOMIC, void_type_node, clauses ? clauses : integer_zero_node, stmt); OMP_ATOMIC_MEMORY_ORDER (stmt) = mo; SET_EXPR_LOCATION (stmt, loc); } /* Avoid -Wunused-value warnings here, the whole construct has side-effects and even if it might be wrapped from fold-const.c or c-omp.c wrapped in some tree that appears to be unused, the value is not unused. */ warning_sentinel w (warn_unused_value); finish_expr_stmt (stmt); } void finish_omp_barrier (void) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER); releasing_vec vec; tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); finish_expr_stmt (stmt); } void finish_omp_depobj (location_t loc, tree depobj, enum omp_clause_depend_kind kind, tree clause) { if (!error_operand_p (depobj) && !type_dependent_expression_p (depobj)) { if (!lvalue_p (depobj)) { error_at (EXPR_LOC_OR_LOC (depobj, loc), "%<depobj%> expression is not lvalue expression"); depobj = error_mark_node; } } if (processing_template_decl) { if (clause == NULL_TREE) clause = build_int_cst (integer_type_node, kind); add_stmt (build_min_nt_loc (loc, OMP_DEPOBJ, depobj, clause)); return; } if (!error_operand_p (depobj)) { tree addr = cp_build_addr_expr (depobj, tf_warning_or_error); if (addr == error_mark_node) depobj = error_mark_node; else depobj = cp_build_indirect_ref (loc, addr, RO_UNARY_STAR, tf_warning_or_error); } c_finish_omp_depobj (loc, depobj, kind, clause); } void finish_omp_flush (int mo) { tree fn = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE); releasing_vec vec; if (mo != MEMMODEL_LAST) { fn = builtin_decl_explicit (BUILT_IN_ATOMIC_THREAD_FENCE); vec->quick_push (build_int_cst (integer_type_node, mo)); } tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); finish_expr_stmt (stmt); } void finish_omp_taskwait (void) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT); releasing_vec vec; tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); finish_expr_stmt (stmt); } void finish_omp_taskyield (void) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD); releasing_vec vec; tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); finish_expr_stmt (stmt); } void finish_omp_cancel (tree clauses) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL); int mask = 0; if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL)) mask = 1; else if (omp_find_clause (clauses, OMP_CLAUSE_FOR)) mask = 2; else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS)) mask = 4; else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP)) mask = 8; else { error ("%<#pragma omp cancel%> must specify one of " "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses"); return; } releasing_vec vec; tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF); if (ifc != NULL_TREE) { if (OMP_CLAUSE_IF_MODIFIER (ifc) != ERROR_MARK && OMP_CLAUSE_IF_MODIFIER (ifc) != VOID_CST) error_at (OMP_CLAUSE_LOCATION (ifc), "expected %<cancel%> %<if%> clause modifier"); else { tree ifc2 = 
omp_find_clause (OMP_CLAUSE_CHAIN (ifc), OMP_CLAUSE_IF); if (ifc2 != NULL_TREE) { gcc_assert (OMP_CLAUSE_IF_MODIFIER (ifc) == VOID_CST && OMP_CLAUSE_IF_MODIFIER (ifc2) != ERROR_MARK && OMP_CLAUSE_IF_MODIFIER (ifc2) != VOID_CST); error_at (OMP_CLAUSE_LOCATION (ifc2), "expected %<cancel%> %<if%> clause modifier"); } } if (!processing_template_decl) ifc = maybe_convert_cond (OMP_CLAUSE_IF_EXPR (ifc)); else ifc = build_x_binary_op (OMP_CLAUSE_LOCATION (ifc), NE_EXPR, OMP_CLAUSE_IF_EXPR (ifc), ERROR_MARK, integer_zero_node, ERROR_MARK, NULL, tf_warning_or_error); } else ifc = boolean_true_node; vec->quick_push (build_int_cst (integer_type_node, mask)); vec->quick_push (ifc); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); finish_expr_stmt (stmt); } void finish_omp_cancellation_point (tree clauses) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCELLATION_POINT); int mask = 0; if (omp_find_clause (clauses, OMP_CLAUSE_PARALLEL)) mask = 1; else if (omp_find_clause (clauses, OMP_CLAUSE_FOR)) mask = 2; else if (omp_find_clause (clauses, OMP_CLAUSE_SECTIONS)) mask = 4; else if (omp_find_clause (clauses, OMP_CLAUSE_TASKGROUP)) mask = 8; else { error ("%<#pragma omp cancellation point%> must specify one of " "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses"); return; } releasing_vec vec = make_tree_vector_single (build_int_cst (integer_type_node, mask)); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); finish_expr_stmt (stmt); } /* Begin a __transaction_atomic or __transaction_relaxed statement. If PCOMPOUND is non-null, this is for a function-transaction-block, and we should create an extra compound stmt. */ tree begin_transaction_stmt (location_t loc, tree *pcompound, int flags) { tree r; if (pcompound) *pcompound = begin_compound_stmt (0); r = build_stmt (loc, TRANSACTION_EXPR, NULL_TREE); /* Only add the statement to the function if support enabled. */ if (flag_tm) add_stmt (r); else error_at (loc, ((flags & TM_STMT_ATTR_RELAXED) != 0 ? G_("%<__transaction_relaxed%> without " "transactional memory support enabled") : G_("%<__transaction_atomic%> without " "transactional memory support enabled"))); TRANSACTION_EXPR_BODY (r) = push_stmt_list (); TREE_SIDE_EFFECTS (r) = 1; return r; } /* End a __transaction_atomic or __transaction_relaxed statement. If COMPOUND_STMT is non-null, this is for a function-transaction-block, and we should end the compound. If NOEX is non-NULL, we wrap the body in a MUST_NOT_THROW_EXPR with NOEX as condition. */ void finish_transaction_stmt (tree stmt, tree compound_stmt, int flags, tree noex) { TRANSACTION_EXPR_BODY (stmt) = pop_stmt_list (TRANSACTION_EXPR_BODY (stmt)); TRANSACTION_EXPR_OUTER (stmt) = (flags & TM_STMT_ATTR_OUTER) != 0; TRANSACTION_EXPR_RELAXED (stmt) = (flags & TM_STMT_ATTR_RELAXED) != 0; TRANSACTION_EXPR_IS_STMT (stmt) = 1; /* noexcept specifications are not allowed for function transactions. */ gcc_assert (!(noex && compound_stmt)); if (noex) { tree body = build_must_not_throw_expr (TRANSACTION_EXPR_BODY (stmt), noex); protected_set_expr_location (body, EXPR_LOCATION (TRANSACTION_EXPR_BODY (stmt))); TREE_SIDE_EFFECTS (body) = 1; TRANSACTION_EXPR_BODY (stmt) = body; } if (compound_stmt) finish_compound_stmt (compound_stmt); } /* Build a __transaction_atomic or __transaction_relaxed expression. If NOEX is non-NULL, we wrap the body in a MUST_NOT_THROW_EXPR with NOEX as condition. 
*/ tree build_transaction_expr (location_t loc, tree expr, int flags, tree noex) { tree ret; if (noex) { expr = build_must_not_throw_expr (expr, noex); protected_set_expr_location (expr, loc); TREE_SIDE_EFFECTS (expr) = 1; } ret = build1 (TRANSACTION_EXPR, TREE_TYPE (expr), expr); if (flags & TM_STMT_ATTR_RELAXED) TRANSACTION_EXPR_RELAXED (ret) = 1; TREE_SIDE_EFFECTS (ret) = 1; SET_EXPR_LOCATION (ret, loc); return ret; } void init_cp_semantics (void) { } /* Build a STATIC_ASSERT for a static assertion with the condition CONDITION and the message text MESSAGE. LOCATION is the location of the static assertion in the source code. When MEMBER_P, this static assertion is a member of a class. */ void finish_static_assert (tree condition, tree message, location_t location, bool member_p) { tsubst_flags_t complain = tf_warning_or_error; if (message == NULL_TREE || message == error_mark_node || condition == NULL_TREE || condition == error_mark_node) return; if (check_for_bare_parameter_packs (condition)) condition = error_mark_node; if (instantiation_dependent_expression_p (condition)) { /* We're in a template; build a STATIC_ASSERT and put it in the right place. */ tree assertion; assertion = make_node (STATIC_ASSERT); STATIC_ASSERT_CONDITION (assertion) = condition; STATIC_ASSERT_MESSAGE (assertion) = message; STATIC_ASSERT_SOURCE_LOCATION (assertion) = location; if (member_p) maybe_add_class_template_decl_list (current_class_type, assertion, /*friend_p=*/0); else add_stmt (assertion); return; } /* Save the condition in case it was a concept check. */ tree orig_condition = condition; /* Fold the expression and convert it to a boolean value. */ condition = perform_implicit_conversion_flags (boolean_type_node, condition, complain, LOOKUP_NORMAL); condition = fold_non_dependent_expr (condition, complain, /*manifestly_const_eval=*/true); if (TREE_CODE (condition) == INTEGER_CST && !integer_zerop (condition)) /* Do nothing; the condition is satisfied. */ ; else { location_t saved_loc = input_location; input_location = location; if (TREE_CODE (condition) == INTEGER_CST && integer_zerop (condition)) { int sz = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (message)))); int len = TREE_STRING_LENGTH (message) / sz - 1; /* Report the error. */ if (len == 0) error ("static assertion failed"); else error ("static assertion failed: %s", TREE_STRING_POINTER (message)); /* Actually explain the failure if this is a concept check or a requires-expression. */ if (concept_check_p (orig_condition) || TREE_CODE (orig_condition) == REQUIRES_EXPR) diagnose_constraints (location, orig_condition, NULL_TREE); } else if (condition && condition != error_mark_node) { error ("non-constant condition for static assertion"); if (require_rvalue_constant_expression (condition)) cxx_constant_value (condition); } input_location = saved_loc; } } /* Implements the C++0x decltype keyword. Returns the type of EXPR, suitable for use as a type-specifier. ID_EXPRESSION_OR_MEMBER_ACCESS_P is true when EXPR was parsed as an id-expression or a class member access, FALSE when it was parsed as a full expression. 
*/ tree finish_decltype_type (tree expr, bool id_expression_or_member_access_p, tsubst_flags_t complain) { tree type = NULL_TREE; if (!expr || error_operand_p (expr)) return error_mark_node; if (TYPE_P (expr) || TREE_CODE (expr) == TYPE_DECL || (TREE_CODE (expr) == BIT_NOT_EXPR && TYPE_P (TREE_OPERAND (expr, 0)))) { if (complain & tf_error) error ("argument to %<decltype%> must be an expression"); return error_mark_node; } /* Depending on the resolution of DR 1172, we may later need to distinguish instantiation-dependent but not type-dependent expressions so that, say, A<decltype(sizeof(T))>::U doesn't require 'typename'. */ if (instantiation_dependent_uneval_expression_p (expr)) { type = cxx_make_type (DECLTYPE_TYPE); DECLTYPE_TYPE_EXPR (type) = expr; DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (type) = id_expression_or_member_access_p; SET_TYPE_STRUCTURAL_EQUALITY (type); return type; } /* The type denoted by decltype(e) is defined as follows: */ expr = resolve_nondeduced_context (expr, complain); if (invalid_nonstatic_memfn_p (input_location, expr, complain)) return error_mark_node; if (type_unknown_p (expr)) { if (complain & tf_error) error ("%<decltype%> cannot resolve address of overloaded function"); return error_mark_node; } /* To get the size of a static data member declared as an array of unknown bound, we need to instantiate it. */ if (VAR_P (expr) && VAR_HAD_UNKNOWN_BOUND (expr) && DECL_TEMPLATE_INSTANTIATION (expr)) instantiate_decl (expr, /*defer_ok*/true, /*expl_inst_mem*/false); if (id_expression_or_member_access_p) { /* If e is an id-expression or a class member access (5.2.5 [expr.ref]), decltype(e) is defined as the type of the entity named by e. If there is no such entity, or e names a set of overloaded functions, the program is ill-formed. */ if (identifier_p (expr)) expr = lookup_name (expr); if (INDIRECT_REF_P (expr) || TREE_CODE (expr) == VIEW_CONVERT_EXPR) /* This can happen when the expression is, e.g., "a.b". Just look at the underlying operand. */ expr = TREE_OPERAND (expr, 0); if (TREE_CODE (expr) == OFFSET_REF || TREE_CODE (expr) == MEMBER_REF || TREE_CODE (expr) == SCOPE_REF) /* We're only interested in the field itself. If it is a BASELINK, we will need to see through it in the next step. */ expr = TREE_OPERAND (expr, 1); if (BASELINK_P (expr)) /* See through BASELINK nodes to the underlying function. */ expr = BASELINK_FUNCTIONS (expr); /* decltype of a decomposition name drops references in the tuple case (unlike decltype of a normal variable) and keeps cv-qualifiers from the containing object in the other cases (unlike decltype of a member access expression). */ if (DECL_DECOMPOSITION_P (expr)) { if (DECL_HAS_VALUE_EXPR_P (expr)) /* Expr is an array or struct subobject proxy, handle bit-fields properly. */ return unlowered_expr_type (expr); else /* Expr is a reference variable for the tuple case. */ return lookup_decomp_type (expr); } switch (TREE_CODE (expr)) { case FIELD_DECL: if (DECL_BIT_FIELD_TYPE (expr)) { type = DECL_BIT_FIELD_TYPE (expr); break; } /* Fall through for fields that aren't bitfields. 
*/ gcc_fallthrough (); case FUNCTION_DECL: case VAR_DECL: case CONST_DECL: case PARM_DECL: case RESULT_DECL: case TEMPLATE_PARM_INDEX: expr = mark_type_use (expr); type = TREE_TYPE (expr); break; case ERROR_MARK: type = error_mark_node; break; case COMPONENT_REF: case COMPOUND_EXPR: mark_type_use (expr); type = is_bitfield_expr_with_lowered_type (expr); if (!type) type = TREE_TYPE (TREE_OPERAND (expr, 1)); break; case BIT_FIELD_REF: gcc_unreachable (); case INTEGER_CST: case PTRMEM_CST: /* We can get here when the id-expression refers to an enumerator or non-type template parameter. */ type = TREE_TYPE (expr); break; default: /* Handle instantiated template non-type arguments. */ type = TREE_TYPE (expr); break; } } else { /* Within a lambda-expression: Every occurrence of decltype((x)) where x is a possibly parenthesized id-expression that names an entity of automatic storage duration is treated as if x were transformed into an access to a corresponding data member of the closure type that would have been declared if x were a use of the denoted entity. */ if (outer_automatic_var_p (expr) && current_function_decl && LAMBDA_FUNCTION_P (current_function_decl)) type = capture_decltype (expr); else if (error_operand_p (expr)) type = error_mark_node; else if (expr == current_class_ptr) /* If the expression is just "this", we want the cv-unqualified pointer for the "this" type. */ type = TYPE_MAIN_VARIANT (TREE_TYPE (expr)); else { /* Otherwise, where T is the type of e, if e is an lvalue, decltype(e) is defined as T&; if an xvalue, T&&; otherwise, T. */ cp_lvalue_kind clk = lvalue_kind (expr); type = unlowered_expr_type (expr); gcc_assert (!TYPE_REF_P (type)); /* For vector types, pick a non-opaque variant. */ if (VECTOR_TYPE_P (type)) type = strip_typedefs (type); if (clk != clk_none && !(clk & clk_class)) type = cp_build_reference_type (type, (clk & clk_rvalueref)); } } return type; } /* Called from trait_expr_value to evaluate either __has_nothrow_assign or __has_nothrow_copy, depending on assign_p. Returns true iff all the copy {ctor,assign} fns are nothrow. */ static bool classtype_has_nothrow_assign_or_copy_p (tree type, bool assign_p) { tree fns = NULL_TREE; if (assign_p || TYPE_HAS_COPY_CTOR (type)) fns = get_class_binding (type, assign_p ? assign_op_identifier : ctor_identifier); bool saw_copy = false; for (ovl_iterator iter (fns); iter; ++iter) { tree fn = *iter; if (copy_fn_p (fn) > 0) { saw_copy = true; if (!maybe_instantiate_noexcept (fn) || !TYPE_NOTHROW_P (TREE_TYPE (fn))) return false; } } return saw_copy; } /* Actually evaluates the trait. */ static bool trait_expr_value (cp_trait_kind kind, tree type1, tree type2) { enum tree_code type_code1; tree t; type_code1 = TREE_CODE (type1); switch (kind) { case CPTK_HAS_NOTHROW_ASSIGN: type1 = strip_array_types (type1); return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE && (trait_expr_value (CPTK_HAS_TRIVIAL_ASSIGN, type1, type2) || (CLASS_TYPE_P (type1) && classtype_has_nothrow_assign_or_copy_p (type1, true)))); case CPTK_HAS_TRIVIAL_ASSIGN: /* ??? The standard seems to be missing the "or array of such a class type" wording for this trait. 
*/ type1 = strip_array_types (type1); return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE && (trivial_type_p (type1) || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_COPY_ASSIGN (type1)))); case CPTK_HAS_NOTHROW_CONSTRUCTOR: type1 = strip_array_types (type1); return (trait_expr_value (CPTK_HAS_TRIVIAL_CONSTRUCTOR, type1, type2) || (CLASS_TYPE_P (type1) && (t = locate_ctor (type1)) && maybe_instantiate_noexcept (t) && TYPE_NOTHROW_P (TREE_TYPE (t)))); case CPTK_HAS_TRIVIAL_CONSTRUCTOR: type1 = strip_array_types (type1); return (trivial_type_p (type1) || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_DFLT (type1))); case CPTK_HAS_NOTHROW_COPY: type1 = strip_array_types (type1); return (trait_expr_value (CPTK_HAS_TRIVIAL_COPY, type1, type2) || (CLASS_TYPE_P (type1) && classtype_has_nothrow_assign_or_copy_p (type1, false))); case CPTK_HAS_TRIVIAL_COPY: /* ??? The standard seems to be missing the "or array of such a class type" wording for this trait. */ type1 = strip_array_types (type1); return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_COPY_CTOR (type1))); case CPTK_HAS_TRIVIAL_DESTRUCTOR: type1 = strip_array_types (type1); return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_DESTRUCTOR (type1))); case CPTK_HAS_VIRTUAL_DESTRUCTOR: return type_has_virtual_destructor (type1); case CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS: return type_has_unique_obj_representations (type1); case CPTK_IS_ABSTRACT: return ABSTRACT_CLASS_TYPE_P (type1); case CPTK_IS_AGGREGATE: return CP_AGGREGATE_TYPE_P (type1); case CPTK_IS_BASE_OF: return (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2) && (same_type_ignoring_top_level_qualifiers_p (type1, type2) || DERIVED_FROM_P (type1, type2))); case CPTK_IS_CLASS: return NON_UNION_CLASS_TYPE_P (type1); case CPTK_IS_EMPTY: return NON_UNION_CLASS_TYPE_P (type1) && CLASSTYPE_EMPTY_P (type1); case CPTK_IS_ENUM: return type_code1 == ENUMERAL_TYPE; case CPTK_IS_FINAL: return CLASS_TYPE_P (type1) && CLASSTYPE_FINAL (type1); case CPTK_IS_LITERAL_TYPE: return literal_type_p (type1); case CPTK_IS_POD: return pod_type_p (type1); case CPTK_IS_POLYMORPHIC: return CLASS_TYPE_P (type1) && TYPE_POLYMORPHIC_P (type1); case CPTK_IS_SAME_AS: return same_type_p (type1, type2); case CPTK_IS_STD_LAYOUT: return std_layout_type_p (type1); case CPTK_IS_TRIVIAL: return trivial_type_p (type1); case CPTK_IS_TRIVIALLY_ASSIGNABLE: return is_trivially_xible (MODIFY_EXPR, type1, type2); case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE: return is_trivially_xible (INIT_EXPR, type1, type2); case CPTK_IS_TRIVIALLY_COPYABLE: return trivially_copyable_p (type1); case CPTK_IS_UNION: return type_code1 == UNION_TYPE; case CPTK_IS_ASSIGNABLE: return is_xible (MODIFY_EXPR, type1, type2); case CPTK_IS_CONSTRUCTIBLE: return is_xible (INIT_EXPR, type1, type2); default: gcc_unreachable (); return false; } } /* If TYPE is an array of unknown bound, or (possibly cv-qualified) void, or a complete type, returns true, otherwise false. */ static bool check_trait_type (tree type) { if (type == NULL_TREE) return true; if (TREE_CODE (type) == TREE_LIST) return (check_trait_type (TREE_VALUE (type)) && check_trait_type (TREE_CHAIN (type))); if (TREE_CODE (type) == ARRAY_TYPE && !TYPE_DOMAIN (type) && COMPLETE_TYPE_P (TREE_TYPE (type))) return true; if (VOID_TYPE_P (type)) return true; return !!complete_type_or_else (strip_array_types (type), NULL_TREE); } /* Process a trait expression. 
*/ tree finish_trait_expr (location_t loc, cp_trait_kind kind, tree type1, tree type2) { if (type1 == error_mark_node || type2 == error_mark_node) return error_mark_node; if (processing_template_decl) { tree trait_expr = make_node (TRAIT_EXPR); TREE_TYPE (trait_expr) = boolean_type_node; TRAIT_EXPR_TYPE1 (trait_expr) = type1; TRAIT_EXPR_TYPE2 (trait_expr) = type2; TRAIT_EXPR_KIND (trait_expr) = kind; TRAIT_EXPR_LOCATION (trait_expr) = loc; return trait_expr; } switch (kind) { case CPTK_HAS_NOTHROW_ASSIGN: case CPTK_HAS_TRIVIAL_ASSIGN: case CPTK_HAS_NOTHROW_CONSTRUCTOR: case CPTK_HAS_TRIVIAL_CONSTRUCTOR: case CPTK_HAS_NOTHROW_COPY: case CPTK_HAS_TRIVIAL_COPY: case CPTK_HAS_TRIVIAL_DESTRUCTOR: case CPTK_HAS_UNIQUE_OBJ_REPRESENTATIONS: case CPTK_HAS_VIRTUAL_DESTRUCTOR: case CPTK_IS_ABSTRACT: case CPTK_IS_AGGREGATE: case CPTK_IS_EMPTY: case CPTK_IS_FINAL: case CPTK_IS_LITERAL_TYPE: case CPTK_IS_POD: case CPTK_IS_POLYMORPHIC: case CPTK_IS_STD_LAYOUT: case CPTK_IS_TRIVIAL: case CPTK_IS_TRIVIALLY_COPYABLE: if (!check_trait_type (type1)) return error_mark_node; break; case CPTK_IS_ASSIGNABLE: case CPTK_IS_CONSTRUCTIBLE: break; case CPTK_IS_TRIVIALLY_ASSIGNABLE: case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE: if (!check_trait_type (type1) || !check_trait_type (type2)) return error_mark_node; break; case CPTK_IS_BASE_OF: if (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2) && !same_type_ignoring_top_level_qualifiers_p (type1, type2) && !complete_type_or_else (type2, NULL_TREE)) /* We already issued an error. */ return error_mark_node; break; case CPTK_IS_CLASS: case CPTK_IS_ENUM: case CPTK_IS_UNION: case CPTK_IS_SAME_AS: break; default: gcc_unreachable (); } tree val = (trait_expr_value (kind, type1, type2) ? boolean_true_node : boolean_false_node); return maybe_wrap_with_location (val, loc); } /* Do-nothing variants of functions to handle pragma FLOAT_CONST_DECIMAL64, which is ignored for C++. */ void set_float_const_decimal64 (void) { } void clear_float_const_decimal64 (void) { } bool float_const_decimal64_p (void) { return 0; } /* Return true if T designates the implied `this' parameter. */ bool is_this_parameter (tree t) { if (!DECL_P (t) || DECL_NAME (t) != this_identifier) return false; gcc_assert (TREE_CODE (t) == PARM_DECL || is_capture_proxy (t) || (cp_binding_oracle && TREE_CODE (t) == VAR_DECL)); return true; } /* Insert the deduced return type for an auto function. */ void apply_deduced_return_type (tree fco, tree return_type) { tree result; if (return_type == error_mark_node) return; if (DECL_CONV_FN_P (fco)) DECL_NAME (fco) = make_conv_op_name (return_type); TREE_TYPE (fco) = change_return_type (return_type, TREE_TYPE (fco)); result = DECL_RESULT (fco); if (result == NULL_TREE) return; if (TREE_TYPE (result) == return_type) return; if (!processing_template_decl && !VOID_TYPE_P (return_type) && !complete_type_or_else (return_type, NULL_TREE)) return; /* We already have a DECL_RESULT from start_preparsed_function. Now we need to redo the work it and allocate_struct_function did to reflect the new type. 
*/ gcc_assert (current_function_decl == fco); result = build_decl (input_location, RESULT_DECL, NULL_TREE, TYPE_MAIN_VARIANT (return_type)); DECL_ARTIFICIAL (result) = 1; DECL_IGNORED_P (result) = 1; cp_apply_type_quals_to_decl (cp_type_quals (return_type), result); DECL_RESULT (fco) = result; if (!processing_template_decl) { bool aggr = aggregate_value_p (result, fco); #ifdef PCC_STATIC_STRUCT_RETURN cfun->returns_pcc_struct = aggr; #endif cfun->returns_struct = aggr; } } /* DECL is a local variable or parameter from the surrounding scope of a lambda-expression. Returns the decltype for a use of the capture field for DECL even if it hasn't been captured yet. */ static tree capture_decltype (tree decl) { tree lam = CLASSTYPE_LAMBDA_EXPR (DECL_CONTEXT (current_function_decl)); tree cap = lookup_name_real (DECL_NAME (decl), /*type*/0, /*nonclass*/1, /*block_p=*/true, /*ns*/0, LOOKUP_HIDDEN); tree type; if (cap && is_capture_proxy (cap)) type = TREE_TYPE (cap); else switch (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lam)) { case CPLD_NONE: error ("%qD is not captured", decl); return error_mark_node; case CPLD_COPY: type = TREE_TYPE (decl); if (TYPE_REF_P (type) && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE) type = TREE_TYPE (type); break; case CPLD_REFERENCE: type = TREE_TYPE (decl); if (!TYPE_REF_P (type)) type = build_reference_type (TREE_TYPE (decl)); break; default: gcc_unreachable (); } if (!TYPE_REF_P (type)) { if (!LAMBDA_EXPR_MUTABLE_P (lam)) type = cp_build_qualified_type (type, (cp_type_quals (type) |TYPE_QUAL_CONST)); type = build_reference_type (type); } return type; } /* Build a unary fold expression of EXPR over OP. If IS_RIGHT is true, this is a right unary fold. Otherwise it is a left unary fold. */ static tree finish_unary_fold_expr (tree expr, int op, tree_code dir) { /* Build a pack expansion (assuming expr has pack type). */ if (!uses_parameter_packs (expr)) { error_at (location_of (expr), "operand of fold expression has no " "unexpanded parameter packs"); return error_mark_node; } tree pack = make_pack_expansion (expr); /* Build the fold expression. */ tree code = build_int_cstu (integer_type_node, abs (op)); tree fold = build_min_nt_loc (UNKNOWN_LOCATION, dir, code, pack); FOLD_EXPR_MODIFY_P (fold) = (op < 0); return fold; } tree finish_left_unary_fold_expr (tree expr, int op) { return finish_unary_fold_expr (expr, op, UNARY_LEFT_FOLD_EXPR); } tree finish_right_unary_fold_expr (tree expr, int op) { return finish_unary_fold_expr (expr, op, UNARY_RIGHT_FOLD_EXPR); } /* Build a binary fold expression over EXPR1 and EXPR2. The associativity of the fold is determined by EXPR1 and EXPR2 (whichever has an unexpanded parameter pack). */ tree finish_binary_fold_expr (tree pack, tree init, int op, tree_code dir) { pack = make_pack_expansion (pack); tree code = build_int_cstu (integer_type_node, abs (op)); tree fold = build_min_nt_loc (UNKNOWN_LOCATION, dir, code, pack, init); FOLD_EXPR_MODIFY_P (fold) = (op < 0); return fold; } tree finish_binary_fold_expr (tree expr1, tree expr2, int op) { // Determine which expr has an unexpanded parameter pack and // set the pack and initial term. 
bool pack1 = uses_parameter_packs (expr1); bool pack2 = uses_parameter_packs (expr2); if (pack1 && !pack2) return finish_binary_fold_expr (expr1, expr2, op, BINARY_RIGHT_FOLD_EXPR); else if (pack2 && !pack1) return finish_binary_fold_expr (expr2, expr1, op, BINARY_LEFT_FOLD_EXPR); else { if (pack1) error ("both arguments in binary fold have unexpanded parameter packs"); else error ("no unexpanded parameter packs in binary fold"); } return error_mark_node; } /* Finish __builtin_launder (arg). */ tree finish_builtin_launder (location_t loc, tree arg, tsubst_flags_t complain) { tree orig_arg = arg; if (!type_dependent_expression_p (arg)) arg = decay_conversion (arg, complain); if (error_operand_p (arg)) return error_mark_node; if (!type_dependent_expression_p (arg) && !TYPE_PTR_P (TREE_TYPE (arg))) { error_at (loc, "non-pointer argument to %<__builtin_launder%>"); return error_mark_node; } if (processing_template_decl) arg = orig_arg; return build_call_expr_internal_loc (loc, IFN_LAUNDER, TREE_TYPE (arg), 1, arg); } /* Finish __builtin_convertvector (arg, type). */ tree cp_build_vec_convert (tree arg, location_t loc, tree type, tsubst_flags_t complain) { if (error_operand_p (type)) return error_mark_node; if (error_operand_p (arg)) return error_mark_node; tree ret = NULL_TREE; if (!type_dependent_expression_p (arg) && !dependent_type_p (type)) ret = c_build_vec_convert (cp_expr_loc_or_input_loc (arg), decay_conversion (arg, complain), loc, type, (complain & tf_error) != 0); if (!processing_template_decl) return ret; return build_call_expr_internal_loc (loc, IFN_VEC_CONVERT, type, 1, arg); } #include "gt-cp-semantics.h"
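/* Illustration (not part of the front end): the fold-expression helpers
   above -- finish_unary_fold_expr, finish_left/right_unary_fold_expr and
   finish_binary_fold_expr -- build the FOLD_EXPR trees for C++17
   fold-expressions, picking the direction of a binary fold from whichever
   operand carries the unexpanded parameter pack.  Below is a minimal
   sketch of the source-level constructs they correspond to; the function
   names (sum_left, sum_right, sum_from_100) are illustrative only.  */

#include <iostream>

/* Unary left fold: ((a1 + a2) + ...) + aN.  */
template <typename... Ts>
auto sum_left (Ts... args) { return (... + args); }

/* Unary right fold: a1 + (a2 + (... + aN)).  */
template <typename... Ts>
auto sum_right (Ts... args) { return (args + ...); }

/* Binary left fold: the pack sits on one side of the "..." and the
   non-pack operand supplies the initial term.  */
template <typename... Ts>
auto sum_from_100 (Ts... args) { return (100 + ... + args); }

int main ()
{
  std::cout << sum_left (1, 2, 3) << ' '        /* prints 6 */
            << sum_right (1, 2, 3) << ' '       /* prints 6 */
            << sum_from_100 (1, 2, 3) << '\n';  /* prints 106 */
  return 0;
}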
builder.h
// Copyright (c) 2015, The Regents of the University of California (Regents) // See LICENSE.txt for license details #ifndef BUILDER_H_ #define BUILDER_H_ #include <algorithm> #include <cinttypes> #include <fstream> #include <functional> #include <type_traits> #include <utility> #include "command_line.h" #include "generator.h" #include "graph.h" #include "platform_atomics.h" #include "pvector.h" #include "reader.h" #include "timer.h" #include "util.h" /* GAP Benchmark Suite Class: BuilderBase Author: Scott Beamer Given arguements from the command line (cli), returns a built graph - MakeGraph() will parse cli and obtain edgelist and call MakeGraphFromEL(edgelist) to perform actual graph construction - edgelist can be from file (reader) or synthetically generated (generator) - Common case: BuilderBase typedef'd (w/ params) to be Builder (benchmark.h) */ template <typename NodeID_, typename DestID_ = NodeID_, typename WeightT_ = NodeID_, bool invert = true> class BuilderBase { typedef EdgePair<NodeID_, DestID_> Edge; typedef pvector<Edge> EdgeList; const CLBase &cli_; bool symmetrize_; bool needs_weights_; int64_t num_nodes_ = -1; public: explicit BuilderBase(const CLBase &cli) : cli_(cli) { symmetrize_ = cli_.symmetrize(); needs_weights_ = !std::is_same<NodeID_, DestID_>::value; } DestID_ GetSource(EdgePair<NodeID_, NodeID_> e) { return e.u; } DestID_ GetSource(EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> e) { return NodeWeight<NodeID_, WeightT_>(e.u, e.v.w); } NodeID_ FindMaxNodeID(const EdgeList &el) { NodeID_ max_seen = 0; #pragma omp parallel for reduction(max : max_seen) for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; max_seen = (std::max)(max_seen, e.u); max_seen = (std::max)(max_seen, (NodeID_) e.v); } return max_seen; } pvector<NodeID_> CountDegrees(const EdgeList &el, bool transpose) { pvector<NodeID_> degrees(num_nodes_, 0); #pragma omp parallel for for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; if (symmetrize_ || (!symmetrize_ && !transpose)) fetch_and_add(degrees[e.u], 1); if (symmetrize_ || (!symmetrize_ && transpose)) fetch_and_add(degrees[(NodeID_) e.v], 1); } return degrees; } static pvector<SGOffset> PrefixSum(const pvector<NodeID_> &degrees) { pvector<SGOffset> sums(degrees.size() + 1); SGOffset total = 0; for (size_t n=0; n < degrees.size(); n++) { sums[n] = total; total += degrees[n]; } sums[degrees.size()] = total; return sums; } static pvector<SGOffset> ParallelPrefixSum(const pvector<NodeID_> &degrees) { const size_t block_size = 1<<20; const size_t num_blocks = (degrees.size() + block_size - 1) / block_size; pvector<SGOffset> local_sums(num_blocks); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { SGOffset lsum = 0; size_t block_end = (std::min)((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) lsum += degrees[i]; local_sums[block] = lsum; } pvector<SGOffset> bulk_prefix(num_blocks+1); SGOffset total = 0; for (size_t block=0; block < num_blocks; block++) { bulk_prefix[block] = total; total += local_sums[block]; } bulk_prefix[num_blocks] = total; pvector<SGOffset> prefix(degrees.size() + 1); #pragma omp parallel for for (size_t block=0; block < num_blocks; block++) { SGOffset local_total = bulk_prefix[block]; size_t block_end = (std::min)((block + 1) * block_size, degrees.size()); for (size_t i=block * block_size; i < block_end; i++) { prefix[i] = local_total; local_total += degrees[i]; } } prefix[degrees.size()] = bulk_prefix[num_blocks]; return prefix; } // 
Removes self-loops and redundant edges // Side effect: neighbor IDs will be sorted void SquishCSR(const CSRGraph<NodeID_, DestID_, invert> &g, bool transpose, DestID_*** sq_index, DestID_** sq_neighs) { pvector<NodeID_> diffs(g.num_nodes()); DestID_ *n_start, *n_end; #pragma omp parallel for private(n_start, n_end) for (NodeID_ n=0; n < g.num_nodes(); n++) { if (transpose) { n_start = g.in_neigh(n).begin(); n_end = g.in_neigh(n).end(); } else { n_start = g.out_neigh(n).begin(); n_end = g.out_neigh(n).end(); } std::sort(n_start, n_end); DestID_ *new_end = std::unique(n_start, n_end); new_end = std::remove(n_start, new_end, n); diffs[n] = new_end - n_start; } pvector<SGOffset> sq_offsets = ParallelPrefixSum(diffs); *sq_neighs = new DestID_[sq_offsets[g.num_nodes()]]; *sq_index = CSRGraph<NodeID_, DestID_>::GenIndex(sq_offsets, *sq_neighs); #pragma omp parallel for private(n_start) for (NodeID_ n=0; n < g.num_nodes(); n++) { if (transpose) n_start = g.in_neigh(n).begin(); else n_start = g.out_neigh(n).begin(); std::copy(n_start, n_start+diffs[n], (*sq_index)[n]); } } CSRGraph<NodeID_, DestID_, invert> SquishGraph( const CSRGraph<NodeID_, DestID_, invert> &g) { DestID_ **out_index, *out_neighs, **in_index, *in_neighs; SquishCSR(g, false, &out_index, &out_neighs); if (g.directed()) { if (invert) SquishCSR(g, true, &in_index, &in_neighs); return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index, out_neighs, in_index, in_neighs); } else { return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), out_index, out_neighs); } } /* Graph Bulding Steps (for CSR): - Read edgelist once to determine vertex degrees (CountDegrees) - Determine vertex offsets by a prefix sum (ParallelPrefixSum) - Allocate storage and set points according to offsets (GenIndex) - Copy edges into storage */ void MakeCSR(const EdgeList &el, bool transpose, DestID_*** index, DestID_** neighs) { pvector<NodeID_> degrees = CountDegrees(el, transpose); pvector<SGOffset> offsets = ParallelPrefixSum(degrees); *neighs = new DestID_[offsets[num_nodes_]]; *index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, *neighs); #pragma omp parallel for for (auto it = el.begin(); it < el.end(); it++) { Edge e = *it; if (symmetrize_ || (!symmetrize_ && !transpose)) (*neighs)[fetch_and_add(offsets[e.u], 1)] = e.v; if (symmetrize_ || (!symmetrize_ && transpose)) (*neighs)[fetch_and_add(offsets[static_cast<NodeID_>(e.v)], 1)] = GetSource(e); } } CSRGraph<NodeID_, DestID_, invert> MakeGraphFromEL(EdgeList &el) { DestID_ **index = nullptr, **inv_index = nullptr; DestID_ *neighs = nullptr, *inv_neighs = nullptr; Timer t; t.Start(); if (num_nodes_ == -1) num_nodes_ = FindMaxNodeID(el)+1; if (needs_weights_) Generator<NodeID_, DestID_, WeightT_>::InsertWeights(el); MakeCSR(el, false, &index, &neighs); if (!symmetrize_ && invert) MakeCSR(el, true, &inv_index, &inv_neighs); t.Stop(); PrintTime("Build Time", t.Seconds()); if (symmetrize_) return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs); else return CSRGraph<NodeID_, DestID_, invert>(num_nodes_, index, neighs, inv_index, inv_neighs); } CSRGraph<NodeID_, DestID_, invert> MakeGraph() { CSRGraph<NodeID_, DestID_, invert> g; { // extra scope to trigger earlier deletion of el (save memory) EdgeList el; if (cli_.filename() != "") { Reader<NodeID_, DestID_, WeightT_, invert> r(cli_.filename()); if ((r.GetSuffix() == ".sg") || (r.GetSuffix() == ".wsg")) { return r.ReadSerializedGraph(); } else { el = r.ReadFile(needs_weights_); } } else if (cli_.scale() != -1) { Generator<NodeID_, DestID_> 
gen(cli_.scale(), cli_.degree()); el = gen.GenerateEL(cli_.uniform()); } g = MakeGraphFromEL(el); } return SquishGraph(g); } // Relabels (and rebuilds) graph by order of decreasing degree static CSRGraph<NodeID_, DestID_, invert> RelabelByDegree( const CSRGraph<NodeID_, DestID_, invert> &g) { if (g.directed()) { std::cout << "Cannot relabel directed graph" << std::endl; std::exit(-11); } Timer t; t.Start(); typedef std::pair<int64_t, NodeID_> degree_node_p; pvector<degree_node_p> degree_id_pairs(g.num_nodes()); #pragma omp parallel for for (NodeID_ n=0; n < g.num_nodes(); n++) degree_id_pairs[n] = std::make_pair(g.out_degree(n), n); std::sort(degree_id_pairs.begin(), degree_id_pairs.end(), std::greater<degree_node_p>()); pvector<NodeID_> degrees(g.num_nodes()); pvector<NodeID_> new_ids(g.num_nodes()); #pragma omp parallel for for (NodeID_ n=0; n < g.num_nodes(); n++) { degrees[n] = degree_id_pairs[n].first; new_ids[degree_id_pairs[n].second] = n; } pvector<SGOffset> offsets = ParallelPrefixSum(degrees); DestID_* neighs = new DestID_[offsets[g.num_nodes()]]; DestID_** index = CSRGraph<NodeID_, DestID_>::GenIndex(offsets, neighs); #pragma omp parallel for for (NodeID_ u=0; u < g.num_nodes(); u++) { for (NodeID_ v : g.out_neigh(u)) neighs[offsets[new_ids[u]]++] = new_ids[v]; std::sort(index[new_ids[u]], index[new_ids[u]+1]); } t.Stop(); PrintTime("Relabel", t.Seconds()); return CSRGraph<NodeID_, DestID_, invert>(g.num_nodes(), index, neighs); } }; #endif // BUILDER_H_
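/* Illustration: a minimal, serial sketch of the CSR construction steps that
   BuilderBase::MakeCSR performs above (count degrees, prefix-sum the degrees
   into offsets, then scatter the edges).  The types here (std::vector,
   std::pair, the BuildCSR function) are illustrative stand-ins, not the
   pvector/EdgePair/CSRGraph types used by the builder itself. */
#include <cstdint>
#include <utility>
#include <vector>

using Edge = std::pair<int32_t, int32_t>;  // (u, v)

struct CSR {
  std::vector<int64_t> offsets;  // size num_nodes + 1
  std::vector<int32_t> neighs;   // size num_edges
};

CSR BuildCSR(const std::vector<Edge> &el, int32_t num_nodes) {
  CSR g;
  // Step 1: count out-degrees (CountDegrees).
  std::vector<int64_t> degrees(num_nodes, 0);
  for (const Edge &e : el)
    degrees[e.first]++;
  // Step 2: exclusive prefix sum gives each vertex's block offset
  //         (PrefixSum / ParallelPrefixSum).
  g.offsets.assign(num_nodes + 1, 0);
  for (int32_t n = 0; n < num_nodes; n++)
    g.offsets[n + 1] = g.offsets[n] + degrees[n];
  // Step 3: scatter destinations; 'cursor' plays the role of the
  //         fetch_and_add'ed offsets in the parallel version.
  std::vector<int64_t> cursor(g.offsets.begin(), g.offsets.end() - 1);
  g.neighs.resize(el.size());
  for (const Edge &e : el)
    g.neighs[cursor[e.first]++] = e.second;
  return g;
}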
edge_vol_int.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ #include "edge_proxy_common.h" #include <libxsmm_intrinsics_x86.h> #include <libxsmm.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <math.h> #if defined(_OPENMP) # include <omp.h> #endif /*#define EDGE_HP_1G*/ /*#define HANDLE_AMOK*/ #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) #include <sys/mman.h> #include <linux/mman.h> #endif LIBXSMM_INLINE void* edge_hp_malloc( size_t nbytes, size_t alignment ) { void* ret_ptr = NULL; #if defined(EDGE_HP_1G) size_t num_large_pages = nbytes / (1073741824L); if ( nbytes > num_large_pages*1073741824L ) { num_large_pages++; } nbytes = (size_t) num_large_pages * 1073741824L; printf("trying to allocate %ld 1G pages\n", num_large_pages); /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 );*/ ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB | MAP_HUGE_1GB, -1, 0 ); if ( (ret_ptr == (void *)(-1)) ) { fprintf(stderr,"1G mmap call failed\n"); exit(1); } #elif defined(EDGE_HP_2M) size_t num_large_pages = nbytes / (2097152UL); if ( nbytes > num_large_pages*2097152UL ) { num_large_pages++; } nbytes = (size_t) num_large_pages * 2097152UL; printf("trying to allocate %ld 2M pages\n", num_large_pages); /*ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 );*/ ret_ptr = mmap( NULL, nbytes, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE | MAP_HUGETLB, -1, 0 ); if ( (ret_ptr == (void *)(-1)) ) { fprintf(stderr,"2M mmap call failed\n"); exit(1); } #else ret_ptr = libxsmm_aligned_malloc( nbytes, alignment ); #endif return ret_ptr; } LIBXSMM_INLINE void edge_hp_free( void* ptr, size_t nbytes ) { LIBXSMM_UNUSED( nbytes ); #if defined(EDGE_HP_1G) /* to be implemented */ #elif defined(EDGE_HP_2M) /* to be implemented */ #else libxsmm_free( ptr ); #endif } #if defined(__AVX512F__) LIBXSMM_INLINE void matMulFusedAC( unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c ) { unsigned int l_m, l_n, l_k; const __m512d beta = _mm512_set1_pd( i_beta ); LIBXSMM_UNUSED(i_r); for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = (i_beta != 0.0) ? 
_mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), beta ) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc); } } for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ); for( l_k = 0; l_k < i_k; l_k++ ) { const __m512d alpha = _mm512_set1_pd( i_b[l_k*i_ldB + l_n] ); vc = _mm512_fmadd_pd( alpha, _mm512_loadu_pd( &(i_a[l_m*i_ldA*8 + l_k*8 + 0]) ), vc); } _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc ); } } } LIBXSMM_INLINE void matMulFusedBC( unsigned short i_r, unsigned int i_m, unsigned int i_n, unsigned int i_k, unsigned int i_ldA, unsigned int i_ldB, unsigned int i_ldC, double i_beta, const double *i_a, const double *i_b, double *o_c ) { unsigned int l_m, l_n, l_k; const __m512d beta = _mm512_set1_pd( i_beta ); LIBXSMM_UNUSED(i_r); for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = (i_beta != 0.0) ? _mm512_mul_pd( _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ), beta ) : _mm512_setzero_pd(); _mm512_storeu_pd(&(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc); } } for( l_m = 0; l_m < i_m; l_m++ ) { for( l_n = 0; l_n < i_n; l_n++ ) { __m512d vc = _mm512_loadu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]) ); for( l_k = 0; l_k < i_k; l_k++ ) { const __m512d alpha = _mm512_set1_pd( i_a[l_m*i_ldA + l_k] ); vc = _mm512_fmadd_pd( alpha, _mm512_loadu_pd( &(i_b[l_k*i_ldB*8 + l_n*8 + 0]) ), vc); } _mm512_storeu_pd( &(o_c[l_m*i_ldC*8 + l_n*8 + 0]), vc ); } } } #endif LIBXSMM_INLINE void amok_detect( const double* i_runtimes, size_t* io_amoks, const size_t i_workers ) { double time_avg; size_t i; time_avg = 0.0; for (i = 0; i < i_workers; i++) { if ( io_amoks[8*i] == 0 ) { time_avg += i_runtimes[8*i]; } } time_avg = time_avg/((double)(i_workers-io_amoks[8*i_workers])); /* let detect amoks */ for (i = 0; i < i_workers; i++) { if ( io_amoks[8*i] == 0 ) { if ( i_runtimes[8*i] > time_avg*1.07 ) { /* this is the amok condition */ io_amoks[8*i_workers]++; io_amoks[8*i] = 1; } } } } LIBXSMM_INLINE void amok_balance( const size_t* i_amoks, const size_t i_workers, const size_t i_worksize, const size_t i_mytid, size_t* io_chunk, size_t* io_mystart, size_t* io_myend ) { size_t l_chunk, l_start, l_end; size_t l_cur_amoks = i_amoks[8*i_workers]; size_t l_non_amoks = i_workers - l_cur_amoks; l_chunk = (i_worksize % l_non_amoks == 0) ? (i_worksize / l_non_amoks) : ((i_worksize / l_non_amoks) + 1); if (i_amoks[8*i_mytid] != 0) { l_start = 0; l_end = 0; } else { size_t l_tid_offset = 0; size_t l_z; for ( l_z = 0; l_z < i_mytid; l_z++) { if ( i_amoks[8*l_z] != 0 ) { l_tid_offset++; } } l_tid_offset = i_mytid - l_tid_offset; l_start = (l_tid_offset * l_chunk < i_worksize) ? (l_tid_offset * l_chunk) : i_worksize; l_end = ((l_tid_offset+1) * l_chunk < i_worksize) ? 
((l_tid_offset+1) * l_chunk) : i_worksize; } *io_chunk = l_chunk; *io_mystart = l_start; *io_myend = l_end; } int main(int argc, char* argv[]) { char* mat_a = 0; unsigned int *mat_a_rowptr, *mat_a_colidx; unsigned int mat_a_rowcount, mat_a_colcount, mat_a_nnz; double* mat_a_values; libxsmm_dmmfunction a_kernel; char* mat_b = 0; unsigned int *mat_b_rowptr, *mat_b_colidx; unsigned int mat_b_rowcount, mat_b_colcount, mat_b_nnz; double* mat_b_values; libxsmm_dmmfunction b_kernel; char* mat_c = 0; unsigned int *mat_c_rowptr, *mat_c_colidx; unsigned int mat_c_rowcount, mat_c_colcount, mat_c_nnz; double* mat_c_values; libxsmm_dmmfunction c_kernel; char* mat_st = 0; unsigned int *mat_st_rowptr, *mat_st_colidx; unsigned int mat_st_rowcount, mat_st_colcount, mat_st_nnz; double* mat_st_values; libxsmm_dmmfunction st_kernel; int num_modes = 9; int num_quants = 9; size_t num_elems = 0; size_t num_cfr = 8; size_t num_reps = 1; size_t elem_size; /* OpenMP: signed induction variables */ int i, j; const libxsmm_gemm_descriptor *l_xgemm_desc_stiff = 0, *l_xgemm_desc_star = 0; libxsmm_descriptor_blob l_xgemm_blob_stiff, l_xgemm_blob_star; const libxsmm_gemm_prefetch_type prefetch = LIBXSMM_GEMM_PREFETCH_NONE; const int flags = LIBXSMM_GEMM_FLAGS('N', 'N'); const double alpha = 1, beta = 1; double flops_vol; double* q; double* qt; double* qs; double* star; double* global; unsigned long long l_start, l_end; double l_total; unsigned int l_num_threads; unsigned int l_star_ent = num_quants*num_quants; double* l_total_thread; double* l_cur_thread_time; double time_max; double time_min; double time_avg; size_t* amoks; /* read cmd */ if ((argc > 1 && !strncmp(argv[1], "-h", 3)) || (argc != 8)) { printf("Usage: %s stif1 stif2 stif3 star nModes nElems nReps\n", argv[0]); return 0; } libxsmm_rng_set_seed(1); /* some empty lines at the beginning */ printf("\n"); i = 1; if (argc > (int)i) mat_a = argv[i++]; if (argc > (int)i) mat_b = argv[i++]; if (argc > (int)i) mat_c = argv[i++]; if (argc > (int)i) mat_st = argv[i++]; if (argc > (int)i) num_modes = atoi(argv[i++]); if (argc > (int)i) num_elems = atoi(argv[i++]); if (argc > (int)i) num_reps = atoi(argv[i++]); elem_size = num_modes*num_quants*num_cfr; #if defined(_OPENMP) #pragma omp parallel { #pragma omp master { l_num_threads = omp_get_num_threads(); } } #else l_num_threads = 1; #endif l_total_thread = (double*)malloc(8*l_num_threads*sizeof(double)); l_cur_thread_time = (double*)malloc(8*l_num_threads*sizeof(double)); amoks = (size_t*)malloc(8*(l_num_threads+1)*sizeof(size_t)); for ( i = 0; i < 8*((int)l_num_threads+1); i++ ) { amoks[i] = 0; } /* read matrices */ printf("reading sparse matrices... "); edge_sparse_csr_reader_double( mat_a, &mat_a_rowptr, &mat_a_colidx, &mat_a_values, &mat_a_rowcount, &mat_a_colcount, &mat_a_nnz ); edge_sparse_csr_reader_double( mat_b, &mat_b_rowptr, &mat_b_colidx, &mat_b_values, &mat_b_rowcount, &mat_b_colcount, &mat_b_nnz ); edge_sparse_csr_reader_double( mat_c, &mat_c_rowptr, &mat_c_colidx, &mat_c_values, &mat_c_rowcount, &mat_c_colcount, &mat_c_nnz ); edge_sparse_csr_reader_double( mat_st, &mat_st_rowptr, &mat_st_colidx, &mat_st_values, &mat_st_rowcount, &mat_st_colcount, &mat_st_nnz ); printf("done!\n\n"); /* generate kernels */ printf("generating code... 
"); l_xgemm_desc_stiff = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_stiff, num_quants, num_modes, num_modes, num_modes, 0, num_modes, alpha, beta, flags, prefetch); l_xgemm_desc_star = libxsmm_dgemm_descriptor_init(&l_xgemm_blob_star, num_quants, num_modes, num_quants, 0, num_modes, num_modes, alpha, beta, flags, prefetch); a_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_a_rowptr, mat_a_colidx, (const void*)mat_a_values, (unsigned int)num_cfr ).dmm; b_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_b_rowptr, mat_b_colidx, (const void*)mat_b_values, (unsigned int)num_cfr ).dmm; c_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_stiff, mat_c_rowptr, mat_c_colidx, (const void*)mat_c_values, (unsigned int)num_cfr ).dmm; st_kernel = libxsmm_create_xcsr_soa( l_xgemm_desc_star, mat_st_rowptr, mat_st_colidx, (const void*)mat_st_values, (unsigned int)num_cfr ).dmm; if ( a_kernel == 0 ) { printf("a kernel could not be built -> exit!"); exit(-1); } if ( b_kernel == 0 ) { printf("b kernel could not be built -> exit!"); exit(-1); } if ( b_kernel == 0 ) { printf("c kernel could not be built -> exit!"); exit(-1); } if ( st_kernel == 0 ) { printf("st kernel could not be built -> exit!"); exit(-1); } printf("done!\n\n"); /* copying code to 1 GB page */ #if 0 #if defined(EDGE_HP_1G) || defined(EDGE_HP_2M) printf("copying code to 1GB page...\n"); onegcode = (void*)edge_hp_malloc( 5*1024*1024, 2097152 ); memcpy( onegcode, (void*) a_kernel, 1505 ); memcpy( onegcode+(1*1024*1024)+64, (void*) b_kernel, 2892 ); memcpy( onegcode+(2*1024*1024)+128, (void*) c_kernel, 3249 ); memcpy( onegcode+(3*1024*1024)+196, (void*)st_kernel, 11010 ); a_kernel = (libxsmm_dmmfunction)onegcode; b_kernel = (libxsmm_dmmfunction)(onegcode+(1*1024*1024)+64); c_kernel = (libxsmm_dmmfunction)(onegcode+(2*1024*1024)+128); st_kernel = (libxsmm_dmmfunction)(onegcode+(3*1024*1024)+196); printf("...done\n\n"); #endif #endif /* create unknowns and t-unknowns */ printf("allocating and initializing fake data... 
\n"); /* DoFs */ printf(" q: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) ); q = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); /* tDofs */ printf(" qt: %f MiB\n", ((double)(num_elems*num_modes*num_quants*num_cfr*sizeof(double))) / ( 1024.0*1024.0) ); qt = (double*)edge_hp_malloc( num_elems*num_modes*num_quants*num_cfr*sizeof(double), 2097152); /* star matrices */ printf(" star: %f MiB\n", ((double)(num_elems*3*l_star_ent*sizeof(double))) / ( 1024.0*1024.0 ) ); star = (double*)edge_hp_malloc( num_elems*3*l_star_ent*sizeof(double), 2097152); /* stiffness matrices */ printf("global: %f MiB\n", ((double)(3*num_modes*num_modes*sizeof(double))) / ( 1024.0*1024 ) ); global = (double*)edge_hp_malloc( 3*num_modes*num_modes*sizeof(double), 2097152); /* per thread scratch */ printf(" t: %f MiB\n", ((double)(l_num_threads*num_modes*num_quants*num_cfr*sizeof(double)))/ ( 1024.0*1024.0) ); qs = (double*)edge_hp_malloc( l_num_threads*num_modes*num_quants*num_cfr*sizeof(double), 2097152); for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { q[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)elem_size; j++) { qt[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)l_num_threads; i++) { for (j = 0; j < (int)elem_size; j++) { qs[i*elem_size + j] = libxsmm_rng_f64(); } } for (i = 0; i < (int)num_elems; i++) { for (j = 0; j < (int)mat_st_nnz*3; j++) { star[(i*3*mat_st_nnz)+j] = libxsmm_rng_f64(); } } for (i = 0; i < 3; i++) { for (j = 0; j < num_modes*num_modes; j++) { global[(i*num_modes*num_modes)+j] = libxsmm_rng_f64(); } } printf("allocation done!\n\n"); printf("running benchmark...\n"); l_start = libxsmm_timer_tick(); #if defined(_OPENMP) # pragma omp parallel private(i, j) #endif { #if defined(_OPENMP) int mytid = omp_get_thread_num(); #else int mytid = 0; #endif libxsmm_timer_tickint mystart, myend; #if defined(HANDLE_AMOK) size_t cur_amoks = 0; size_t non_amoks = l_num_threads; #endif size_t l_el_chunk = 0; size_t l_el_start = 0; size_t l_el_end = 0; /* initial work distribution */ amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end ); for (i = 0; i < (int)num_reps; i++) { #if defined(HANDLE_AMOK) /* did we had an amok? 
*/ if (cur_amoks != amoks[8*l_num_threads]) { cur_amoks = amoks[8*l_num_threads]; non_amoks = l_num_threads - cur_amoks; /* re-balance work */ amok_balance( amoks, l_num_threads, num_elems, mytid, &l_el_chunk, &l_el_start, &l_el_end ); } #endif mystart = libxsmm_timer_tick(); for (j = (int)l_el_start; j < (int)l_el_end; j++) { #if 1 st_kernel( star+(j*3*mat_st_nnz) , qt+(j*elem_size), qs+(mytid*elem_size) ); a_kernel( qs+(mytid*elem_size), global , q+(j*elem_size) ); st_kernel( star+(j*3*mat_st_nnz)+mat_st_nnz , qt+(j*elem_size), qs+(mytid*elem_size) ); b_kernel( qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) ); st_kernel( star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); c_kernel( qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) ); #else matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global, q+(j*elem_size) ); matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+mat_st_nnz, qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(num_modes*num_modes) , q+(j*elem_size) ); matMulFusedBC( 8, num_quants, num_modes, num_quants, num_quants, num_modes, num_modes, 1.0, star+(j*3*mat_st_nnz)+(2*mat_st_nnz), qt+(j*elem_size), qs+(mytid*elem_size) ); matMulFusedAC( 8, num_quants, num_modes, num_modes, num_modes, num_modes, num_modes, 1.0, qs+(mytid*elem_size), global+(2*num_modes*num_modes), q+(j*elem_size) ); #endif } myend = libxsmm_timer_tick(); l_cur_thread_time[8*mytid] = libxsmm_timer_duration( mystart, myend ); l_total_thread[8*mytid] += libxsmm_timer_duration( mystart, myend ); #if defined(_OPENMP) #pragma omp barrier #endif #if defined(HANDLE_AMOK) /* checking for amoks is centralized business */ if (mytid == 0) { /* amok check */ amok_detect( l_cur_thread_time, amoks, l_num_threads ); } #if defined(_OPENMP) #pragma omp barrier #endif #endif } } l_end = libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); printf("...done!\n\n"); /* some timing stats */ time_max = 0.0; time_min = 80000000; time_avg = 0.0; for (i = 0; i < (int)l_num_threads; i++) { if( amoks[8*i] == 0 ) { if( l_total_thread[8*i] > time_max) time_max = l_total_thread[8*i]; if( l_total_thread[8*i] < time_min) time_min = l_total_thread[8*i]; time_avg += l_total_thread[8*i]; } } time_avg = time_avg/((double)(l_num_threads-amoks[8*l_num_threads])); flops_vol = (double)num_quants * (double)mat_a_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_quants * (double)mat_b_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_quants * (double)mat_c_nnz * (double)num_cfr * 2.0; flops_vol += (double)num_modes * (double)mat_st_nnz * (double)num_cfr * 6.0; /* 3 star matrix mul */ printf("%fs time for vol (asm), min %f, max %f, avg %f, #amoks %llu, amok-threads ", l_total, time_min, time_max, time_avg, (unsigned long long)amoks[8*l_num_threads]); for ( i = 0; i < (int)l_num_threads; i++ ) { if ( amoks[8*i] != 0 ) { printf("%i,", i); } } printf("\n"); printf("%f GFLOPS for vol (asm)\n", ((double)num_elems * (double)num_reps * flops_vol) / (l_total * 1.0e9)); printf("%f GiB/s for vol (asm)\n", (double)((double)num_elems * (double)elem_size * 8.0 * 3.0 * 
(double)num_reps) / (l_total * 1024.0*1024.0*1024.0) );
  printf("done!\n\n");

  /* some empty lines at the end */
  printf("\n\n");

  return 0;
}
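/* Minimal standalone sketch of the "amok" handling above (HANDLE_AMOK path): after each
 * repetition, threads whose iteration time exceeds the average of the still-active threads
 * by more than 7% are flagged, and the element range is re-chunked across the remaining
 * threads. The names (detect_slow_workers, rebalance_range) and the plain flag array are
 * simplifications for illustration only, not the benchmark's padded stride-8 bookkeeping. */
#include <stddef.h>
#include <stdio.h>

static void detect_slow_workers( const double* times, int* flagged, size_t n_workers ) {
  double avg = 0.0;
  size_t active = 0, i;
  for ( i = 0; i < n_workers; i++ ) {
    if ( !flagged[i] ) { avg += times[i]; active++; }
  }
  if ( active == 0 ) return;
  avg /= (double)active;
  for ( i = 0; i < n_workers; i++ ) {
    /* same 7% threshold as amok_detect above */
    if ( !flagged[i] && times[i] > avg*1.07 ) flagged[i] = 1;
  }
}

static void rebalance_range( const int* flagged, size_t n_workers, size_t worksize,
                             size_t tid, size_t* start, size_t* end ) {
  size_t active = 0, rank = 0, chunk, i;
  for ( i = 0; i < n_workers; i++ ) {
    if ( !flagged[i] ) active++;
  }
  if ( flagged[tid] || active == 0 ) { *start = 0; *end = 0; return; }
  /* rank of this thread among the non-flagged threads */
  for ( i = 0; i < tid; i++ ) {
    if ( !flagged[i] ) rank++;
  }
  chunk  = (worksize + active - 1) / active; /* ceil(worksize/active), as in amok_balance */
  *start = (rank*chunk     < worksize) ? rank*chunk     : worksize;
  *end   = ((rank+1)*chunk < worksize) ? (rank+1)*chunk : worksize;
}

int main( void ) {
  double times[4] = { 1.00, 1.02, 1.31, 0.99 };  /* worker 2 is ~30% slower than the rest */
  int flagged[4] = { 0, 0, 0, 0 };
  size_t tid, start, end;
  detect_slow_workers( times, flagged, 4 );
  for ( tid = 0; tid < 4; tid++ ) {
    rebalance_range( flagged, 4, 1000, tid, &start, &end );
    printf("tid %lu: flagged=%d range=[%lu,%lu)\n", (unsigned long)tid, flagged[tid],
           (unsigned long)start, (unsigned long)end);
  }
  return 0;
}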
updater_quantile_hist.h
/*! * Copyright 2017-2022 by XGBoost Contributors * \file updater_quantile_hist.h * \brief use quantized feature values to construct a tree * \author Philip Cho, Tianqi Chen, Egor Smirnov */ #ifndef XGBOOST_TREE_UPDATER_QUANTILE_HIST_H_ #define XGBOOST_TREE_UPDATER_QUANTILE_HIST_H_ #include <dmlc/timer.h> #include <rabit/rabit.h> #include <xgboost/tree_updater.h> #include <algorithm> #include <limits> #include <memory> #include <string> #include <utility> #include <vector> #include "xgboost/data.h" #include "xgboost/json.h" #include "hist/evaluate_splits.h" #include "hist/histogram.h" #include "hist/expand_entry.h" #include "hist/param.h" #include "constraints.h" #include "./param.h" #include "./driver.h" #include "./split_evaluator.h" #include "../common/random.h" #include "../common/timer.h" #include "../common/hist_util.h" #include "../common/row_set.h" #include "../common/partition_builder.h" #include "../common/column_matrix.h" namespace xgboost { struct RandomReplace { public: // similar value as for minstd_rand static constexpr uint64_t kBase = 16807; static constexpr uint64_t kMod = static_cast<uint64_t>(1) << 63; using EngineT = std::linear_congruential_engine<uint64_t, kBase, 0, kMod>; /* Right-to-left binary method: https://en.wikipedia.org/wiki/Modular_exponentiation */ static uint64_t SimpleSkip(uint64_t exponent, uint64_t initial_seed, uint64_t base, uint64_t mod) { CHECK_LE(exponent, mod); uint64_t result = 1; while (exponent > 0) { if (exponent % 2 == 1) { result = (result * base) % mod; } base = (base * base) % mod; exponent = exponent >> 1; } // with result we can now find the new seed return (result * initial_seed) % mod; } template<typename Condition, typename ContainerData> static void MakeIf(Condition condition, const typename ContainerData::value_type replace_value, const uint64_t initial_seed, const size_t ibegin, const size_t iend, ContainerData* gpair) { ContainerData& gpair_ref = *gpair; const uint64_t displaced_seed = SimpleSkip(ibegin, initial_seed, kBase, kMod); EngineT eng(displaced_seed); for (size_t i = ibegin; i < iend; ++i) { if (condition(i, eng)) { gpair_ref[i] = replace_value; } } } }; namespace tree { class HistRowPartitioner { // heuristically chosen block size of parallel partitioning static constexpr size_t kPartitionBlockSize = 2048; // worker class that partition a block of rows common::PartitionBuilder<kPartitionBlockSize> partition_builder_; // storage for row index common::RowSetCollection row_set_collection_; /** * \brief Turn split values into discrete bin indices. */ static void FindSplitConditions(const std::vector<CPUExpandEntry>& nodes, const RegTree& tree, const GHistIndexMatrix& gmat, std::vector<int32_t>* split_conditions); /** * \brief Update the row set for new splits specifed by nodes. */ void AddSplitsToRowSet(const std::vector<CPUExpandEntry>& nodes, RegTree const* p_tree); public: bst_row_t base_rowid = 0; public: HistRowPartitioner(size_t n_samples, size_t base_rowid, int32_t n_threads) { row_set_collection_.Clear(); const size_t block_size = n_samples / n_threads + !!(n_samples % n_threads); dmlc::OMPException exc; std::vector<size_t>& row_indices = *row_set_collection_.Data(); row_indices.resize(n_samples); size_t* p_row_indices = row_indices.data(); // parallel initialization o f row indices. 
(std::iota) #pragma omp parallel num_threads(n_threads) { exc.Run([&]() { const size_t tid = omp_get_thread_num(); const size_t ibegin = tid * block_size; const size_t iend = std::min(static_cast<size_t>(ibegin + block_size), n_samples); for (size_t i = ibegin; i < iend; ++i) { p_row_indices[i] = i + base_rowid; } }); } row_set_collection_.Init(); this->base_rowid = base_rowid; } template <bool any_missing, bool any_cat> void UpdatePosition(GenericParameter const* ctx, GHistIndexMatrix const& gmat, common::ColumnMatrix const& column_matrix, std::vector<CPUExpandEntry> const& nodes, RegTree const* p_tree) { // 1. Find split condition for each split const size_t n_nodes = nodes.size(); std::vector<int32_t> split_conditions; FindSplitConditions(nodes, *p_tree, gmat, &split_conditions); // 2.1 Create a blocked space of size SUM(samples in each node) common::BlockedSpace2d space( n_nodes, [&](size_t node_in_set) { int32_t nid = nodes[node_in_set].nid; return row_set_collection_[nid].Size(); }, kPartitionBlockSize); // 2.2 Initialize the partition builder // allocate buffers for storage intermediate results by each thread partition_builder_.Init(space.Size(), n_nodes, [&](size_t node_in_set) { const int32_t nid = nodes[node_in_set].nid; const size_t size = row_set_collection_[nid].Size(); const size_t n_tasks = size / kPartitionBlockSize + !!(size % kPartitionBlockSize); return n_tasks; }); CHECK_EQ(base_rowid, gmat.base_rowid); // 2.3 Split elements of row_set_collection_ to left and right child-nodes for each node // Store results in intermediate buffers from partition_builder_ common::ParallelFor2d(space, ctx->Threads(), [&](size_t node_in_set, common::Range1d r) { size_t begin = r.begin(); const int32_t nid = nodes[node_in_set].nid; const size_t task_id = partition_builder_.GetTaskIdx(node_in_set, begin); partition_builder_.AllocateForTask(task_id); switch (column_matrix.GetTypeSize()) { case common::kUint8BinsTypeSize: partition_builder_.template Partition<uint8_t, any_missing, any_cat>( node_in_set, nid, r, split_conditions[node_in_set], gmat, column_matrix, *p_tree, row_set_collection_[nid].begin); break; case common::kUint16BinsTypeSize: partition_builder_.template Partition<uint16_t, any_missing, any_cat>( node_in_set, nid, r, split_conditions[node_in_set], gmat, column_matrix, *p_tree, row_set_collection_[nid].begin); break; case common::kUint32BinsTypeSize: partition_builder_.template Partition<uint32_t, any_missing, any_cat>( node_in_set, nid, r, split_conditions[node_in_set], gmat, column_matrix, *p_tree, row_set_collection_[nid].begin); break; default: // no default behavior CHECK(false) << column_matrix.GetTypeSize(); } }); // 3. Compute offsets to copy blocks of row-indexes // from partition_builder_ to row_set_collection_ partition_builder_.CalculateRowOffsets(); // 4. Copy elements from partition_builder_ to row_set_collection_ back // with updated row-indexes for each tree-node common::ParallelFor2d(space, ctx->Threads(), [&](size_t node_in_set, common::Range1d r) { const int32_t nid = nodes[node_in_set].nid; partition_builder_.MergeToArray(node_in_set, r.begin(), const_cast<size_t*>(row_set_collection_[nid].begin)); }); // 5. 
Add info about splits into row_set_collection_ AddSplitsToRowSet(nodes, p_tree); } auto const& Partitions() const { return row_set_collection_; } size_t Size() const { return std::distance(row_set_collection_.begin(), row_set_collection_.end()); } auto& operator[](bst_node_t nidx) { return row_set_collection_[nidx]; } auto const& operator[](bst_node_t nidx) const { return row_set_collection_[nidx]; } }; inline BatchParam HistBatch(TrainParam const& param) { return {param.max_bin, param.sparse_threshold}; } /*! \brief construct a tree using quantized feature values */ class QuantileHistMaker: public TreeUpdater { public: explicit QuantileHistMaker(ObjInfo task) : task_{task} { updater_monitor_.Init("QuantileHistMaker"); } void Configure(const Args& args) override; void Update(HostDeviceVector<GradientPair>* gpair, DMatrix* dmat, const std::vector<RegTree*>& trees) override; bool UpdatePredictionCache(const DMatrix *data, linalg::VectorView<float> out_preds) override; void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("train_param"), &this->param_); try { FromJson(config.at("cpu_hist_train_param"), &this->hist_maker_param_); } catch (std::out_of_range&) { // XGBoost model is from 1.1.x, so 'cpu_hist_train_param' is missing. // We add this compatibility check because it's just recently that we (developers) began // persuade R users away from using saveRDS() for model serialization. Hopefully, one day, // everyone will be using xgb.save(). LOG(WARNING) << "Attempted to load internal configuration for a model file that was generated " << "by a previous version of XGBoost. A likely cause for this warning is that the model " << "was saved with saveRDS() in R or pickle.dump() in Python. We strongly ADVISE AGAINST " << "using saveRDS() or pickle.dump() so that the model remains accessible in current and " << "upcoming XGBoost releases. Please use xgb.save() instead to preserve models for the " << "long term. 
For more details and explanation, see " << "https://xgboost.readthedocs.io/en/latest/tutorials/saving_model.html"; this->hist_maker_param_.UpdateAllowUnknown(Args{}); } } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["train_param"] = ToJson(param_); out["cpu_hist_train_param"] = ToJson(hist_maker_param_); } char const* Name() const override { return "grow_quantile_histmaker"; } protected: CPUHistMakerTrainParam hist_maker_param_; // training parameter TrainParam param_; // column accessor common::ColumnMatrix column_matrix_; DMatrix const* p_last_dmat_ {nullptr}; bool is_gmat_initialized_ {false}; // actual builder that runs the algorithm template<typename GradientSumT> struct Builder { public: using GradientPairT = xgboost::detail::GradientPairInternal<GradientSumT>; // constructor explicit Builder(const size_t n_trees, const TrainParam& param, std::unique_ptr<TreeUpdater> pruner, DMatrix const* fmat, ObjInfo task, GenericParameter const* ctx) : n_trees_(n_trees), param_(param), pruner_(std::move(pruner)), p_last_fmat_(fmat), histogram_builder_{new HistogramBuilder<GradientSumT, CPUExpandEntry>}, task_{task}, ctx_{ctx} { builder_monitor_.Init("Quantile::Builder"); } // update one tree, growing void Update(const GHistIndexMatrix& gmat, const common::ColumnMatrix& column_matrix, HostDeviceVector<GradientPair>* gpair, DMatrix* p_fmat, RegTree* p_tree); bool UpdatePredictionCache(const DMatrix* data, linalg::VectorView<float> out_preds); protected: // initialize temp data structure void InitData(const GHistIndexMatrix& gmat, const DMatrix& fmat, const RegTree& tree, std::vector<GradientPair>* gpair); size_t GetNumberOfTrees(); void InitSampling(const DMatrix& fmat, std::vector<GradientPair>* gpair); template <bool any_missing> void InitRoot(DMatrix* p_fmat, RegTree *p_tree, const std::vector<GradientPair> &gpair_h, int *num_leaves, std::vector<CPUExpandEntry> *expand); // Split nodes to 2 sets depending on amount of rows in each node // Histograms for small nodes will be built explicitly // Histograms for big nodes will be built by 'Subtraction Trick' void SplitSiblings(const std::vector<CPUExpandEntry>& nodes, std::vector<CPUExpandEntry>* nodes_to_evaluate, RegTree *p_tree); void AddSplitsToTree(const std::vector<CPUExpandEntry>& expand, RegTree *p_tree, int *num_leaves, std::vector<CPUExpandEntry>* nodes_for_apply_split); template <bool any_missing> void ExpandTree(const GHistIndexMatrix& gmat, const common::ColumnMatrix& column_matrix, DMatrix* p_fmat, RegTree* p_tree, const std::vector<GradientPair>& gpair_h); // --data fields-- const size_t n_trees_; const TrainParam& param_; std::shared_ptr<common::ColumnSampler> column_sampler_{ std::make_shared<common::ColumnSampler>()}; std::vector<GradientPair> gpair_local_; /*! \brief feature with least # of bins. to be used for dense specialization of InitNewNode() */ uint32_t fid_least_bins_; std::unique_ptr<TreeUpdater> pruner_; std::unique_ptr<HistEvaluator<GradientSumT, CPUExpandEntry>> evaluator_; // Right now there's only 1 partitioner in this vector, when external memory is fully // supported we will have number of partitioners equal to number of pages. 
std::vector<HistRowPartitioner> partitioner_; // back pointers to tree and data matrix const RegTree* p_last_tree_{nullptr}; DMatrix const* const p_last_fmat_; DMatrix* p_last_fmat_mutable_; // key is the node id which should be calculated by Subtraction Trick, value is the node which // provides the evidence for subtraction std::vector<CPUExpandEntry> nodes_for_subtraction_trick_; // list of nodes whose histograms would be built explicitly. std::vector<CPUExpandEntry> nodes_for_explicit_hist_build_; enum class DataLayout { kDenseDataZeroBased, kDenseDataOneBased, kSparseData }; DataLayout data_layout_; std::unique_ptr<HistogramBuilder<GradientSumT, CPUExpandEntry>> histogram_builder_; ObjInfo task_; // Context for number of threads GenericParameter const* ctx_; common::Monitor builder_monitor_; }; common::Monitor updater_monitor_; template<typename GradientSumT> void SetBuilder(const size_t n_trees, std::unique_ptr<Builder<GradientSumT>>*, DMatrix *dmat); template<typename GradientSumT> void CallBuilderUpdate(const std::unique_ptr<Builder<GradientSumT>>& builder, HostDeviceVector<GradientPair> *gpair, DMatrix *dmat, GHistIndexMatrix const& gmat, const std::vector<RegTree *> &trees); protected: std::unique_ptr<Builder<float>> float_builder_; std::unique_ptr<Builder<double>> double_builder_; std::unique_ptr<TreeUpdater> pruner_; ObjInfo task_; }; } // namespace tree } // namespace xgboost #endif // XGBOOST_TREE_UPDATER_QUANTILE_HIST_H_
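/* Standalone sketch (plain C, hypothetical names) of the skip-ahead trick behind
 * RandomReplace::SimpleSkip above: for a multiplicative LCG x_{n+1} = (a * x_n) mod m,
 * the state after n steps is (a^n mod m) * x_0 mod m, so a worker starting at row
 * `ibegin` can jump its generator there with O(log n) multiplications instead of
 * replaying ibegin draws. With m = 2^63, reducing the wrapped 64-bit products mod 2^63
 * afterwards is exact because 2^63 divides 2^64. */
#include <stdint.h>
#include <stdio.h>

static uint64_t pow_mod( uint64_t base, uint64_t exp, uint64_t mod ) {
  /* right-to-left binary exponentiation, as referenced in SimpleSkip */
  uint64_t result = 1;
  while ( exp > 0 ) {
    if ( exp & 1 ) result = (result * base) % mod;
    base = (base * base) % mod;
    exp >>= 1;
  }
  return result;
}

int main( void ) {
  const uint64_t a = 16807ULL;    /* kBase, the minstd-style multiplier */
  const uint64_t m = 1ULL << 63;  /* kMod */
  const uint64_t seed = 20171122ULL; /* arbitrary illustrative seed */
  const uint64_t n = 1000;
  uint64_t stepped = seed, i;
  for ( i = 0; i < n; i++ ) stepped = (stepped * a) % m;  /* replay n draws */
  {
    uint64_t skipped = (pow_mod(a, n, m) * seed) % m;     /* jump directly */
    printf("%s\n", (stepped == skipped) ? "skip matches replay" : "mismatch");
  }
  return 0;
}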
thread_demo.c
/* Generated by Cython 0.27.3 */ #define PY_SSIZE_T_CLEAN #include "Python.h" #ifndef Py_PYTHON_H #error Python headers needed to compile C extensions, please install development version of Python. #elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) #error Cython requires Python 2.6+ or Python 3.3+. #else #define CYTHON_ABI "0_27_3" #define CYTHON_FUTURE_DIVISION 0 #include <stddef.h> #ifndef offsetof #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) #endif #if !defined(WIN32) && !defined(MS_WINDOWS) #ifndef __stdcall #define __stdcall #endif #ifndef __cdecl #define __cdecl #endif #ifndef __fastcall #define __fastcall #endif #endif #ifndef DL_IMPORT #define DL_IMPORT(t) t #endif #ifndef DL_EXPORT #define DL_EXPORT(t) t #endif #define __PYX_COMMA , #ifndef HAVE_LONG_LONG #if PY_VERSION_HEX >= 0x02070000 #define HAVE_LONG_LONG #endif #endif #ifndef PY_LONG_LONG #define PY_LONG_LONG LONG_LONG #endif #ifndef Py_HUGE_VAL #define Py_HUGE_VAL HUGE_VAL #endif #ifdef PYPY_VERSION #define CYTHON_COMPILING_IN_PYPY 1 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 0 #undef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 0 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #if PY_VERSION_HEX < 0x03050000 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #undef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #undef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 1 #undef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 0 #undef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 0 #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #elif defined(PYSTON_VERSION) #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 1 #define CYTHON_COMPILING_IN_CPYTHON 0 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #undef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 0 #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #undef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 0 #undef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 0 #undef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT 0 #undef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE 0 #else #define CYTHON_COMPILING_IN_PYPY 0 #define CYTHON_COMPILING_IN_PYSTON 0 #define CYTHON_COMPILING_IN_CPYTHON 1 #ifndef CYTHON_USE_TYPE_SLOTS #define CYTHON_USE_TYPE_SLOTS 1 #endif #if PY_VERSION_HEX < 
0x02070000 #undef CYTHON_USE_PYTYPE_LOOKUP #define CYTHON_USE_PYTYPE_LOOKUP 0 #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) #define CYTHON_USE_PYTYPE_LOOKUP 1 #endif #if PY_MAJOR_VERSION < 3 #undef CYTHON_USE_ASYNC_SLOTS #define CYTHON_USE_ASYNC_SLOTS 0 #elif !defined(CYTHON_USE_ASYNC_SLOTS) #define CYTHON_USE_ASYNC_SLOTS 1 #endif #if PY_VERSION_HEX < 0x02070000 #undef CYTHON_USE_PYLONG_INTERNALS #define CYTHON_USE_PYLONG_INTERNALS 0 #elif !defined(CYTHON_USE_PYLONG_INTERNALS) #define CYTHON_USE_PYLONG_INTERNALS 1 #endif #ifndef CYTHON_USE_PYLIST_INTERNALS #define CYTHON_USE_PYLIST_INTERNALS 1 #endif #ifndef CYTHON_USE_UNICODE_INTERNALS #define CYTHON_USE_UNICODE_INTERNALS 1 #endif #if PY_VERSION_HEX < 0x030300F0 #undef CYTHON_USE_UNICODE_WRITER #define CYTHON_USE_UNICODE_WRITER 0 #elif !defined(CYTHON_USE_UNICODE_WRITER) #define CYTHON_USE_UNICODE_WRITER 1 #endif #ifndef CYTHON_AVOID_BORROWED_REFS #define CYTHON_AVOID_BORROWED_REFS 0 #endif #ifndef CYTHON_ASSUME_SAFE_MACROS #define CYTHON_ASSUME_SAFE_MACROS 1 #endif #ifndef CYTHON_UNPACK_METHODS #define CYTHON_UNPACK_METHODS 1 #endif #ifndef CYTHON_FAST_THREAD_STATE #define CYTHON_FAST_THREAD_STATE 1 #endif #ifndef CYTHON_FAST_PYCALL #define CYTHON_FAST_PYCALL 1 #endif #ifndef CYTHON_PEP489_MULTI_PHASE_INIT #define CYTHON_PEP489_MULTI_PHASE_INIT (0 && PY_VERSION_HEX >= 0x03050000) #endif #ifndef CYTHON_USE_TP_FINALIZE #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) #endif #endif #if !defined(CYTHON_FAST_PYCCALL) #define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) #endif #if CYTHON_USE_PYLONG_INTERNALS #include "longintrepr.h" #undef SHIFT #undef BASE #undef MASK #endif #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) #define Py_OptimizeFlag 0 #endif #define __PYX_BUILD_PY_SSIZE_T "n" #define CYTHON_FORMAT_SSIZE_T "z" #if PY_MAJOR_VERSION < 3 #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyClass_Type #else #define __Pyx_BUILTIN_MODULE_NAME "builtins" #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) #define __Pyx_DefaultClassType PyType_Type #endif #ifndef Py_TPFLAGS_CHECKTYPES #define Py_TPFLAGS_CHECKTYPES 0 #endif #ifndef Py_TPFLAGS_HAVE_INDEX #define Py_TPFLAGS_HAVE_INDEX 0 #endif #ifndef Py_TPFLAGS_HAVE_NEWBUFFER #define Py_TPFLAGS_HAVE_NEWBUFFER 0 #endif #ifndef Py_TPFLAGS_HAVE_FINALIZE #define Py_TPFLAGS_HAVE_FINALIZE 0 #endif #if PY_VERSION_HEX < 0x030700A0 || !defined(METH_FASTCALL) #ifndef METH_FASTCALL #define METH_FASTCALL 0x80 #endif typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject **args, Py_ssize_t nargs); typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject **args, Py_ssize_t nargs, PyObject *kwnames); #else #define __Pyx_PyCFunctionFast _PyCFunctionFast #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords #endif #if CYTHON_FAST_PYCCALL #define __Pyx_PyFastCFunction_Check(func)\ ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))))) #else #define __Pyx_PyFastCFunction_Check(func) 0 #endif #if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 #define __Pyx_PyThreadState_Current 
PyThreadState_GET() #elif PY_VERSION_HEX >= 0x03060000 #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() #elif PY_VERSION_HEX >= 0x03000000 #define __Pyx_PyThreadState_Current PyThreadState_GET() #else #define __Pyx_PyThreadState_Current _PyThreadState_Current #endif #if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) #define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) #else #define __Pyx_PyDict_NewPresized(n) PyDict_New() #endif #if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) #else #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) #endif #if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) #define CYTHON_PEP393_ENABLED 1 #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ 0 : _PyUnicode_Ready((PyObject *)(op))) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) #else #define CYTHON_PEP393_ENABLED 0 #define PyUnicode_1BYTE_KIND 1 #define PyUnicode_2BYTE_KIND 2 #define PyUnicode_4BYTE_KIND 4 #define __Pyx_PyUnicode_READY(op) (0) #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) #endif #if CYTHON_COMPILING_IN_PYPY #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) #else #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) #endif #if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) #define PyObject_Malloc(s) PyMem_Malloc(s) #define PyObject_Free(p) PyMem_Free(p) #define PyObject_Realloc(p) PyMem_Realloc(p) #endif #if CYTHON_COMPILING_IN_PYSTON #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) #else #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) #endif #define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) #define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None)) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) #else #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) #endif #if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) #define PyObject_ASCII(o) PyObject_Repr(o) #endif #if PY_MAJOR_VERSION >= 3 #define PyBaseString_Type PyUnicode_Type #define PyStringObject PyUnicodeObject #define PyString_Type PyUnicode_Type #define PyString_Check PyUnicode_Check #define PyString_CheckExact PyUnicode_CheckExact #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) #else #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) #endif #ifndef PySet_CheckExact #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) #endif #define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) #if PY_MAJOR_VERSION >= 3 #define PyIntObject PyLongObject #define PyInt_Type PyLong_Type #define PyInt_Check(op) PyLong_Check(op) #define PyInt_CheckExact(op) PyLong_CheckExact(op) #define PyInt_FromString PyLong_FromString #define PyInt_FromUnicode PyLong_FromUnicode #define PyInt_FromLong PyLong_FromLong #define PyInt_FromSize_t PyLong_FromSize_t #define PyInt_FromSsize_t PyLong_FromSsize_t #define PyInt_AsLong PyLong_AsLong #define PyInt_AS_LONG PyLong_AS_LONG #define PyInt_AsSsize_t PyLong_AsSsize_t #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask #define PyNumber_Int PyNumber_Long #endif #if PY_MAJOR_VERSION >= 3 #define PyBoolObject PyLongObject #endif #if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY #ifndef PyUnicode_InternFromString #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) #endif #endif #if PY_VERSION_HEX < 0x030200A4 typedef long Py_hash_t; #define __Pyx_PyInt_FromHash_t PyInt_FromLong #define __Pyx_PyInt_AsHash_t PyInt_AsLong #else #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t #endif #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyMethod_New(func, self, klass) ((self) ? 
PyMethod_New(func, self) : PyInstanceMethod_New(func)) #else #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) #endif #ifndef __has_attribute #define __has_attribute(x) 0 #endif #ifndef __has_cpp_attribute #define __has_cpp_attribute(x) 0 #endif #if CYTHON_USE_ASYNC_SLOTS #if PY_VERSION_HEX >= 0x030500B1 #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) #else #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) #endif #else #define __Pyx_PyType_AsAsync(obj) NULL #endif #ifndef __Pyx_PyAsyncMethodsStruct typedef struct { unaryfunc am_await; unaryfunc am_aiter; unaryfunc am_anext; } __Pyx_PyAsyncMethodsStruct; #endif #ifndef CYTHON_RESTRICT #if defined(__GNUC__) #define CYTHON_RESTRICT __restrict__ #elif defined(_MSC_VER) && _MSC_VER >= 1400 #define CYTHON_RESTRICT __restrict #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_RESTRICT restrict #else #define CYTHON_RESTRICT #endif #endif #ifndef CYTHON_UNUSED # if defined(__GNUC__) # if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif # elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) # define CYTHON_UNUSED __attribute__ ((__unused__)) # else # define CYTHON_UNUSED # endif #endif #ifndef CYTHON_MAYBE_UNUSED_VAR # if defined(__cplusplus) template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } # else # define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) # endif #endif #ifndef CYTHON_NCP_UNUSED # if CYTHON_COMPILING_IN_CPYTHON # define CYTHON_NCP_UNUSED # else # define CYTHON_NCP_UNUSED CYTHON_UNUSED # endif #endif #define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) #ifdef _MSC_VER #ifndef _MSC_STDINT_H_ #if _MSC_VER < 1300 typedef unsigned char uint8_t; typedef unsigned int uint32_t; #else typedef unsigned __int8 uint8_t; typedef unsigned __int32 uint32_t; #endif #endif #else #include <stdint.h> #endif #ifndef CYTHON_FALLTHROUGH #if defined(__cplusplus) && __cplusplus >= 201103L #if __has_cpp_attribute(fallthrough) #define CYTHON_FALLTHROUGH [[fallthrough]] #elif __has_cpp_attribute(clang::fallthrough) #define CYTHON_FALLTHROUGH [[clang::fallthrough]] #elif __has_cpp_attribute(gnu::fallthrough) #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] #endif #endif #ifndef CYTHON_FALLTHROUGH #if __has_attribute(fallthrough) #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) #else #define CYTHON_FALLTHROUGH #endif #endif #if defined(__clang__ ) && defined(__apple_build_version__) #if __apple_build_version__ < 7000000 #undef CYTHON_FALLTHROUGH #define CYTHON_FALLTHROUGH #endif #endif #endif #ifndef CYTHON_INLINE #if defined(__clang__) #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) #elif defined(__GNUC__) #define CYTHON_INLINE __inline__ #elif defined(_MSC_VER) #define CYTHON_INLINE __inline #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define CYTHON_INLINE inline #else #define CYTHON_INLINE #endif #endif #if defined(WIN32) || defined(MS_WINDOWS) #define _USE_MATH_DEFINES #endif #include <math.h> #ifdef NAN #define __PYX_NAN() ((float) NAN) #else static CYTHON_INLINE float __PYX_NAN() { float value; memset(&value, 0xFF, sizeof(value)); return value; } #endif #if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) #define __Pyx_truncl trunc #else #define __Pyx_truncl truncl #endif 
#define __PYX_ERR(f_index, lineno, Ln_error) \ { \ __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ } #ifndef __PYX_EXTERN_C #ifdef __cplusplus #define __PYX_EXTERN_C extern "C" #else #define __PYX_EXTERN_C extern #endif #endif #define __PYX_HAVE__thread_demo #define __PYX_HAVE_API__thread_demo #include <math.h> #include "pythread.h" #include <string.h> #include <stdlib.h> #include <stdio.h> #include "pystate.h" #ifdef _OPENMP #include <omp.h> #endif /* _OPENMP */ #if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) #define CYTHON_WITHOUT_ASSERTIONS #endif typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; #define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 #define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT 0 #define __PYX_DEFAULT_STRING_ENCODING "" #define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString #define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #define __Pyx_uchar_cast(c) ((unsigned char)c) #define __Pyx_long_cast(x) ((long)x) #define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ (sizeof(type) < sizeof(Py_ssize_t)) ||\ (sizeof(type) > sizeof(Py_ssize_t) &&\ likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX) &&\ (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ v == (type)PY_SSIZE_T_MIN))) ||\ (sizeof(type) == sizeof(Py_ssize_t) &&\ (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ v == (type)PY_SSIZE_T_MAX))) ) #if defined (__cplusplus) && __cplusplus >= 201103L #include <cstdlib> #define __Pyx_sst_abs(value) std::abs(value) #elif SIZEOF_INT >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) abs(value) #elif SIZEOF_LONG >= SIZEOF_SIZE_T #define __Pyx_sst_abs(value) labs(value) #elif defined (_MSC_VER) #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L #define __Pyx_sst_abs(value) llabs(value) #elif defined (__GNUC__) #define __Pyx_sst_abs(value) __builtin_llabs(value) #else #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) #endif static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); #define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) #define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) #define __Pyx_PyBytes_FromString PyBytes_FromString #define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); #if PY_MAJOR_VERSION < 3 #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize #else #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize #endif #define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) #define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) #define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) #define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) #define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) #define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) #define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) #define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { const Py_UNICODE *u_end = u; while (*u_end++) ; return (size_t)(u_end - u - 1); } #define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) #define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode #define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode #define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) #define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) #define __Pyx_PyBool_FromLong(b) ((b) ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False)) static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); #define __Pyx_PySequence_Tuple(obj)\ (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); #if CYTHON_ASSUME_SAFE_MACROS #define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) #else #define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) #endif #define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? 
__Pyx_NewRef(x) : PyNumber_Long(x)) #else #define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) #endif #define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII static int __Pyx_sys_getdefaultencoding_not_ascii; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; PyObject* ascii_chars_u = NULL; PyObject* ascii_chars_b = NULL; const char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; if (strcmp(default_encoding_c, "ascii") == 0) { __Pyx_sys_getdefaultencoding_not_ascii = 0; } else { char ascii_chars[128]; int c; for (c = 0; c < 128; c++) { ascii_chars[c] = c; } __Pyx_sys_getdefaultencoding_not_ascii = 1; ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); if (!ascii_chars_u) goto bad; ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { PyErr_Format( PyExc_ValueError, "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", default_encoding_c); goto bad; } Py_DECREF(ascii_chars_u); Py_DECREF(ascii_chars_b); } Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); Py_XDECREF(ascii_chars_u); Py_XDECREF(ascii_chars_b); return -1; } #endif #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) #else #define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) #if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT static char* __PYX_DEFAULT_STRING_ENCODING; static int __Pyx_init_sys_getdefaultencoding_params(void) { PyObject* sys; PyObject* default_encoding = NULL; char* default_encoding_c; sys = PyImport_ImportModule("sys"); if (!sys) goto bad; default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); Py_DECREF(sys); if (!default_encoding) goto bad; default_encoding_c = PyBytes_AsString(default_encoding); if (!default_encoding_c) goto bad; __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c)); if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); Py_DECREF(default_encoding); return 0; bad: Py_XDECREF(default_encoding); return -1; } #endif #endif /* Test for GCC > 2.95 */ #if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #else /* !__GNUC__ or GCC < 2.95 */ #define likely(x) (x) #define unlikely(x) (x) #endif /* __GNUC__ */ static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } static PyObject *__pyx_m = NULL; static PyObject *__pyx_d; static PyObject *__pyx_b; static PyObject *__pyx_cython_runtime; static PyObject *__pyx_empty_tuple; static PyObject *__pyx_empty_bytes; static PyObject *__pyx_empty_unicode; static int __pyx_lineno; static int __pyx_clineno = 0; static const char * __pyx_cfilenm= __FILE__; static 
const char *__pyx_filename; static const char *__pyx_f[] = { "thread_demo.pyx", "stringsource", }; /* NoFastGil.proto */ #define __Pyx_PyGILState_Ensure PyGILState_Ensure #define __Pyx_PyGILState_Release PyGILState_Release #define __Pyx_FastGIL_Remember() #define __Pyx_FastGIL_Forget() #define __Pyx_FastGilFuncInit() /* ForceInitThreads.proto */ #ifndef __PYX_FORCE_INIT_THREADS #define __PYX_FORCE_INIT_THREADS 0 #endif /* MemviewSliceStruct.proto */ struct __pyx_memoryview_obj; typedef struct { struct __pyx_memoryview_obj *memview; char *data; Py_ssize_t shape[8]; Py_ssize_t strides[8]; Py_ssize_t suboffsets[8]; } __Pyx_memviewslice; #define __Pyx_MemoryView_Len(m) (m.shape[0]) /* Atomics.proto */ #include <pythread.h> #ifndef CYTHON_ATOMICS #define CYTHON_ATOMICS 1 #endif #define __pyx_atomic_int_type int #if CYTHON_ATOMICS && __GNUC__ >= 4 && (__GNUC_MINOR__ > 1 ||\ (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL >= 2)) &&\ !defined(__i386__) #define __pyx_atomic_incr_aligned(value, lock) __sync_fetch_and_add(value, 1) #define __pyx_atomic_decr_aligned(value, lock) __sync_fetch_and_sub(value, 1) #ifdef __PYX_DEBUG_ATOMICS #warning "Using GNU atomics" #endif #elif CYTHON_ATOMICS && defined(_MSC_VER) && 0 #include <Windows.h> #undef __pyx_atomic_int_type #define __pyx_atomic_int_type LONG #define __pyx_atomic_incr_aligned(value, lock) InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #pragma message ("Using MSVC atomics") #endif #elif CYTHON_ATOMICS && (defined(__ICC) || defined(__INTEL_COMPILER)) && 0 #define __pyx_atomic_incr_aligned(value, lock) _InterlockedIncrement(value) #define __pyx_atomic_decr_aligned(value, lock) _InterlockedDecrement(value) #ifdef __PYX_DEBUG_ATOMICS #warning "Using Intel atomics" #endif #else #undef CYTHON_ATOMICS #define CYTHON_ATOMICS 0 #ifdef __PYX_DEBUG_ATOMICS #warning "Not using atomics" #endif #endif typedef volatile __pyx_atomic_int_type __pyx_atomic_int; #if CYTHON_ATOMICS #define __pyx_add_acquisition_count(memview)\ __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview), memview->lock) #else #define __pyx_add_acquisition_count(memview)\ __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #define __pyx_sub_acquisition_count(memview)\ __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock) #endif /* BufferFormatStructs.proto */ #define IS_UNSIGNED(type) (((type) -1) > 0) struct __Pyx_StructField_; #define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0) typedef struct { const char* name; struct __Pyx_StructField_* fields; size_t size; size_t arraysize[8]; int ndim; char typegroup; char is_unsigned; int flags; } __Pyx_TypeInfo; typedef struct __Pyx_StructField_ { __Pyx_TypeInfo* type; const char* name; size_t offset; } __Pyx_StructField; typedef struct { __Pyx_StructField* field; size_t parent_offset; } __Pyx_BufFmt_StackElem; typedef struct { __Pyx_StructField root; __Pyx_BufFmt_StackElem* head; size_t fmt_offset; size_t new_count, enc_count; size_t struct_alignment; int is_complex; char enc_type; char new_packmode; char enc_packmode; char is_valid_array; } __Pyx_BufFmt_Context; /*--- Type declarations ---*/ struct __pyx_array_obj; struct __pyx_MemviewEnum_obj; struct __pyx_memoryview_obj; struct __pyx_memoryviewslice_obj; /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef 
class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_array_obj { PyObject_HEAD struct __pyx_vtabstruct_array *__pyx_vtab; char *data; Py_ssize_t len; char *format; int ndim; Py_ssize_t *_shape; Py_ssize_t *_strides; Py_ssize_t itemsize; PyObject *mode; PyObject *_format; void (*callback_free_data)(void *); int free_data; int dtype_is_object; }; /* "View.MemoryView":277 * * @cname('__pyx_MemviewEnum') * cdef class Enum(object): # <<<<<<<<<<<<<< * cdef object name * def __init__(self, name): */ struct __pyx_MemviewEnum_obj { PyObject_HEAD PyObject *name; }; /* "View.MemoryView":328 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_memoryview_obj { PyObject_HEAD struct __pyx_vtabstruct_memoryview *__pyx_vtab; PyObject *obj; PyObject *_size; PyObject *_array_interface; PyThread_type_lock lock; __pyx_atomic_int acquisition_count[2]; __pyx_atomic_int *acquisition_count_aligned_p; Py_buffer view; int flags; int dtype_is_object; __Pyx_TypeInfo *typeinfo; }; /* "View.MemoryView":953 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_memoryviewslice_obj { struct __pyx_memoryview_obj __pyx_base; __Pyx_memviewslice from_slice; PyObject *from_object; PyObject *(*to_object_func)(char *); int (*to_dtype_func)(char *, PyObject *); }; /* "View.MemoryView":103 * * @cname("__pyx_array") * cdef class array: # <<<<<<<<<<<<<< * * cdef: */ struct __pyx_vtabstruct_array { PyObject *(*get_memview)(struct __pyx_array_obj *); }; static struct __pyx_vtabstruct_array *__pyx_vtabptr_array; /* "View.MemoryView":328 * * @cname('__pyx_memoryview') * cdef class memoryview(object): # <<<<<<<<<<<<<< * * cdef object obj */ struct __pyx_vtabstruct_memoryview { char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *); PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *); PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *); PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *); }; static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview; /* "View.MemoryView":953 * * @cname('__pyx_memoryviewslice') * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< * "Internal class for passing memoryview slices to Python" * */ struct __pyx_vtabstruct__memoryviewslice { struct __pyx_vtabstruct_memoryview __pyx_base; }; static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice; /* --- Runtime support code (head) --- */ /* Refnanny.proto */ #ifndef CYTHON_REFNANNY #define CYTHON_REFNANNY 0 #endif #if CYTHON_REFNANNY typedef struct { void (*INCREF)(void*, PyObject*, int); void (*DECREF)(void*, PyObject*, int); void (*GOTREF)(void*, PyObject*, int); void (*GIVEREF)(void*, PyObject*, int); void* (*SetupContext)(const char*, int, const char*); void (*FinishContext)(void**); } __Pyx_RefNannyAPIStruct; static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; #ifdef WITH_THREAD #define __Pyx_RefNannySetupContext(name, 
acquire_gil)\ if (acquire_gil) {\ PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ PyGILState_Release(__pyx_gilstate_save);\ } else {\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ } #else #define __Pyx_RefNannySetupContext(name, acquire_gil)\ __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) #endif #define __Pyx_RefNannyFinishContext()\ __Pyx_RefNanny->FinishContext(&__pyx_refnanny) #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) #else #define __Pyx_RefNannyDeclarations #define __Pyx_RefNannySetupContext(name, acquire_gil) #define __Pyx_RefNannyFinishContext() #define __Pyx_INCREF(r) Py_INCREF(r) #define __Pyx_DECREF(r) Py_DECREF(r) #define __Pyx_GOTREF(r) #define __Pyx_GIVEREF(r) #define __Pyx_XINCREF(r) Py_XINCREF(r) #define __Pyx_XDECREF(r) Py_XDECREF(r) #define __Pyx_XGOTREF(r) #define __Pyx_XGIVEREF(r) #endif #define __Pyx_XDECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_XDECREF(tmp);\ } while (0) #define __Pyx_DECREF_SET(r, v) do {\ PyObject *tmp = (PyObject *) r;\ r = v; __Pyx_DECREF(tmp);\ } while (0) #define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) #define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) /* PyObjectGetAttrStr.proto */ #if CYTHON_USE_TYPE_SLOTS static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { PyTypeObject* tp = Py_TYPE(obj); if (likely(tp->tp_getattro)) return tp->tp_getattro(obj, attr_name); #if PY_MAJOR_VERSION < 3 if (likely(tp->tp_getattr)) return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); #endif return PyObject_GetAttr(obj, attr_name); } #else #define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) #endif /* GetBuiltinName.proto */ static PyObject *__Pyx_GetBuiltinName(PyObject *name); /* GetModuleGlobalName.proto */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name); /* PyCFunctionFastCall.proto */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); #else #define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) #endif /* PyFunctionFastCall.proto */ #if CYTHON_FAST_PYCALL #define __Pyx_PyFunction_FastCall(func, args, nargs)\ __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs); #else #define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) #endif #endif /* PyObjectCall.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, 
PyObject *kw); #else #define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) #endif /* PyObjectCallMethO.proto */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); #endif /* PyObjectCallOneArg.proto */ static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); /* BufferIndexError.proto */ static void __Pyx_RaiseBufferIndexError(int axis); /* BufferIndexErrorNogil.proto */ static void __Pyx_RaiseBufferIndexErrorNogil(int axis); /* PyThreadStateGet.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; #define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; #define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type #else #define __Pyx_PyThreadState_declare #define __Pyx_PyThreadState_assign #define __Pyx_PyErr_Occurred() PyErr_Occurred() #endif /* PyErrFetchRestore.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) #define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) #define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) #define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) #else #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #endif #else #define __Pyx_PyErr_Clear() PyErr_Clear() #define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) #define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) #define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) #define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) #endif /* MemviewSliceInit.proto */ #define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d #define __Pyx_MEMVIEW_DIRECT 1 #define __Pyx_MEMVIEW_PTR 2 #define __Pyx_MEMVIEW_FULL 4 #define __Pyx_MEMVIEW_CONTIG 8 #define __Pyx_MEMVIEW_STRIDED 16 #define __Pyx_MEMVIEW_FOLLOW 32 #define __Pyx_IS_C_CONTIG 1 #define __Pyx_IS_F_CONTIG 2 static int __Pyx_init_memviewslice( struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference); static CYTHON_INLINE int __pyx_add_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( __pyx_atomic_int *acquisition_count, PyThread_type_lock lock); #define __pyx_get_slice_count_pointer(memview) (memview->acquisition_count_aligned_p) #define __pyx_get_slice_count(memview) (*__pyx_get_slice_count_pointer(memview)) #define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) #define __PYX_XDEC_MEMVIEW(slice, have_gil) __Pyx_XDEC_MEMVIEW(slice, have_gil, __LINE__) static 
CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *, int, int); /* RaiseArgTupleInvalid.proto */ static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); /* RaiseDoubleKeywords.proto */ static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); /* ParseKeywords.proto */ static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ const char* function_name); /* ArgTypeTest.proto */ #define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ ((likely((Py_TYPE(obj) == type) | (none_allowed && (obj == Py_None)))) ? 1 :\ __Pyx__ArgTypeTest(obj, type, name, exact)) static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); /* RaiseException.proto */ static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); /* IncludeStringH.proto */ #include <string.h> /* BytesEquals.proto */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals); /* UnicodeEquals.proto */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals); /* StrEquals.proto */ #if PY_MAJOR_VERSION >= 3 #define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals #else #define __Pyx_PyString_Equals __Pyx_PyBytes_Equals #endif /* None.proto */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); /* UnaryNegOverflows.proto */ #define UNARY_NEG_WOULD_OVERFLOW(x)\ (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ /* GetAttr.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); /* decode_c_string_utf16.proto */ static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 0; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16LE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = -1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } static CYTHON_INLINE PyObject *__Pyx_PyUnicode_DecodeUTF16BE(const char *s, Py_ssize_t size, const char *errors) { int byteorder = 1; return PyUnicode_DecodeUTF16(s, size, errors, &byteorder); } /* decode_c_string.proto */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)); /* PyErrExceptionMatches.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); #else #define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) #endif /* GetAttr3.proto */ static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); /* RaiseTooManyValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); /* RaiseNeedMoreValuesToUnpack.proto */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t 
index); /* RaiseNoneIterError.proto */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); /* ExtTypeTest.proto */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); /* SaveResetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); #else #define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) #define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) #endif /* GetException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); #endif /* SwapException.proto */ #if CYTHON_FAST_THREAD_STATE #define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); #endif /* Import.proto */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); /* GetItemInt.proto */ #define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ (is_list ? 
(PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ __Pyx_GetItemInt_Generic(o, to_py_func(i)))) #define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); #define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, int wraparound, int boundscheck); static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, int wraparound, int boundscheck); static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ /* ListCompAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len)) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) #endif /* PyIntBinop.proto */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, long intval, int inplace); #else #define __Pyx_PyInt_AddObjC(op1, op2, intval, inplace)\ (inplace ? 
PyNumber_InPlaceAdd(op1, op2) : PyNumber_Add(op1, op2)) #endif /* ListExtend.proto */ static CYTHON_INLINE int __Pyx_PyList_Extend(PyObject* L, PyObject* v) { #if CYTHON_COMPILING_IN_CPYTHON PyObject* none = _PyList_Extend((PyListObject*)L, v); if (unlikely(!none)) return -1; Py_DECREF(none); return 0; #else return PyList_SetSlice(L, PY_SSIZE_T_MAX, PY_SSIZE_T_MAX, v); #endif } /* ListAppend.proto */ #if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { PyListObject* L = (PyListObject*) list; Py_ssize_t len = Py_SIZE(list); if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { Py_INCREF(x); PyList_SET_ITEM(list, len, x); Py_SIZE(list) = len+1; return 0; } return PyList_Append(list, x); } #else #define __Pyx_PyList_Append(L,x) PyList_Append(L,x) #endif /* None.proto */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); /* None.proto */ static CYTHON_INLINE long __Pyx_div_long(long, long); /* WriteUnraisableException.proto */ static void __Pyx_WriteUnraisable(const char *name, int clineno, int lineno, const char *filename, int full_traceback, int nogil); /* ImportFrom.proto */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); /* HasAttr.proto */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); /* SetVTable.proto */ static int __Pyx_SetVtable(PyObject *dict, void *vtable); /* SetupReduce.proto */ static int __Pyx_setup_reduce(PyObject* type_obj); /* CLineInTraceback.proto */ #ifdef CYTHON_CLINE_IN_TRACEBACK #define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) #else static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); #endif /* CodeObjectCache.proto */ typedef struct { PyCodeObject* code_object; int code_line; } __Pyx_CodeObjectCacheEntry; struct __Pyx_CodeObjectCache { int count; int max_count; __Pyx_CodeObjectCacheEntry* entries; }; static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); static PyCodeObject *__pyx_find_code_object(int code_line); static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); /* AddTraceback.proto */ static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename); #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); static void __Pyx_ReleaseBuffer(Py_buffer *view); #else #define __Pyx_GetBuffer PyObject_GetBuffer #define __Pyx_ReleaseBuffer PyBuffer_Release #endif /* BufferStructDeclare.proto */ typedef struct { Py_ssize_t shape, strides, suboffsets; } __Pyx_Buf_DimInfo; typedef struct { size_t refcount; Py_buffer pybuffer; } __Pyx_Buffer; typedef struct { __Pyx_Buffer *rcbuffer; char *data; __Pyx_Buf_DimInfo diminfo[8]; } __Pyx_LocalBuf_ND; /* MemviewSliceIsContig.proto */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); /* OverlappingSlices.proto */ static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize); /* Capsule.proto */ static CYTHON_INLINE PyObject *__pyx_capsule_create(void *p, const char *sig); /* IsLittleEndian.proto */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); /* BufferFormatCheck.proto */ static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); static void 
__Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type); /* TypeInfoCompare.proto */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); /* MemviewSliceValidateAndInit.proto */ static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj); /* ObjectToMemviewSlice.proto */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); /* MemviewDtypeToObject.proto */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp); static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj); /* MemviewSliceCopyTemplate.proto */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object); /* CIntFromPy.proto */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); /* CIntFromPy.proto */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); /* CIntToPy.proto */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); /* CIntFromPy.proto */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); /* FastTypeChecks.proto */ #if CYTHON_COMPILING_IN_CPYTHON #define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); #else #define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) #define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) #define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) #endif /* CheckBinaryVersion.proto */ static int __Pyx_check_binary_version(void); /* InitStrings.proto */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj 
*__pyx_v_self, char *__pyx_v_itemp); /* proto*/ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ /* Module declarations from 'libc.math' */ /* Module declarations from 'thread_demo' */ static PyTypeObject *__pyx_array_type = 0; static PyTypeObject *__pyx_MemviewEnum_type = 0; static PyTypeObject *__pyx_memoryview_type = 0; static PyTypeObject *__pyx_memoryviewslice_type = 0; static PyObject *generic = 0; static PyObject *strided = 0; static PyObject *indirect = 0; static PyObject *contiguous = 0; static PyObject *indirect_contiguous = 0; static int __pyx_memoryview_thread_locks_used; static PyThread_type_lock __pyx_memoryview_thread_locks[8]; static struct __pyx_array_obj *__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ static void *__pyx_align_pointer(void *, size_t); /*proto*/ static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ static PyObject *_unellipsify(PyObject *, int); /*proto*/ static PyObject *assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ static int __pyx_memoryview_err_dim(PyObject *, char *, int); /*proto*/ static int __pyx_memoryview_err(PyObject *, char *); /*proto*/ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, 
Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ static __Pyx_TypeInfo __Pyx_TypeInfo_double = { "double", NULL, sizeof(double), { 0 }, 0, 'R', 0, 0 }; #define __Pyx_MODULE_NAME "thread_demo" extern int __pyx_module_is_main_thread_demo; int __pyx_module_is_main_thread_demo = 0; /* Implementation of 'thread_demo' */ static PyObject *__pyx_builtin_ValueError; static PyObject *__pyx_builtin_MemoryError; static PyObject *__pyx_builtin_enumerate; static PyObject *__pyx_builtin_range; static PyObject *__pyx_builtin_TypeError; static PyObject *__pyx_builtin_Ellipsis; static PyObject *__pyx_builtin_id; static PyObject *__pyx_builtin_IndexError; static const char __pyx_k_N[] = "N"; static const char __pyx_k_O[] = "O"; static const char __pyx_k_X[] = "X"; static const char __pyx_k_Y[] = "Y"; static const char __pyx_k_c[] = "c"; static const char __pyx_k_i[] = "i"; static const char __pyx_k_id[] = "id"; static const char __pyx_k_np[] = "np"; static const char __pyx_k_new[] = "__new__"; static const char __pyx_k_obj[] = "obj"; static const char __pyx_k_base[] = "base"; static const char __pyx_k_dict[] = "__dict__"; static const char __pyx_k_main[] = "__main__"; static const char __pyx_k_mode[] = "mode"; static const char __pyx_k_name[] = "name"; static const char __pyx_k_ndim[] = "ndim"; static const char __pyx_k_pack[] = "pack"; static const char __pyx_k_size[] = "size"; static const char __pyx_k_step[] = "step"; static const char __pyx_k_stop[] = "stop"; static const char __pyx_k_test[] = "__test__"; static const char __pyx_k_ASCII[] = "ASCII"; static const char __pyx_k_class[] = "__class__"; static const char __pyx_k_error[] = "error"; static const char __pyx_k_flags[] = "flags"; static const char __pyx_k_numpy[] = "numpy"; static const char __pyx_k_range[] = "range"; static const char __pyx_k_shape[] = "shape"; static const char __pyx_k_start[] = "start"; static const char __pyx_k_zeros[] = "zeros"; static const char __pyx_k_encode[] = "encode"; static const char __pyx_k_format[] = "format"; static const char __pyx_k_import[] = "__import__"; static const char __pyx_k_name_2[] = "__name__"; static const char __pyx_k_pickle[] = "pickle"; static const char __pyx_k_reduce[] = "__reduce__"; static const char __pyx_k_struct[] = "struct"; static const char __pyx_k_unpack[] = "unpack"; static const char __pyx_k_update[] = "update"; static const char __pyx_k_fortran[] = "fortran"; static const char __pyx_k_memview[] = "memview"; static const char __pyx_k_Ellipsis[] = "Ellipsis"; static const char __pyx_k_getstate[] = "__getstate__"; static const char __pyx_k_itemsize[] = "itemsize"; static const char __pyx_k_pyx_type[] = "__pyx_type"; static const char __pyx_k_setstate[] = "__setstate__"; static const char __pyx_k_TypeError[] = "TypeError"; static const char __pyx_k_c_array_f[] = "c_array_f"; static const char __pyx_k_enumerate[] = "enumerate"; static const char __pyx_k_pyx_state[] = "__pyx_state"; static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; static const char __pyx_k_IndexError[] = "IndexError"; static const char __pyx_k_ValueError[] = "ValueError"; static const char 
__pyx_k_pyx_result[] = "__pyx_result"; static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; static const char __pyx_k_MemoryError[] = "MemoryError"; static const char __pyx_k_PickleError[] = "PickleError"; static const char __pyx_k_thread_demo[] = "thread_demo"; static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; static const char __pyx_k_stringsource[] = "stringsource"; static const char __pyx_k_pyx_getbuffer[] = "__pyx_getbuffer"; static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; static const char __pyx_k_thread_demo_pyx[] = "thread_demo.pyx"; static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; static const char __pyx_k_Cannot_index_with_type_s[] = "Cannot index with type '%s'"; static const char __pyx_k_Invalid_shape_in_axis_d_d[] = "Invalid shape in axis %d: %d."; static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; static const char __pyx_k_Created_on_Tue_Mar_20_09_22_22[] = "\nCreated on Tue Mar 20 09:22:22 2018\n\n@author: akiranagamori\n"; static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; static const char __pyx_k_Incompatible_checksums_s_vs_0xb0[] = "Incompatible checksums (%s vs 0xb068931 = (name))"; static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] = "Invalid mode, expected 'c' or 'fortran', got %s"; static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis %d)"; static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension %d (got %d and %d)"; static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; static PyObject *__pyx_n_s_ASCII; static PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; static PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; static PyObject *__pyx_kp_s_Cannot_index_with_type_s; static PyObject 
*__pyx_n_s_Ellipsis; static PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; static PyObject *__pyx_kp_s_Incompatible_checksums_s_vs_0xb0; static PyObject *__pyx_n_s_IndexError; static PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; static PyObject *__pyx_kp_s_Invalid_mode_expected_c_or_fortr; static PyObject *__pyx_kp_s_Invalid_shape_in_axis_d_d; static PyObject *__pyx_n_s_MemoryError; static PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; static PyObject *__pyx_kp_s_MemoryView_of_r_object; static PyObject *__pyx_n_s_N; static PyObject *__pyx_n_b_O; static PyObject *__pyx_kp_s_Out_of_bounds_on_buffer_access_a; static PyObject *__pyx_n_s_PickleError; static PyObject *__pyx_n_s_TypeError; static PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; static PyObject *__pyx_n_s_ValueError; static PyObject *__pyx_n_s_View_MemoryView; static PyObject *__pyx_n_s_X; static PyObject *__pyx_n_s_Y; static PyObject *__pyx_n_s_allocate_buffer; static PyObject *__pyx_n_s_base; static PyObject *__pyx_n_s_c; static PyObject *__pyx_n_u_c; static PyObject *__pyx_n_s_c_array_f; static PyObject *__pyx_n_s_class; static PyObject *__pyx_n_s_cline_in_traceback; static PyObject *__pyx_kp_s_contiguous_and_direct; static PyObject *__pyx_kp_s_contiguous_and_indirect; static PyObject *__pyx_n_s_dict; static PyObject *__pyx_n_s_dtype_is_object; static PyObject *__pyx_n_s_encode; static PyObject *__pyx_n_s_enumerate; static PyObject *__pyx_n_s_error; static PyObject *__pyx_n_s_flags; static PyObject *__pyx_n_s_format; static PyObject *__pyx_n_s_fortran; static PyObject *__pyx_n_u_fortran; static PyObject *__pyx_n_s_getstate; static PyObject *__pyx_kp_s_got_differing_extents_in_dimensi; static PyObject *__pyx_n_s_i; static PyObject *__pyx_n_s_id; static PyObject *__pyx_n_s_import; static PyObject *__pyx_n_s_itemsize; static PyObject *__pyx_kp_s_itemsize_0_for_cython_array; static PyObject *__pyx_n_s_main; static PyObject *__pyx_n_s_memview; static PyObject *__pyx_n_s_mode; static PyObject *__pyx_n_s_name; static PyObject *__pyx_n_s_name_2; static PyObject *__pyx_n_s_ndim; static PyObject *__pyx_n_s_new; static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; static PyObject *__pyx_n_s_np; static PyObject *__pyx_n_s_numpy; static PyObject *__pyx_n_s_obj; static PyObject *__pyx_n_s_pack; static PyObject *__pyx_n_s_pickle; static PyObject *__pyx_n_s_pyx_PickleError; static PyObject *__pyx_n_s_pyx_checksum; static PyObject *__pyx_n_s_pyx_getbuffer; static PyObject *__pyx_n_s_pyx_result; static PyObject *__pyx_n_s_pyx_state; static PyObject *__pyx_n_s_pyx_type; static PyObject *__pyx_n_s_pyx_unpickle_Enum; static PyObject *__pyx_n_s_pyx_vtable; static PyObject *__pyx_n_s_range; static PyObject *__pyx_n_s_reduce; static PyObject *__pyx_n_s_reduce_cython; static PyObject *__pyx_n_s_reduce_ex; static PyObject *__pyx_n_s_setstate; static PyObject *__pyx_n_s_setstate_cython; static PyObject *__pyx_n_s_shape; static PyObject *__pyx_n_s_size; static PyObject *__pyx_n_s_start; static PyObject *__pyx_n_s_step; static PyObject *__pyx_n_s_stop; static PyObject *__pyx_kp_s_strided_and_direct; static PyObject *__pyx_kp_s_strided_and_direct_or_indirect; static PyObject *__pyx_kp_s_strided_and_indirect; static PyObject *__pyx_kp_s_stringsource; static PyObject *__pyx_n_s_struct; static PyObject *__pyx_n_s_test; static PyObject *__pyx_n_s_thread_demo; static PyObject *__pyx_kp_s_thread_demo_pyx; static PyObject *__pyx_kp_s_unable_to_allocate_array_data; static PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; static 
PyObject *__pyx_n_s_unpack; static PyObject *__pyx_n_s_update; static PyObject *__pyx_n_s_zeros; static PyObject *__pyx_pf_11thread_demo_c_array_f(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_X); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto */ static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject 
*__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ static PyObject *__pyx_int_0; static PyObject *__pyx_int_1; static PyObject 
*__pyx_int_184977713; static PyObject *__pyx_int_neg_1; static PyObject *__pyx_tuple_; static PyObject *__pyx_tuple__2; static PyObject *__pyx_tuple__3; static PyObject *__pyx_tuple__4; static PyObject *__pyx_tuple__5; static PyObject *__pyx_tuple__6; static PyObject *__pyx_tuple__7; static PyObject *__pyx_tuple__8; static PyObject *__pyx_tuple__9; static PyObject *__pyx_slice__14; static PyObject *__pyx_slice__15; static PyObject *__pyx_slice__16; static PyObject *__pyx_tuple__10; static PyObject *__pyx_tuple__11; static PyObject *__pyx_tuple__12; static PyObject *__pyx_tuple__13; static PyObject *__pyx_tuple__17; static PyObject *__pyx_tuple__18; static PyObject *__pyx_tuple__19; static PyObject *__pyx_tuple__20; static PyObject *__pyx_tuple__22; static PyObject *__pyx_tuple__23; static PyObject *__pyx_tuple__24; static PyObject *__pyx_tuple__25; static PyObject *__pyx_tuple__26; static PyObject *__pyx_tuple__27; static PyObject *__pyx_codeobj__21; static PyObject *__pyx_codeobj__28; /* "thread_demo.pyx":14 * from libc.math cimport exp as c_exp * * def c_array_f(double[:] X): # <<<<<<<<<<<<<< * * cdef int N = X.shape[0] */ /* Python wrapper */ static PyObject *__pyx_pw_11thread_demo_1c_array_f(PyObject *__pyx_self, PyObject *__pyx_arg_X); /*proto*/ static PyMethodDef __pyx_mdef_11thread_demo_1c_array_f = {"c_array_f", (PyCFunction)__pyx_pw_11thread_demo_1c_array_f, METH_O, 0}; static PyObject *__pyx_pw_11thread_demo_1c_array_f(PyObject *__pyx_self, PyObject *__pyx_arg_X) { __Pyx_memviewslice __pyx_v_X = { 0, 0, { 0 }, { 0 }, { 0 } }; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("c_array_f (wrapper)", 0); assert(__pyx_arg_X); { __pyx_v_X = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_arg_X); if (unlikely(!__pyx_v_X.memview)) __PYX_ERR(0, 14, __pyx_L3_error) } goto __pyx_L4_argument_unpacking_done; __pyx_L3_error:; __Pyx_AddTraceback("thread_demo.c_array_f", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_11thread_demo_c_array_f(__pyx_self, __pyx_v_X); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_11thread_demo_c_array_f(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_X) { int __pyx_v_N; __Pyx_memviewslice __pyx_v_Y = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_memviewslice __pyx_t_6 = { 0, 0, { 0 }, { 0 }, { 0 } }; int __pyx_t_7; int __pyx_t_8; int __pyx_t_9; Py_ssize_t __pyx_t_10; int __pyx_t_11; int __pyx_t_12; Py_ssize_t __pyx_t_13; Py_ssize_t __pyx_t_14; Py_ssize_t __pyx_t_15; __Pyx_RefNannySetupContext("c_array_f", 0); /* "thread_demo.pyx":16 * def c_array_f(double[:] X): * * cdef int N = X.shape[0] # <<<<<<<<<<<<<< * cdef double[:] Y = np.zeros(N) * cdef int i */ __pyx_v_N = (__pyx_v_X.shape[0]); /* "thread_demo.pyx":17 * * cdef int N = X.shape[0] * cdef double[:] Y = np.zeros(N) # <<<<<<<<<<<<<< * cdef int i * */ __pyx_t_2 = __Pyx_GetModuleGlobalName(__pyx_n_s_np); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_N); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_4)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_4) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_2}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_4, __pyx_t_2}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = NULL; __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_6 = __Pyx_PyObject_to_MemoryviewSlice_ds_double(__pyx_t_1); if (unlikely(!__pyx_t_6.memview)) __PYX_ERR(0, 17, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_v_Y = __pyx_t_6; __pyx_t_6.memview = NULL; __pyx_t_6.data = NULL; /* "thread_demo.pyx":20 * cdef int i * * for i in prange(N, nogil = True): # <<<<<<<<<<<<<< * if X[i] > 0.5: * Y[i] = c_exp(X[i]) */ { #ifdef WITH_THREAD PyThreadState *_save; Py_UNBLOCK_THREADS __Pyx_FastGIL_Remember(); #endif /*try:*/ { __pyx_t_7 = __pyx_v_N; if (1 == 0) abort(); { int __pyx_parallel_temp0 = ((int)0xbad0bad0); const char *__pyx_parallel_filename = NULL; int __pyx_parallel_lineno = 0, __pyx_parallel_clineno = 0; PyObject *__pyx_parallel_exc_type = NULL, *__pyx_parallel_exc_value = NULL, *__pyx_parallel_exc_tb = NULL; int __pyx_parallel_why; __pyx_parallel_why = 0; #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) (x) #define unlikely(x) (x) #endif __pyx_t_9 = (__pyx_t_7 - 0 + 1 - 1/abs(1)) / 1; if (__pyx_t_9 > 0) { #ifdef _OPENMP #pragma omp parallel private(__pyx_t_10, __pyx_t_11, __pyx_t_12, __pyx_t_13, __pyx_t_14, __pyx_t_15) private(__pyx_filename, __pyx_lineno, __pyx_clineno) shared(__pyx_parallel_why, __pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb) #endif /* _OPENMP */ { #ifdef _OPENMP #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif Py_BEGIN_ALLOW_THREADS #endif /* _OPENMP */ #ifdef _OPENMP #pragma omp for firstprivate(__pyx_v_i) lastprivate(__pyx_v_i) #endif /* _OPENMP */ for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_9; 
__pyx_t_8++){ if (__pyx_parallel_why < 2) { __pyx_v_i = (int)(0 + 1 * __pyx_t_8); /* "thread_demo.pyx":21 * * for i in prange(N, nogil = True): * if X[i] > 0.5: # <<<<<<<<<<<<<< * Y[i] = c_exp(X[i]) * else: */ __pyx_t_10 = __pyx_v_i; __pyx_t_11 = -1; if (__pyx_t_10 < 0) { __pyx_t_10 += __pyx_v_X.shape[0]; if (unlikely(__pyx_t_10 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_10 >= __pyx_v_X.shape[0])) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexErrorNogil(__pyx_t_11); __PYX_ERR(0, 21, __pyx_L8_error) } __pyx_t_12 = (((*((double *) ( /* dim=0 */ (__pyx_v_X.data + __pyx_t_10 * __pyx_v_X.strides[0]) ))) > 0.5) != 0); if (__pyx_t_12) { /* "thread_demo.pyx":22 * for i in prange(N, nogil = True): * if X[i] > 0.5: * Y[i] = c_exp(X[i]) # <<<<<<<<<<<<<< * else: * Y[i] = 0 */ __pyx_t_13 = __pyx_v_i; __pyx_t_11 = -1; if (__pyx_t_13 < 0) { __pyx_t_13 += __pyx_v_X.shape[0]; if (unlikely(__pyx_t_13 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_13 >= __pyx_v_X.shape[0])) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexErrorNogil(__pyx_t_11); __PYX_ERR(0, 22, __pyx_L8_error) } __pyx_t_14 = __pyx_v_i; __pyx_t_11 = -1; if (__pyx_t_14 < 0) { __pyx_t_14 += __pyx_v_Y.shape[0]; if (unlikely(__pyx_t_14 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_14 >= __pyx_v_Y.shape[0])) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexErrorNogil(__pyx_t_11); __PYX_ERR(0, 22, __pyx_L8_error) } *((double *) ( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_14 * __pyx_v_Y.strides[0]) )) = exp((*((double *) ( /* dim=0 */ (__pyx_v_X.data + __pyx_t_13 * __pyx_v_X.strides[0]) )))); /* "thread_demo.pyx":21 * * for i in prange(N, nogil = True): * if X[i] > 0.5: # <<<<<<<<<<<<<< * Y[i] = c_exp(X[i]) * else: */ goto __pyx_L10; } /* "thread_demo.pyx":24 * Y[i] = c_exp(X[i]) * else: * Y[i] = 0 # <<<<<<<<<<<<<< * * return Y */ /*else*/ { __pyx_t_15 = __pyx_v_i; __pyx_t_11 = -1; if (__pyx_t_15 < 0) { __pyx_t_15 += __pyx_v_Y.shape[0]; if (unlikely(__pyx_t_15 < 0)) __pyx_t_11 = 0; } else if (unlikely(__pyx_t_15 >= __pyx_v_Y.shape[0])) __pyx_t_11 = 0; if (unlikely(__pyx_t_11 != -1)) { __Pyx_RaiseBufferIndexErrorNogil(__pyx_t_11); __PYX_ERR(0, 24, __pyx_L8_error) } *((double *) ( /* dim=0 */ (__pyx_v_Y.data + __pyx_t_15 * __pyx_v_Y.strides[0]) )) = 0.0; } __pyx_L10:; goto __pyx_L12; __pyx_L8_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif #ifdef _OPENMP #pragma omp flush(__pyx_parallel_exc_type) #endif /* _OPENMP */ if (!__pyx_parallel_exc_type) { __Pyx_ErrFetchWithState(&__pyx_parallel_exc_type, &__pyx_parallel_exc_value, &__pyx_parallel_exc_tb); __pyx_parallel_filename = __pyx_filename; __pyx_parallel_lineno = __pyx_lineno; __pyx_parallel_clineno = __pyx_clineno; __Pyx_GOTREF(__pyx_parallel_exc_type); } #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_parallel_why = 4; goto __pyx_L11; __pyx_L11:; #ifdef _OPENMP #pragma omp critical(__pyx_parallel_lastprivates0) #endif /* _OPENMP */ { __pyx_parallel_temp0 = __pyx_v_i; } __pyx_L12:; #ifdef _OPENMP #pragma omp flush(__pyx_parallel_why) #endif /* _OPENMP */ } } #ifdef _OPENMP Py_END_ALLOW_THREADS #else { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif #endif /* _OPENMP */ /* Clean up any temporaries */ #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif #ifndef _OPENMP } #endif /* _OPENMP */ } } if (__pyx_parallel_exc_type) { /* This may have been overridden by a continue, 
break or return in another thread. Prefer the error. */ __pyx_parallel_why = 4; } if (__pyx_parallel_why) { __pyx_v_i = __pyx_parallel_temp0; switch (__pyx_parallel_why) { case 4: { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_GIVEREF(__pyx_parallel_exc_type); __Pyx_ErrRestoreWithState(__pyx_parallel_exc_type, __pyx_parallel_exc_value, __pyx_parallel_exc_tb); __pyx_filename = __pyx_parallel_filename; __pyx_lineno = __pyx_parallel_lineno; __pyx_clineno = __pyx_parallel_clineno; #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } goto __pyx_L4_error; } } } #if ((defined(__APPLE__) || defined(__OSX__)) && (defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))))) #undef likely #undef unlikely #define likely(x) __builtin_expect(!!(x), 1) #define unlikely(x) __builtin_expect(!!(x), 0) #endif } /* "thread_demo.pyx":20 * cdef int i * * for i in prange(N, nogil = True): # <<<<<<<<<<<<<< * if X[i] > 0.5: * Y[i] = c_exp(X[i]) */ /*finally:*/ { /*normal exit:*/{ #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L5; } __pyx_L4_error: { #ifdef WITH_THREAD __Pyx_FastGIL_Forget(); Py_BLOCK_THREADS #endif goto __pyx_L1_error; } __pyx_L5:; } } /* "thread_demo.pyx":26 * Y[i] = 0 * * return Y # <<<<<<<<<<<<<< */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_Y, 1, (PyObject *(*)(char *)) __pyx_memview_get_double, (int (*)(char *, PyObject *)) __pyx_memview_set_double, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 26, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "thread_demo.pyx":14 * from libc.math cimport exp as c_exp * * def c_array_f(double[:] X): # <<<<<<<<<<<<<< * * cdef int N = X.shape[0] */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __PYX_XDEC_MEMVIEW(&__pyx_t_6, 1); __Pyx_AddTraceback("thread_demo.c_array_f", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __PYX_XDEC_MEMVIEW(&__pyx_v_X, 1); __PYX_XDEC_MEMVIEW(&__pyx_v_Y, 1); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* Python wrapper */ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_shape = 0; Py_ssize_t __pyx_v_itemsize; PyObject *__pyx_v_format = 0; PyObject *__pyx_v_mode = 0; int __pyx_v_allocate_buffer; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; PyObject* values[5] = {0,0,0,0,0}; values[3] = ((PyObject *)__pyx_n_s_c); if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_shape)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_itemsize)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 120, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_format)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 120, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 3: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_mode); if (value) { values[3] = value; kw_args--; } } CYTHON_FALLTHROUGH; case 4: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_allocate_buffer); if (value) { values[4] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 120, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); CYTHON_FALLTHROUGH; case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); CYTHON_FALLTHROUGH; case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_shape = ((PyObject*)values[0]); __pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 120, __pyx_L3_error) __pyx_v_format = values[2]; __pyx_v_mode = values[3]; if (values[4]) { __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 121, __pyx_L3_error) } else { /* "View.MemoryView":121 * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< * * cdef int idx */ __pyx_v_allocate_buffer = ((int)1); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 120, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 120, __pyx_L1_error) if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 120, __pyx_L1_error) } __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ goto __pyx_L0; __pyx_L1_error:; __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } 
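/*
 * Editorial note -- not part of the Cython-generated code.  The function below
 * is the generated body of cython.array.__cinit__ (View.MemoryView lines
 * 120-180, quoted in the interleaved comments).  After validating `shape`,
 * `itemsize` and `format`, it allocates the shape and stride arrays as one
 * PyObject_Malloc block of 2*ndim*sizeof(Py_ssize_t), with `_strides`
 * pointing ndim entries past `_shape`; __dealloc__ later releases both with a
 * single PyObject_Free(self._shape).  The sketch below illustrates only that
 * layout trick.  It is a hedged illustration: the `demo_` names are invented
 * for this note, it relies on Python.h (already included by this file), and
 * the `#if 0` guard keeps it out of compilation.
 */
#if 0
static int demo_alloc_shape_and_strides(Py_ssize_t ndim,
                                        Py_ssize_t **shape_out,
                                        Py_ssize_t **strides_out)
{
    /* one contiguous block: [ shape[0..ndim-1] | strides[0..ndim-1] ] */
    Py_ssize_t *block =
        (Py_ssize_t *) PyObject_Malloc(sizeof(Py_ssize_t) * (size_t) ndim * 2);
    if (!block) return -1;            /* caller raises MemoryError           */
    *shape_out   = block;             /* first ndim entries hold the shape   */
    *strides_out = block + ndim;      /* next ndim entries hold the strides  */
    return 0;
}
#endif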
static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { int __pyx_v_idx; Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_dim; PyObject **__pyx_v_p; char __pyx_v_order; int __pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; int __pyx_t_7; Py_ssize_t __pyx_t_8; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; __Pyx_RefNannySetupContext("__cinit__", 0); __Pyx_INCREF(__pyx_v_format); /* "View.MemoryView":127 * cdef PyObject **p * * self.ndim = <int> len(shape) # <<<<<<<<<<<<<< * self.itemsize = itemsize * */ if (unlikely(__pyx_v_shape == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 127, __pyx_L1_error) } __pyx_t_1 = PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 127, __pyx_L1_error) __pyx_v_self->ndim = ((int)__pyx_t_1); /* "View.MemoryView":128 * * self.ndim = <int> len(shape) * self.itemsize = itemsize # <<<<<<<<<<<<<< * * if not self.ndim: */ __pyx_v_self->itemsize = __pyx_v_itemsize; /* "View.MemoryView":130 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ __pyx_t_2 = ((!(__pyx_v_self->ndim != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 131, __pyx_L1_error) /* "View.MemoryView":130 * self.itemsize = itemsize * * if not self.ndim: # <<<<<<<<<<<<<< * raise ValueError("Empty shape tuple for cython.array") * */ } /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ __pyx_t_2 = ((__pyx_v_itemsize <= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 134, __pyx_L1_error) /* "View.MemoryView":133 * raise ValueError("Empty shape tuple for cython.array") * * if itemsize <= 0: # <<<<<<<<<<<<<< * raise ValueError("itemsize <= 0 for cython.array") * */ } /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ __pyx_t_2 = PyBytes_Check(__pyx_v_format); __pyx_t_4 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_tuple__3, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":136 * raise ValueError("itemsize <= 0 for cython.array") * * if not isinstance(format, bytes): # <<<<<<<<<<<<<< * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string */ } /* "View.MemoryView":138 * if not isinstance(format, bytes): * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< * self.format = self._format * */ if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_v_format)->tp_name), 0))) __PYX_ERR(1, 138, __pyx_L1_error) __pyx_t_5 = __pyx_v_format; __Pyx_INCREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_5); __Pyx_GOTREF(__pyx_v_self->_format); __Pyx_DECREF(__pyx_v_self->_format); __pyx_v_self->_format = ((PyObject*)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":139 * format = format.encode('ASCII') * self._format = format # keep a reference to the byte string * self.format = self._format # <<<<<<<<<<<<<< * * */ if (unlikely(__pyx_v_self->_format == Py_None)) { PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); __PYX_ERR(1, 139, __pyx_L1_error) } __pyx_t_6 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(1, 139, __pyx_L1_error) __pyx_v_self->format = __pyx_t_6; /* "View.MemoryView":142 * * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< * self._strides = self._shape + self.ndim * */ __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); /* "View.MemoryView":143 * * self._shape = <Py_ssize_t *> PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< * * if not self._shape: */ __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ __pyx_t_4 = ((!(__pyx_v_self->_shape != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 146, __pyx_L1_error) /* "View.MemoryView":145 * self._strides = self._shape + self.ndim * * if not self._shape: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate shape and strides.") * */ } /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ __pyx_t_7 = 0; __pyx_t_5 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_5); __pyx_t_1 = 0; for (;;) { if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_5)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_5, __pyx_t_1); __Pyx_INCREF(__pyx_t_3); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(1, 149, __pyx_L1_error) #else __pyx_t_3 = PySequence_ITEM(__pyx_t_5, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __pyx_t_8 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_8 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_dim = __pyx_t_8; __pyx_v_idx = __pyx_t_7; __pyx_t_7 = (__pyx_t_7 + 1); /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ __pyx_t_4 = ((__pyx_v_dim <= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":151 * for idx, dim in enumerate(shape): * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) # <<<<<<<<<<<<<< * self._shape[idx] = dim * */ __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_idx); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_9 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_10 = PyTuple_New(2); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 1, __pyx_t_9); __pyx_t_3 = 0; __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_t_10); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __pyx_t_10 = PyTuple_New(1); if (unlikely(!__pyx_t_10)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_10); __Pyx_GIVEREF(__pyx_t_9); PyTuple_SET_ITEM(__pyx_t_10, 0, __pyx_t_9); __pyx_t_9 = 0; __pyx_t_9 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_10, NULL); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 151, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; __Pyx_Raise(__pyx_t_9, 0, 0, 0); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __PYX_ERR(1, 151, __pyx_L1_error) /* "View.MemoryView":150 * * for idx, dim in enumerate(shape): * if dim <= 0: # <<<<<<<<<<<<<< * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim */ } /* "View.MemoryView":152 * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." % (idx, dim)) * self._shape[idx] = dim # <<<<<<<<<<<<<< * * cdef char order */ (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; /* "View.MemoryView":149 * * * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< * if dim <= 0: * raise ValueError("Invalid shape in axis %d: %d." 
% (idx, dim)) */ } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 155, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":156 * cdef char order * if mode == 'fortran': * order = b'F' # <<<<<<<<<<<<<< * self.mode = u'fortran' * elif mode == 'c': */ __pyx_v_order = 'F'; /* "View.MemoryView":157 * if mode == 'fortran': * order = b'F' * self.mode = u'fortran' # <<<<<<<<<<<<<< * elif mode == 'c': * order = b'C' */ __Pyx_INCREF(__pyx_n_u_fortran); __Pyx_GIVEREF(__pyx_n_u_fortran); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_fortran; /* "View.MemoryView":155 * * cdef char order * if mode == 'fortran': # <<<<<<<<<<<<<< * order = b'F' * self.mode = u'fortran' */ goto __pyx_L10; } /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ __pyx_t_4 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 158, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":159 * self.mode = u'fortran' * elif mode == 'c': * order = b'C' # <<<<<<<<<<<<<< * self.mode = u'c' * else: */ __pyx_v_order = 'C'; /* "View.MemoryView":160 * elif mode == 'c': * order = b'C' * self.mode = u'c' # <<<<<<<<<<<<<< * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) */ __Pyx_INCREF(__pyx_n_u_c); __Pyx_GIVEREF(__pyx_n_u_c); __Pyx_GOTREF(__pyx_v_self->mode); __Pyx_DECREF(__pyx_v_self->mode); __pyx_v_self->mode = __pyx_n_u_c; /* "View.MemoryView":158 * order = b'F' * self.mode = u'fortran' * elif mode == 'c': # <<<<<<<<<<<<<< * order = b'C' * self.mode = u'c' */ goto __pyx_L10; } /* "View.MemoryView":162 * self.mode = u'c' * else: * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) # <<<<<<<<<<<<<< * * self.len = fill_contig_strides_array(self._shape, self._strides, */ /*else*/ { __pyx_t_5 = __Pyx_PyString_Format(__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_v_mode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_9 = PyTuple_New(1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_5); __pyx_t_5 = 0; __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_9, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 162, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 162, __pyx_L1_error) } __pyx_L10:; /* "View.MemoryView":164 * raise ValueError("Invalid mode, expected 'c' or 'fortran', got %s" % mode) * * self.len = fill_contig_strides_array(self._shape, self._strides, # <<<<<<<<<<<<<< * itemsize, self.ndim, order) * */ __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); /* "View.MemoryView":167 * itemsize, self.ndim, order) * * self.free_data = allocate_buffer # <<<<<<<<<<<<<< * self.dtype_is_object = format == b'O' * if allocate_buffer: */ __pyx_v_self->free_data = __pyx_v_allocate_buffer; /* "View.MemoryView":168 * * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' # 
<<<<<<<<<<<<<< * if allocate_buffer: * */ __pyx_t_5 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 168, __pyx_L1_error) __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 168, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_self->dtype_is_object = __pyx_t_4; /* "View.MemoryView":169 * self.free_data = allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ __pyx_t_4 = (__pyx_v_allocate_buffer != 0); if (__pyx_t_4) { /* "View.MemoryView":172 * * * self.data = <char *>malloc(self.len) # <<<<<<<<<<<<<< * if not self.data: * raise MemoryError("unable to allocate array data.") */ __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ __pyx_t_4 = ((!(__pyx_v_self->data != 0)) != 0); if (__pyx_t_4) { /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 174, __pyx_L1_error) /* "View.MemoryView":173 * * self.data = <char *>malloc(self.len) * if not self.data: # <<<<<<<<<<<<<< * raise MemoryError("unable to allocate array data.") * */ } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ __pyx_t_4 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_4) { /* "View.MemoryView":177 * * if self.dtype_is_object: * p = <PyObject **> self.data # <<<<<<<<<<<<<< * for i in range(self.len / itemsize): * p[i] = Py_None */ __pyx_v_p = ((PyObject **)__pyx_v_self->data); /* "View.MemoryView":178 * if self.dtype_is_object: * p = <PyObject **> self.data * for i in range(self.len / itemsize): # <<<<<<<<<<<<<< * p[i] = Py_None * Py_INCREF(Py_None) */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 178, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 178, __pyx_L1_error) } __pyx_t_1 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_itemsize); for (__pyx_t_8 = 0; __pyx_t_8 < __pyx_t_1; __pyx_t_8+=1) { __pyx_v_i = __pyx_t_8; /* "View.MemoryView":179 * p = <PyObject **> self.data * for i in range(self.len / itemsize): * p[i] = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ (__pyx_v_p[__pyx_v_i]) = Py_None; /* "View.MemoryView":180 * for i in range(self.len / itemsize): * p[i] = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * @cname('getbuffer') */ Py_INCREF(Py_None); } /* "View.MemoryView":176 * raise MemoryError("unable to allocate array data.") * * if self.dtype_is_object: # <<<<<<<<<<<<<< * p = <PyObject **> self.data * for i in range(self.len / itemsize): */ } /* "View.MemoryView":169 * self.free_data = 
allocate_buffer * self.dtype_is_object = format == b'O' * if allocate_buffer: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":120 * cdef bint dtype_is_object * * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< * mode="c", bint allocate_buffer=True): * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_9); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_format); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_v_bufmode; int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; char *__pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; Py_ssize_t *__pyx_t_7; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":184 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 # <<<<<<<<<<<<<< * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = -1; /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 185, __pyx_L1_error) __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":186 * cdef int bufmode = -1 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS */ __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":185 * def __getbuffer__(self, Py_buffer *info, int flags): * cdef int bufmode = -1 * if self.mode == u"c": # <<<<<<<<<<<<<< * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": */ goto __pyx_L3; } /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ __pyx_t_2 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 
187, __pyx_L1_error) __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":188 * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") */ __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); /* "View.MemoryView":187 * if self.mode == u"c": * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * elif self.mode == u"fortran": # <<<<<<<<<<<<<< * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): */ } __pyx_L3:; /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ __pyx_t_1 = ((!((__pyx_v_flags & __pyx_v_bufmode) != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":190 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 190, __pyx_L1_error) /* "View.MemoryView":189 * elif self.mode == u"fortran": * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): # <<<<<<<<<<<<<< * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data */ } /* "View.MemoryView":191 * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data # <<<<<<<<<<<<<< * info.len = self.len * info.ndim = self.ndim */ __pyx_t_4 = __pyx_v_self->data; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":192 * raise ValueError("Can only create a buffer that is contiguous in memory.") * info.buf = self.data * info.len = self.len # <<<<<<<<<<<<<< * info.ndim = self.ndim * info.shape = self._shape */ __pyx_t_5 = __pyx_v_self->len; __pyx_v_info->len = __pyx_t_5; /* "View.MemoryView":193 * info.buf = self.data * info.len = self.len * info.ndim = self.ndim # <<<<<<<<<<<<<< * info.shape = self._shape * info.strides = self._strides */ __pyx_t_6 = __pyx_v_self->ndim; __pyx_v_info->ndim = __pyx_t_6; /* "View.MemoryView":194 * info.len = self.len * info.ndim = self.ndim * info.shape = self._shape # <<<<<<<<<<<<<< * info.strides = self._strides * info.suboffsets = NULL */ __pyx_t_7 = __pyx_v_self->_shape; __pyx_v_info->shape = __pyx_t_7; /* "View.MemoryView":195 * info.ndim = self.ndim * info.shape = self._shape * info.strides = self._strides # <<<<<<<<<<<<<< * info.suboffsets = NULL * info.itemsize = self.itemsize */ __pyx_t_7 = __pyx_v_self->_strides; __pyx_v_info->strides = __pyx_t_7; /* "View.MemoryView":196 * info.shape = self._shape * info.strides = self._strides * info.suboffsets = NULL # <<<<<<<<<<<<<< * info.itemsize = self.itemsize * info.readonly = 0 */ __pyx_v_info->suboffsets = NULL; /* "View.MemoryView":197 * info.strides = self._strides * info.suboffsets = NULL * info.itemsize = self.itemsize # <<<<<<<<<<<<<< * info.readonly = 0 * */ __pyx_t_5 = __pyx_v_self->itemsize; __pyx_v_info->itemsize = __pyx_t_5; /* "View.MemoryView":198 * 
info.suboffsets = NULL * info.itemsize = self.itemsize * info.readonly = 0 # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ __pyx_v_info->readonly = 0; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":201 * * if flags & PyBUF_FORMAT: * info.format = self.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_4 = __pyx_v_self->format; __pyx_v_info->format = __pyx_t_4; /* "View.MemoryView":200 * info.readonly = 0 * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.format * else: */ goto __pyx_L5; } /* "View.MemoryView":203 * info.format = self.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.obj = self */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L5:; /* "View.MemoryView":205 * info.format = NULL * * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":183 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * cdef int bufmode = -1 * if self.mode == u"c": */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; if (__pyx_v_info != NULL && __pyx_v_info->obj != NULL) { __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = NULL; } goto __pyx_L2; __pyx_L0:; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __pyx_L2:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* Python wrapper */ static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ __pyx_t_1 = ((__pyx_v_self->callback_free_data != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":211 * def __dealloc__(array self): * if self.callback_free_data != NULL: * self.callback_free_data(self.data) # <<<<<<<<<<<<<< * elif self.free_data: * if self.dtype_is_object: */ __pyx_v_self->callback_free_data(__pyx_v_self->data); /* "View.MemoryView":210 * * def __dealloc__(array self): * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< * self.callback_free_data(self.data) * elif self.free_data: */ goto 
__pyx_L3; } /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ __pyx_t_1 = (__pyx_v_self->free_data != 0); if (__pyx_t_1) { /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":214 * elif self.free_data: * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, # <<<<<<<<<<<<<< * self._strides, self.ndim, False) * free(self.data) */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); /* "View.MemoryView":213 * self.callback_free_data(self.data) * elif self.free_data: * if self.dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) */ } /* "View.MemoryView":216 * refcount_objects_in_slice(self.data, self._shape, * self._strides, self.ndim, False) * free(self.data) # <<<<<<<<<<<<<< * PyObject_Free(self._shape) * */ free(__pyx_v_self->data); /* "View.MemoryView":212 * if self.callback_free_data != NULL: * self.callback_free_data(self.data) * elif self.free_data: # <<<<<<<<<<<<<< * if self.dtype_is_object: * refcount_objects_in_slice(self.data, self._shape, */ } __pyx_L3:; /* "View.MemoryView":217 * self._strides, self.ndim, False) * free(self.data) * PyObject_Free(self._shape) # <<<<<<<<<<<<<< * * @property */ PyObject_Free(__pyx_v_self->_shape); /* "View.MemoryView":209 * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") * * def __dealloc__(array self): # <<<<<<<<<<<<<< * if self.callback_free_data != NULL: * self.callback_free_data(self.data) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":221 * @property * def memview(self): * return self.get_memview() # <<<<<<<<<<<<<< * * @cname('get_memview') */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":220 * * @property * def memview(self): # <<<<<<<<<<<<<< * return self.get_memview() * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", 
__pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_memview", 0); /* "View.MemoryView":225 * @cname('get_memview') * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< * return memoryview(self, flags, self.dtype_is_object) * */ __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); /* "View.MemoryView":226 * cdef get_memview(self): * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":224 * * @cname('get_memview') * cdef get_memview(self): # <<<<<<<<<<<<<< * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE * return memoryview(self, flags, self.dtype_is_object) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":228 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* Python wrapper */ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":229 * * def __len__(self): * return self._shape[0] # <<<<<<<<<<<<<< * * 
def __getattr__(self, attr): */ __pyx_r = (__pyx_v_self->_shape[0]); goto __pyx_L0; /* "View.MemoryView":228 * return memoryview(self, flags, self.dtype_is_object) * * def __len__(self): # <<<<<<<<<<<<<< * return self._shape[0] * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":231 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* Python wrapper */ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getattr__", 0); /* "View.MemoryView":232 * * def __getattr__(self, attr): * return getattr(self.memview, attr) # <<<<<<<<<<<<<< * * def __getitem__(self, item): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 232, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":231 * return self._shape[0] * * def __getattr__(self, attr): # <<<<<<<<<<<<<< * return getattr(self.memview, attr) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":234 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* Python wrapper */ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":235 * * def __getitem__(self, item): * return self.memview[item] # <<<<<<<<<<<<<< * * def __setitem__(self, item, value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if 
(unlikely(!__pyx_t_1)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 235, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":234 * return getattr(self.memview, attr) * * def __getitem__(self, item): # <<<<<<<<<<<<<< * return self.memview[item] * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":237 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* Python wrapper */ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setitem__", 0); /* "View.MemoryView":238 * * def __setitem__(self, item, value): * self.memview[item] = value # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 238, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (unlikely(PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0)) __PYX_ERR(1, 238, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":237 * return self.memview[item] * * def __setitem__(self, item, value): # <<<<<<<<<<<<<< * self.memview[item] = value * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; 
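    /*
     * Editorial note -- not part of the Cython-generated code.  This
     * __reduce_cython__ unconditionally raises
     * TypeError("no default __reduce__ due to non-trivial __cinit__"): the
     * cython.array helper class opts out of pickling, presumably because its
     * __cinit__ needs shape/itemsize/format arguments that a default reduce
     * protocol could not restore.
     */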
__Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":242 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_mode, 
char *__pyx_v_buf) { struct __pyx_array_obj *__pyx_v_result = 0; struct __pyx_array_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("array_cwrapper", 0); /* "View.MemoryView":246 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ __pyx_t_1 = ((__pyx_v_buf == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":247 * * if buf == NULL: * result = array(shape, itemsize, format, mode.decode('ASCII')) # <<<<<<<<<<<<<< * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_4); __pyx_t_2 = 0; __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_5, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 247, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":246 * cdef array result * * if buf == NULL: # <<<<<<<<<<<<<< * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: */ goto __pyx_L3; } /* "View.MemoryView":249 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ /*else*/ { __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_mode, 0, strlen(__pyx_v_mode), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_2 = PyTuple_New(4); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_v_shape); __Pyx_GIVEREF(__pyx_v_shape); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_shape); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_2, 2, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 3, __pyx_t_3); __pyx_t_4 = 0; __pyx_t_5 = 0; __pyx_t_3 = 0; /* "View.MemoryView":250 * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) # <<<<<<<<<<<<<< * result.data = buf * */ __pyx_t_3 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 250, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_3); if (PyDict_SetItem(__pyx_t_3, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 250, __pyx_L1_error) /* "View.MemoryView":249 * result = array(shape, itemsize, format, mode.decode('ASCII')) * else: * result = array(shape, itemsize, format, mode.decode('ASCII'), # <<<<<<<<<<<<<< * allocate_buffer=False) * result.data = buf */ __pyx_t_5 = __Pyx_PyObject_Call(((PyObject *)__pyx_array_type), __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_5); __pyx_t_5 = 0; /* "View.MemoryView":251 * result = array(shape, itemsize, format, mode.decode('ASCII'), * allocate_buffer=False) * result.data = buf # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->data = __pyx_v_buf; } __pyx_L3:; /* "View.MemoryView":253 * result.data = buf * * return result # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(((PyObject *)__pyx_r)); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":242 * * @cname("__pyx_array_new") * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, # <<<<<<<<<<<<<< * char *mode, char *buf): * cdef array result */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":279 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* Python wrapper */ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_name = 0; int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; PyObject* values[1] = {0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_name)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 279, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); } __pyx_v_name = values[0]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 279, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct 
__pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__init__", 0); /* "View.MemoryView":280 * cdef object name * def __init__(self, name): * self.name = name # <<<<<<<<<<<<<< * def __repr__(self): * return self.name */ __Pyx_INCREF(__pyx_v_name); __Pyx_GIVEREF(__pyx_v_name); __Pyx_GOTREF(__pyx_v_self->name); __Pyx_DECREF(__pyx_v_self->name); __pyx_v_self->name = __pyx_v_name; /* "View.MemoryView":279 * cdef class Enum(object): * cdef object name * def __init__(self, name): # <<<<<<<<<<<<<< * self.name = name * def __repr__(self): */ /* function exit code */ __pyx_r = 0; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":281 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* Python wrapper */ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":282 * self.name = name * def __repr__(self): * return self.name # <<<<<<<<<<<<<< * * cdef generic = Enum("<strided and direct or indirect>") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->name); __pyx_r = __pyx_v_self->name; goto __pyx_L0; /* "View.MemoryView":281 * def __init__(self, name): * self.name = name * def __repr__(self): # <<<<<<<<<<<<<< * return self.name * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef bint use_setstate * state = (self.name,) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { int __pyx_v_use_setstate; PyObject *__pyx_v_state = NULL; PyObject *__pyx_v__dict = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":3 * def __reduce_cython__(self): * cdef bint use_setstate * state = (self.name,) # <<<<<<<<<<<<<< * _dict = getattr(self, '__dict__', None) * if _dict is not None: */ 
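  /*
   * Editorial note -- not part of the Cython-generated code.  Unlike
   * cython.array above, Enum is picklable: the statements below pack its only
   * attribute, `name` (plus __dict__ when present), into `state` and return
   * it together with the constant 0xb068931 (184977713).  As far as we can
   * tell this constant is the checksum Cython derives from the type's
   * attribute layout, which __pyx_unpickle_Enum re-checks when the pickle is
   * loaded.
   */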
__pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v_self->name); __Pyx_GIVEREF(__pyx_v_self->name); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name); __pyx_v_state = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":4 * cdef bint use_setstate * state = (self.name,) * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< * if _dict is not None: * state += (_dict,) */ __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v__dict = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":5 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ __pyx_t_2 = (__pyx_v__dict != Py_None); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "(tree fragment)":6 * _dict = getattr(self, '__dict__', None) * if _dict is not None: * state += (_dict,) # <<<<<<<<<<<<<< * use_setstate = True * else: */ __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(__pyx_v__dict); __Pyx_GIVEREF(__pyx_v__dict); PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict); __pyx_t_4 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 6, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_4)); __pyx_t_4 = 0; /* "(tree fragment)":7 * if _dict is not None: * state += (_dict,) * use_setstate = True # <<<<<<<<<<<<<< * else: * use_setstate = self.name is not None */ __pyx_v_use_setstate = 1; /* "(tree fragment)":5 * state = (self.name,) * _dict = getattr(self, '__dict__', None) * if _dict is not None: # <<<<<<<<<<<<<< * state += (_dict,) * use_setstate = True */ goto __pyx_L3; } /* "(tree fragment)":9 * use_setstate = True * else: * use_setstate = self.name is not None # <<<<<<<<<<<<<< * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state */ /*else*/ { __pyx_t_3 = (__pyx_v_self->name != Py_None); __pyx_v_use_setstate = __pyx_t_3; } __pyx_L3:; /* "(tree fragment)":10 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ __pyx_t_3 = (__pyx_v_use_setstate != 0); if (__pyx_t_3) { /* "(tree fragment)":11 * use_setstate = self.name is not None * if use_setstate: * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state # <<<<<<<<<<<<<< * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) */ __Pyx_XDECREF(__pyx_r); __pyx_t_4 = __Pyx_GetModuleGlobalName(__pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None); __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 11, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); 
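/* use_setstate branch: assemble the return value
 * (__pyx_unpickle_Enum, (type(self), 0xb068931, None), state).
 * __pyx_int_184977713 is the decimal form of the 0xb068931 checksum, which the
 * generated __pyx_unpickle_Enum helper is expected to compare against the
 * pickled layout before restoring state. */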
__Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_1); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_v_state); __pyx_t_4 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "(tree fragment)":10 * else: * use_setstate = self.name is not None * if use_setstate: # <<<<<<<<<<<<<< * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: */ } /* "(tree fragment)":13 * return __pyx_unpickle_Enum, (type(self), 0xb068931, None), state * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_5 = __Pyx_GetModuleGlobalName(__pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); __Pyx_INCREF(__pyx_int_184977713); __Pyx_GIVEREF(__pyx_int_184977713); PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_184977713); __Pyx_INCREF(__pyx_v_state); __Pyx_GIVEREF(__pyx_v_state); PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state); __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_5); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1); __pyx_t_5 = 0; __pyx_t_1 = 0; __pyx_r = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L0; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * cdef bint use_setstate * state = (self.name,) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_state); __Pyx_XDECREF(__pyx_v__dict); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":14 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":15 * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * 
def __setstate_cython__(self, __pyx_state): * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 15, __pyx_L1_error) __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":14 * else: * return __pyx_unpickle_Enum, (type(self), 0xb068931, state) * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(self, __pyx_state) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":296 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ static void *__pyx_align_pointer(void *__pyx_v_memory, size_t __pyx_v_alignment) { Py_intptr_t __pyx_v_aligned_p; size_t __pyx_v_offset; void *__pyx_r; int __pyx_t_1; /* "View.MemoryView":298 * cdef void *align_pointer(void *memory, size_t alignment) nogil: * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory # <<<<<<<<<<<<<< * cdef size_t offset * */ __pyx_v_aligned_p = ((Py_intptr_t)__pyx_v_memory); /* "View.MemoryView":302 * * with cython.cdivision(True): * offset = aligned_p % alignment # <<<<<<<<<<<<<< * * if offset > 0: */ __pyx_v_offset = (__pyx_v_aligned_p % __pyx_v_alignment); /* "View.MemoryView":304 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ __pyx_t_1 = ((__pyx_v_offset > 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":305 * * if offset > 0: * aligned_p += alignment - offset # <<<<<<<<<<<<<< * * return <void *> aligned_p */ __pyx_v_aligned_p = (__pyx_v_aligned_p + (__pyx_v_alignment - __pyx_v_offset)); /* "View.MemoryView":304 * offset = aligned_p % alignment * * if offset > 0: # <<<<<<<<<<<<<< * aligned_p += alignment - offset * */ } /* "View.MemoryView":307 * aligned_p += alignment - offset * * return <void *> aligned_p # <<<<<<<<<<<<<< * * */ __pyx_r = ((void *)__pyx_v_aligned_p); goto __pyx_L0; /* "View.MemoryView":296 * * @cname('__pyx_align_pointer') * cdef void *align_pointer(void *memory, size_t alignment) nogil: # <<<<<<<<<<<<<< * "Align pointer memory on a given boundary" * cdef Py_intptr_t aligned_p = <Py_intptr_t> memory */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":343 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* Python wrapper */ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v_obj = 0; int __pyx_v_flags; int __pyx_v_dtype_is_object; int __pyx_r; 
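/* Argument unpacking for memoryview.__cinit__(obj, flags, dtype_is_object=False):
 * positional and keyword arguments are matched against (obj, flags,
 * dtype_is_object); obj and flags are required, and dtype_is_object falls back
 * to 0 when omitted, as in the Cython signature quoted below. */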
__Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_obj)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_flags)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 343, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (kw_args > 0) { PyObject* value = PyDict_GetItem(__pyx_kwds, __pyx_n_s_dtype_is_object); if (value) { values[2] = value; kw_args--; } } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 343, __pyx_L3_error) } } else { switch (PyTuple_GET_SIZE(__pyx_args)) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[0] = PyTuple_GET_ITEM(__pyx_args, 0); break; default: goto __pyx_L5_argtuple_error; } } __pyx_v_obj = values[0]; __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 343, __pyx_L3_error) if (values[2]) { __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 343, __pyx_L3_error) } else { __pyx_v_dtype_is_object = ((int)0); } } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 343, __pyx_L3_error) __pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return -1; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("__cinit__", 0); /* "View.MemoryView":344 * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj # <<<<<<<<<<<<<< * self.flags = flags * if type(self) is memoryview or obj is not None: */ __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); __Pyx_GOTREF(__pyx_v_self->obj); __Pyx_DECREF(__pyx_v_self->obj); __pyx_v_self->obj = __pyx_v_obj; /* "View.MemoryView":345 * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): * self.obj = obj * self.flags = flags # <<<<<<<<<<<<<< * if 
type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) */ __pyx_v_self->flags = __pyx_v_flags; /* "View.MemoryView":346 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); __pyx_t_3 = (__pyx_t_2 != 0); if (!__pyx_t_3) { } else { __pyx_t_1 = __pyx_t_3; goto __pyx_L4_bool_binop_done; } __pyx_t_3 = (__pyx_v_obj != Py_None); __pyx_t_2 = (__pyx_t_3 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L4_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":347 * self.flags = flags * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None */ __pyx_t_4 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 347, __pyx_L1_error) /* "View.MemoryView":348 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_t_1 = ((((PyObject *)__pyx_v_self->view.obj) == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":349 * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; /* "View.MemoryView":350 * if <PyObject *> self.view.obj == NULL: * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * global __pyx_memoryview_thread_locks_used */ Py_INCREF(Py_None); /* "View.MemoryView":348 * if type(self) is memoryview or obj is not None: * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: # <<<<<<<<<<<<<< * (<__pyx_buffer *> &self.view).obj = Py_None * Py_INCREF(Py_None) */ } /* "View.MemoryView":346 * self.obj = obj * self.flags = flags * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< * __Pyx_GetBuffer(obj, &self.view, flags) * if <PyObject *> self.view.obj == NULL: */ } /* "View.MemoryView":353 * * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ __pyx_t_1 = ((__pyx_memoryview_thread_locks_used < 8) != 0); if (__pyx_t_1) { /* "View.MemoryView":354 * global __pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: */ __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); /* "View.MemoryView":355 * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< * if self.lock is NULL: * self.lock = PyThread_allocate_lock() */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); /* "View.MemoryView":353 * * global 
__pyx_memoryview_thread_locks_used * if __pyx_memoryview_thread_locks_used < THREAD_LOCKS_PREALLOCATED: # <<<<<<<<<<<<<< * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 */ } /* "View.MemoryView":356 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":357 * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< * if self.lock is NULL: * raise MemoryError */ __pyx_v_self->lock = PyThread_allocate_lock(); /* "View.MemoryView":358 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ __pyx_t_1 = ((__pyx_v_self->lock == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":359 * self.lock = PyThread_allocate_lock() * if self.lock is NULL: * raise MemoryError # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ PyErr_NoMemory(); __PYX_ERR(1, 359, __pyx_L1_error) /* "View.MemoryView":358 * if self.lock is NULL: * self.lock = PyThread_allocate_lock() * if self.lock is NULL: # <<<<<<<<<<<<<< * raise MemoryError * */ } /* "View.MemoryView":356 * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] * __pyx_memoryview_thread_locks_used += 1 * if self.lock is NULL: # <<<<<<<<<<<<<< * self.lock = PyThread_allocate_lock() * if self.lock is NULL: */ } /* "View.MemoryView":361 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":362 * * if flags & PyBUF_FORMAT: * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< * else: * self.dtype_is_object = dtype_is_object */ __pyx_t_2 = (((__pyx_v_self->view.format[0]) == 'O') != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L11_bool_binop_done; } __pyx_t_2 = (((__pyx_v_self->view.format[1]) == '\x00') != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_self->dtype_is_object = __pyx_t_1; /* "View.MemoryView":361 * raise MemoryError * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: */ goto __pyx_L10; } /* "View.MemoryView":364 * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') * else: * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( */ /*else*/ { __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; } __pyx_L10:; /* "View.MemoryView":366 * self.dtype_is_object = dtype_is_object * * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( # <<<<<<<<<<<<<< * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * self.typeinfo = NULL */ __pyx_v_self->acquisition_count_aligned_p = ((__pyx_atomic_int *)__pyx_align_pointer(((void *)(&(__pyx_v_self->acquisition_count[0]))), (sizeof(__pyx_atomic_int)))); /* "View.MemoryView":368 * self.acquisition_count_aligned_p = <__pyx_atomic_int *> align_pointer( * <void *> &self.acquisition_count[0], sizeof(__pyx_atomic_int)) * 
self.typeinfo = NULL # <<<<<<<<<<<<<< * * def __dealloc__(memoryview self): */ __pyx_v_self->typeinfo = NULL; /* "View.MemoryView":343 * cdef __Pyx_TypeInfo *typeinfo * * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< * self.obj = obj * self.flags = flags */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":370 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* Python wrapper */ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { int __pyx_v_i; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; PyThread_type_lock __pyx_t_5; PyThread_type_lock __pyx_t_6; __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":371 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ __pyx_t_1 = (__pyx_v_self->obj != Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":372 * def __dealloc__(memoryview self): * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< * * cdef int i */ __Pyx_ReleaseBuffer((&__pyx_v_self->view)); /* "View.MemoryView":371 * * def __dealloc__(memoryview self): * if self.obj is not None: # <<<<<<<<<<<<<< * __Pyx_ReleaseBuffer(&self.view) * */ } /* "View.MemoryView":376 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ __pyx_t_2 = ((__pyx_v_self->lock != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":377 * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 */ __pyx_t_3 = __pyx_memoryview_thread_locks_used; for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { __pyx_v_i = __pyx_t_4; /* "View.MemoryView":378 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ __pyx_t_2 = (((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock) != 0); if (__pyx_t_2) { /* "View.MemoryView":379 * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( */ __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used 
- 1); /* "View.MemoryView":380 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ __pyx_t_2 = ((__pyx_v_i != __pyx_memoryview_thread_locks_used) != 0); if (__pyx_t_2) { /* "View.MemoryView":382 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< * break * else: */ __pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]); /* "View.MemoryView":381 * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break */ (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5; (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6; /* "View.MemoryView":380 * if __pyx_memoryview_thread_locks[i] is self.lock: * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) */ } /* "View.MemoryView":383 * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) * break # <<<<<<<<<<<<<< * else: * PyThread_free_lock(self.lock) */ goto __pyx_L6_break; /* "View.MemoryView":378 * if self.lock != NULL: * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< * __pyx_memoryview_thread_locks_used -= 1 * if i != __pyx_memoryview_thread_locks_used: */ } } /*else*/ { /* "View.MemoryView":385 * break * else: * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< * * cdef char *get_item_pointer(memoryview self, object index) except NULL: */ PyThread_free_lock(__pyx_v_self->lock); } __pyx_L6_break:; /* "View.MemoryView":376 * cdef int i * global __pyx_memoryview_thread_locks_used * if self.lock != NULL: # <<<<<<<<<<<<<< * for i in range(__pyx_memoryview_thread_locks_used): * if __pyx_memoryview_thread_locks[i] is self.lock: */ } /* "View.MemoryView":370 * self.typeinfo = NULL * * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< * if self.obj is not None: * __Pyx_ReleaseBuffer(&self.view) */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":387 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf */ static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { Py_ssize_t __pyx_v_dim; char *__pyx_v_itemp; PyObject *__pyx_v_idx = NULL; char *__pyx_r; 
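/* get_item_pointer: starting from self.view.buf, iterate over the index
 * sequence and let pybuffer_index() advance itemp one dimension at a time, so
 * the returned pointer addresses the selected element. For a simple strided
 * (direct) 2-D buffer, an index like (2, 3) ends up at
 * buf + 2*strides[0] + 3*strides[1]; indirect (suboffset) dimensions are
 * dereferenced inside pybuffer_index. */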
__Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t __pyx_t_3; PyObject *(*__pyx_t_4)(PyObject *); PyObject *__pyx_t_5 = NULL; Py_ssize_t __pyx_t_6; char *__pyx_t_7; __Pyx_RefNannySetupContext("get_item_pointer", 0); /* "View.MemoryView":389 * cdef char *get_item_pointer(memoryview self, object index) except NULL: * cdef Py_ssize_t dim * cdef char *itemp = <char *> self.view.buf # <<<<<<<<<<<<<< * * for dim, idx in enumerate(index): */ __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); /* "View.MemoryView":391 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ __pyx_t_1 = 0; if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; __pyx_t_4 = NULL; } else { __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = Py_TYPE(__pyx_t_2)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 391, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_4)) { if (likely(PyList_CheckExact(__pyx_t_2))) { if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 391, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } else { if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely(0 < 0)) __PYX_ERR(1, 391, __pyx_L1_error) #else __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 391, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif } } else { __pyx_t_5 = __pyx_t_4(__pyx_t_2); if (unlikely(!__pyx_t_5)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 391, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_5); } __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); __pyx_t_5 = 0; __pyx_v_dim = __pyx_t_1; __pyx_t_1 = (__pyx_t_1 + 1); /* "View.MemoryView":392 * * for dim, idx in enumerate(index): * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< * * return itemp */ __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if (unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 392, __pyx_L1_error) __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 392, __pyx_L1_error) __pyx_v_itemp = __pyx_t_7; /* "View.MemoryView":391 * cdef char *itemp = <char *> self.view.buf * * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< * itemp = pybuffer_index(&self.view, itemp, idx, dim) * */ } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":394 * itemp = pybuffer_index(&self.view, itemp, idx, dim) * * return itemp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_itemp; goto __pyx_L0; /* "View.MemoryView":387 * PyThread_free_lock(self.lock) * * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< * cdef Py_ssize_t 
dim * cdef char *itemp = <char *> self.view.buf */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_idx); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":397 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* Python wrapper */ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_indices = NULL; char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; char *__pyx_t_6; __Pyx_RefNannySetupContext("__getitem__", 0); /* "View.MemoryView":398 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":399 * def __getitem__(memoryview self, object index): * if index is Ellipsis: * return self # <<<<<<<<<<<<<< * * have_slices, indices = _unellipsify(index, self.view.ndim) */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __pyx_r = ((PyObject *)__pyx_v_self); goto __pyx_L0; /* "View.MemoryView":398 * * def __getitem__(memoryview self, object index): * if index is Ellipsis: # <<<<<<<<<<<<<< * return self * */ } /* "View.MemoryView":401 * return self * * have_slices, indices = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * cdef char *itemp */ __pyx_t_3 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (likely(__pyx_t_3 != Py_None)) { PyObject* sequence = __pyx_t_3; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 401, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_4 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_5 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); #else __pyx_t_4 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 401, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); #endif __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 401, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_4; __pyx_t_4 = 0; __pyx_v_indices = 
__pyx_t_5; __pyx_t_5 = 0; /* "View.MemoryView":404 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ __pyx_t_2 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_2 < 0)) __PYX_ERR(1, 404, __pyx_L1_error) if (__pyx_t_2) { /* "View.MemoryView":405 * cdef char *itemp * if have_slices: * return memview_slice(self, indices) # <<<<<<<<<<<<<< * else: * itemp = self.get_item_pointer(indices) */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 405, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":404 * * cdef char *itemp * if have_slices: # <<<<<<<<<<<<<< * return memview_slice(self, indices) * else: */ } /* "View.MemoryView":407 * return memview_slice(self, indices) * else: * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< * return self.convert_item_to_object(itemp) * */ /*else*/ { __pyx_t_6 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_6 == ((char *)NULL))) __PYX_ERR(1, 407, __pyx_L1_error) __pyx_v_itemp = __pyx_t_6; /* "View.MemoryView":408 * else: * itemp = self.get_item_pointer(indices) * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< * * def __setitem__(memoryview self, object index, object value): */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 408, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":397 * * * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< * if index is Ellipsis: * return self */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_indices); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":410 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* Python wrapper */ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { PyObject *__pyx_v_have_slices = NULL; PyObject *__pyx_v_obj = NULL; int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; int __pyx_t_4; __Pyx_RefNannySetupContext("__setitem__", 0); 
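/* __setitem__: _unellipsify() expands any Ellipsis in the index; if slices
 * remain, assignment dispatches to setitem_slice_assignment (when the value is
 * itself buffer-like) or setitem_slice_assign_scalar, otherwise to
 * setitem_indexed for a single element. For example, m[1:3, 0] = other_view
 * takes the slice-assignment path, m[1:3, 0] = 7 the scalar path, and
 * m[1, 0] = 7 the indexed path. */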
__Pyx_INCREF(__pyx_v_index); /* "View.MemoryView":411 * * def __setitem__(memoryview self, object index, object value): * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< * * if have_slices: */ __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (likely(__pyx_t_1 != Py_None)) { PyObject* sequence = __pyx_t_1; #if !CYTHON_COMPILING_IN_PYPY Py_ssize_t size = Py_SIZE(sequence); #else Py_ssize_t size = PySequence_Size(sequence); #endif if (unlikely(size != 2)) { if (size > 2) __Pyx_RaiseTooManyValuesError(2); else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); __PYX_ERR(1, 411, __pyx_L1_error) } #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_3); #else __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); #endif __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else { __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 411, __pyx_L1_error) } __pyx_v_have_slices = __pyx_t_2; __pyx_t_2 = 0; __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":413 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 413, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":414 * * if have_slices: * obj = self.is_slice(value) # <<<<<<<<<<<<<< * if obj: * self.setitem_slice_assignment(self[index], obj) */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 414, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_obj = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":415 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely(__pyx_t_4 < 0)) __PYX_ERR(1, 415, __pyx_L1_error) if (__pyx_t_4) { /* "View.MemoryView":416 * obj = self.is_slice(value) * if obj: * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< * else: * self.setitem_slice_assign_scalar(self[index], value) */ __pyx_t_1 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 416, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":415 * if have_slices: * obj = self.is_slice(value) * if obj: # <<<<<<<<<<<<<< * self.setitem_slice_assignment(self[index], obj) * else: */ goto __pyx_L4; } /* "View.MemoryView":418 * self.setitem_slice_assignment(self[index], obj) * else: * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< * else: * self.setitem_indexed(index, value) */ /*else*/ { __pyx_t_3 = PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if 
(unlikely(!__pyx_t_3)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 418, __pyx_L1_error) __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 418, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L4:; /* "View.MemoryView":413 * have_slices, index = _unellipsify(index, self.view.ndim) * * if have_slices: # <<<<<<<<<<<<<< * obj = self.is_slice(value) * if obj: */ goto __pyx_L3; } /* "View.MemoryView":420 * self.setitem_slice_assign_scalar(self[index], value) * else: * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< * * cdef is_slice(self, obj): */ /*else*/ { __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 420, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } __pyx_L3:; /* "View.MemoryView":410 * return self.convert_item_to_object(itemp) * * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< * have_slices, index = _unellipsify(index, self.view.ndim) * */ /* function exit code */ __pyx_r = 0; goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __pyx_L0:; __Pyx_XDECREF(__pyx_v_have_slices); __Pyx_XDECREF(__pyx_v_obj); __Pyx_XDECREF(__pyx_v_index); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":422 * self.setitem_indexed(index, value) * * cdef is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; int __pyx_t_9; __Pyx_RefNannySetupContext("is_slice", 0); __Pyx_INCREF(__pyx_v_obj); /* "View.MemoryView":423 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":424 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); __Pyx_XGOTREF(__pyx_t_5); /*try:*/ { /* "View.MemoryView":425 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_6 = __Pyx_PyInt_From_int((__pyx_v_self->flags | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 425, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":426 * try: * 
obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) # <<<<<<<<<<<<<< * except TypeError: * return None */ __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 426, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); /* "View.MemoryView":425 * if not isinstance(obj, memoryview): * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< * self.dtype_is_object) * except TypeError: */ __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 425, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_8); __Pyx_INCREF(__pyx_v_obj); __Pyx_GIVEREF(__pyx_v_obj); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj); __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 425, __pyx_L4_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":424 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ } __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; goto __pyx_L9_try_end; __pyx_L4_error:; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":427 * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) * except TypeError: # <<<<<<<<<<<<<< * return None * */ __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); if (__pyx_t_9) { __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 427, __pyx_L6_except_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GOTREF(__pyx_t_8); __Pyx_GOTREF(__pyx_t_6); /* "View.MemoryView":428 * self.dtype_is_object) * except TypeError: * return None # <<<<<<<<<<<<<< * * return obj */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; goto __pyx_L7_except_return; } goto __pyx_L6_except_error; __pyx_L6_except_error:; /* "View.MemoryView":424 * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): * try: # <<<<<<<<<<<<<< * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, * self.dtype_is_object) */ __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L1_error; __pyx_L7_except_return:; __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_XGIVEREF(__pyx_t_5); __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); goto __pyx_L0; __pyx_L9_try_end:; } /* "View.MemoryView":423 * * cdef is_slice(self, obj): * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< * try: * obj = memoryview(obj, self.flags|PyBUF_ANY_CONTIGUOUS, */ } /* "View.MemoryView":430 * return None * * return obj # <<<<<<<<<<<<<< * * cdef setitem_slice_assignment(self, dst, src): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_obj); __pyx_r = __pyx_v_obj; goto __pyx_L0; /* "View.MemoryView":422 * self.setitem_indexed(index, value) * * cdef 
is_slice(self, obj): # <<<<<<<<<<<<<< * if not isinstance(obj, memoryview): * try: */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_obj); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":432 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { __Pyx_memviewslice __pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_src_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); /* "View.MemoryView":436 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 436, __pyx_L1_error) /* "View.MemoryView":437 * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], # <<<<<<<<<<<<<< * src.ndim, dst.ndim, self.dtype_is_object) * */ if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 437, __pyx_L1_error) /* "View.MemoryView":438 * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_2 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_1); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 438, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":436 * cdef __Pyx_memviewslice src_slice * * memoryview_copy_contents(get_slice_from_memview(src, &src_slice)[0], # <<<<<<<<<<<<<< * get_slice_from_memview(dst, &dst_slice)[0], * src.ndim, dst.ndim, self.dtype_is_object) */ __pyx_t_4 = __pyx_memoryview_copy_contents((__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice))[0]), (__pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice))[0]), __pyx_t_2, __pyx_t_3, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 436, __pyx_L1_error) /* "View.MemoryView":432 * return obj * * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice dst_slice * cdef __Pyx_memviewslice src_slice */ /* function exit code */ __pyx_r = 
Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":440 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { int __pyx_v_array[0x80]; void *__pyx_v_tmp; void *__pyx_v_item; __Pyx_memviewslice *__pyx_v_dst_slice; __Pyx_memviewslice __pyx_v_tmp_slice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; int __pyx_t_3; int __pyx_t_4; char const *__pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; PyObject *__pyx_t_10 = NULL; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); /* "View.MemoryView":442 * cdef setitem_slice_assign_scalar(self, memoryview dst, value): * cdef int array[128] * cdef void *tmp = NULL # <<<<<<<<<<<<<< * cdef void *item * */ __pyx_v_tmp = NULL; /* "View.MemoryView":447 * cdef __Pyx_memviewslice *dst_slice * cdef __Pyx_memviewslice tmp_slice * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< * * if <size_t>self.view.itemsize > sizeof(array): */ __pyx_v_dst_slice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); /* "View.MemoryView":449 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ __pyx_t_1 = ((((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))) != 0); if (__pyx_t_1) { /* "View.MemoryView":450 * * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< * if tmp == NULL: * raise MemoryError */ __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); /* "View.MemoryView":451 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ __pyx_t_1 = ((__pyx_v_tmp == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":452 * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: * raise MemoryError # <<<<<<<<<<<<<< * item = tmp * else: */ PyErr_NoMemory(); __PYX_ERR(1, 452, __pyx_L1_error) /* "View.MemoryView":451 * if <size_t>self.view.itemsize > sizeof(array): * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: # <<<<<<<<<<<<<< * raise MemoryError * item = tmp */ } /* "View.MemoryView":453 * if tmp == NULL: * raise MemoryError * item = tmp # <<<<<<<<<<<<<< * else: * item = <void *> array */ __pyx_v_item = __pyx_v_tmp; /* "View.MemoryView":449 * dst_slice = get_slice_from_memview(dst, &tmp_slice) * * if <size_t>self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< * tmp = PyMem_Malloc(self.view.itemsize) * if tmp == NULL: */ goto __pyx_L3; } /* "View.MemoryView":455 * item = tmp * else: * item = <void *> array # <<<<<<<<<<<<<< * * try: */ /*else*/ { __pyx_v_item = ((void *)__pyx_v_array); } __pyx_L3:; /* "View.MemoryView":457 * item = <void *> array * * try: # <<<<<<<<<<<<<< * if self.dtype_is_object: * 
(<PyObject **> item)[0] = <PyObject *> value */ /*try:*/ { /* "View.MemoryView":458 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ __pyx_t_1 = (__pyx_v_self->dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":459 * try: * if self.dtype_is_object: * (<PyObject **> item)[0] = <PyObject *> value # <<<<<<<<<<<<<< * else: * self.assign_item_from_object(<char *> item, value) */ (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); /* "View.MemoryView":458 * * try: * if self.dtype_is_object: # <<<<<<<<<<<<<< * (<PyObject **> item)[0] = <PyObject *> value * else: */ goto __pyx_L8; } /* "View.MemoryView":461 * (<PyObject **> item)[0] = <PyObject *> value * else: * self.assign_item_from_object(<char *> item, value) # <<<<<<<<<<<<<< * * */ /*else*/ { __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 461, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_L8:; /* "View.MemoryView":465 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":466 * * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, * item, self.dtype_is_object) */ __pyx_t_2 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 466, __pyx_L6_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":465 * * * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, */ } /* "View.MemoryView":467 * if self.view.suboffsets != NULL: * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< * item, self.dtype_is_object) * finally: */ __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); } /* "View.MemoryView":470 * item, self.dtype_is_object) * finally: * PyMem_Free(tmp) # <<<<<<<<<<<<<< * * cdef setitem_indexed(self, index, value): */ /*finally:*/ { /*normal exit:*/{ PyMem_Free(__pyx_v_tmp); goto __pyx_L7; } __pyx_L6_error:; /*exception exit:*/{ __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8) < 0)) __Pyx_ErrFetch(&__pyx_t_6, &__pyx_t_7, &__pyx_t_8); __Pyx_XGOTREF(__pyx_t_6); __Pyx_XGOTREF(__pyx_t_7); __Pyx_XGOTREF(__pyx_t_8); __Pyx_XGOTREF(__pyx_t_9); __Pyx_XGOTREF(__pyx_t_10); __Pyx_XGOTREF(__pyx_t_11); __pyx_t_3 = __pyx_lineno; __pyx_t_4 = __pyx_clineno; __pyx_t_5 = __pyx_filename; { PyMem_Free(__pyx_v_tmp); } if (PY_MAJOR_VERSION >= 3) { __Pyx_XGIVEREF(__pyx_t_9); 
__Pyx_XGIVEREF(__pyx_t_10); __Pyx_XGIVEREF(__pyx_t_11); __Pyx_ExceptionReset(__pyx_t_9, __pyx_t_10, __pyx_t_11); } __Pyx_XGIVEREF(__pyx_t_6); __Pyx_XGIVEREF(__pyx_t_7); __Pyx_XGIVEREF(__pyx_t_8); __Pyx_ErrRestore(__pyx_t_6, __pyx_t_7, __pyx_t_8); __pyx_t_6 = 0; __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_4; __pyx_filename = __pyx_t_5; goto __pyx_L1_error; } __pyx_L7:; } /* "View.MemoryView":440 * src.ndim, dst.ndim, self.dtype_is_object) * * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< * cdef int array[128] * cdef void *tmp = NULL */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":472 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { char *__pyx_v_itemp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations char *__pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("setitem_indexed", 0); /* "View.MemoryView":473 * * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< * self.assign_item_from_object(itemp, value) * */ __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 473, __pyx_L1_error) __pyx_v_itemp = __pyx_t_1; /* "View.MemoryView":474 * cdef setitem_indexed(self, index, value): * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< * * cdef convert_item_to_object(self, char *itemp): */ __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 474, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":472 * PyMem_Free(tmp) * * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< * cdef char *itemp = self.get_item_pointer(index) * self.assign_item_from_object(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":476 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_v_struct = NULL; PyObject *__pyx_v_bytesitem = 0; PyObject *__pyx_v_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject 
*__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; int __pyx_t_8; PyObject *__pyx_t_9 = NULL; size_t __pyx_t_10; int __pyx_t_11; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":479 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef bytes bytesitem * */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 479, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":482 * cdef bytes bytesitem * * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< * try: * result = struct.unpack(self.view.format, bytesitem) */ __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 482, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":483 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); __Pyx_XGOTREF(__pyx_t_2); __Pyx_XGOTREF(__pyx_t_3); __Pyx_XGOTREF(__pyx_t_4); /*try:*/ { /* "View.MemoryView":484 * bytesitem = itemp[:self.view.itemsize] * try: * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< * except struct.error: * raise ValueError("Unable to convert item to object") */ __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_5); __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = NULL; __pyx_t_8 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_5))) { __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); if (likely(__pyx_t_7)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); __Pyx_INCREF(__pyx_t_7); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_5, function); __pyx_t_8 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_5)) { PyObject *__pyx_temp[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_5, __pyx_temp+1-__pyx_t_8, 2+__pyx_t_8); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(2+__pyx_t_8); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_9); if (__pyx_t_7) { __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_7); __pyx_t_7 = NULL; } __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+__pyx_t_8, __pyx_t_6); __Pyx_INCREF(__pyx_v_bytesitem); __Pyx_GIVEREF(__pyx_v_bytesitem); PyTuple_SET_ITEM(__pyx_t_9, 1+__pyx_t_8, __pyx_v_bytesitem); 
__pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_5, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 484, __pyx_L3_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __pyx_v_result = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":483 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ } /* "View.MemoryView":488 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ /*else:*/ { __pyx_t_10 = strlen(__pyx_v_self->view.format); __pyx_t_11 = ((__pyx_t_10 == 1) != 0); if (__pyx_t_11) { /* "View.MemoryView":489 * else: * if len(self.view.format) == 1: * return result[0] # <<<<<<<<<<<<<< * return result * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 489, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L6_except_return; /* "View.MemoryView":488 * raise ValueError("Unable to convert item to object") * else: * if len(self.view.format) == 1: # <<<<<<<<<<<<<< * return result[0] * return result */ } /* "View.MemoryView":490 * if len(self.view.format) == 1: * return result[0] * return result # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_result); __pyx_r = __pyx_v_result; goto __pyx_L6_except_return; } __pyx_L3_error:; __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_XDECREF(__pyx_t_9); __pyx_t_9 = 0; __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":485 * try: * result = struct.unpack(self.view.format, bytesitem) * except struct.error: # <<<<<<<<<<<<<< * raise ValueError("Unable to convert item to object") * else: */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 485, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_8 = __Pyx_PyErr_ExceptionMatches(__pyx_t_1); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; if (__pyx_t_8) { __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_5, &__pyx_t_9) < 0) __PYX_ERR(1, 485, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GOTREF(__pyx_t_5); __Pyx_GOTREF(__pyx_t_9); /* "View.MemoryView":486 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 486, __pyx_L5_except_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_Raise(__pyx_t_6, 0, 0, 0); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __PYX_ERR(1, 486, __pyx_L5_except_error) } goto __pyx_L5_except_error; __pyx_L5_except_error:; /* "View.MemoryView":483 * * bytesitem = itemp[:self.view.itemsize] * try: # <<<<<<<<<<<<<< * result = struct.unpack(self.view.format, bytesitem) * except struct.error: */ __Pyx_XGIVEREF(__pyx_t_2); __Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L1_error; __pyx_L6_except_return:; __Pyx_XGIVEREF(__pyx_t_2); 
__Pyx_XGIVEREF(__pyx_t_3); __Pyx_XGIVEREF(__pyx_t_4); __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); goto __pyx_L0; } /* "View.MemoryView":476 * self.assign_item_from_object(itemp, value) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesitem); __Pyx_XDECREF(__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":492 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_v_struct = NULL; char __pyx_v_c; PyObject *__pyx_v_bytesvalue = 0; Py_ssize_t __pyx_v_i; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; int __pyx_t_3; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; PyObject *__pyx_t_8 = NULL; Py_ssize_t __pyx_t_9; PyObject *__pyx_t_10 = NULL; char *__pyx_t_11; char *__pyx_t_12; char *__pyx_t_13; char *__pyx_t_14; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":495 * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" * import struct # <<<<<<<<<<<<<< * cdef char c * cdef bytes bytesvalue */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_struct, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 495, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_v_struct = __pyx_t_1; __pyx_t_1 = 0; /* "View.MemoryView":500 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ __pyx_t_2 = PyTuple_Check(__pyx_v_value); __pyx_t_3 = (__pyx_t_2 != 0); if (__pyx_t_3) { /* "View.MemoryView":501 * * if isinstance(value, tuple): * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< * else: * bytesvalue = struct.pack(self.view.format, value) */ __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_5 = PyTuple_New(1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = PyNumber_Add(__pyx_t_5, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 501, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_6, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 501, 
__pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 501, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; /* "View.MemoryView":500 * cdef Py_ssize_t i * * if isinstance(value, tuple): # <<<<<<<<<<<<<< * bytesvalue = struct.pack(self.view.format, *value) * else: */ goto __pyx_L3; } /* "View.MemoryView":503 * bytesvalue = struct.pack(self.view.format, *value) * else: * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< * * for i, c in enumerate(bytesvalue): */ /*else*/ { __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_5 = NULL; __pyx_t_7 = 0; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_6))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_6); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_6, function); __pyx_t_7 = 1; } } #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_6)) { PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_1, __pyx_v_value}; __pyx_t_4 = __Pyx_PyCFunction_FastCall(__pyx_t_6, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; } else #endif { __pyx_t_8 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_8); if (__pyx_t_5) { __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_t_5); __pyx_t_5 = NULL; } __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_8, 0+__pyx_t_7, __pyx_t_1); __Pyx_INCREF(__pyx_v_value); __Pyx_GIVEREF(__pyx_v_value); PyTuple_SET_ITEM(__pyx_t_8, 1+__pyx_t_7, __pyx_v_value); __pyx_t_1 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_6, __pyx_t_8, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 503, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; } __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (!(likely(PyBytes_CheckExact(__pyx_t_4))||((__pyx_t_4) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "bytes", Py_TYPE(__pyx_t_4)->tp_name), 0))) __PYX_ERR(1, 503, __pyx_L1_error) __pyx_v_bytesvalue = ((PyObject*)__pyx_t_4); __pyx_t_4 = 0; } __pyx_L3:; /* "View.MemoryView":505 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = 0; if (unlikely(__pyx_v_bytesvalue == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); __PYX_ERR(1, 505, __pyx_L1_error) } __Pyx_INCREF(__pyx_v_bytesvalue); __pyx_t_10 = __pyx_v_bytesvalue; __pyx_t_12 = 
PyBytes_AS_STRING(__pyx_t_10); __pyx_t_13 = (__pyx_t_12 + PyBytes_GET_SIZE(__pyx_t_10)); for (__pyx_t_14 = __pyx_t_12; __pyx_t_14 < __pyx_t_13; __pyx_t_14++) { __pyx_t_11 = __pyx_t_14; __pyx_v_c = (__pyx_t_11[0]); /* "View.MemoryView":506 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ __pyx_v_i = __pyx_t_9; /* "View.MemoryView":505 * bytesvalue = struct.pack(self.view.format, value) * * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< * itemp[i] = c * */ __pyx_t_9 = (__pyx_t_9 + 1); /* "View.MemoryView":506 * * for i, c in enumerate(bytesvalue): * itemp[i] = c # <<<<<<<<<<<<<< * * @cname('getbuffer') */ (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; } __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; /* "View.MemoryView":492 * return result * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * """Only used if instantiated manually by the user, or if Cython doesn't * know how to convert the type""" */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_10); __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_struct); __Pyx_XDECREF(__pyx_v_bytesvalue); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":509 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* Python wrapper */ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ static CYTHON_UNUSED int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; Py_ssize_t *__pyx_t_2; char *__pyx_t_3; void *__pyx_t_4; int __pyx_t_5; Py_ssize_t __pyx_t_6; __Pyx_RefNannySetupContext("__getbuffer__", 0); if (__pyx_v_info != NULL) { __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); __Pyx_GIVEREF(__pyx_v_info->obj); } /* "View.MemoryView":510 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":511 * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: * info.shape = self.view.shape # <<<<<<<<<<<<<< * else: * info.shape = NULL */ __pyx_t_2 = __pyx_v_self->view.shape; __pyx_v_info->shape = __pyx_t_2; /* "View.MemoryView":510 * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.shape = self.view.shape * else: */ goto __pyx_L3; } /* 
"View.MemoryView":513 * info.shape = self.view.shape * else: * info.shape = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_STRIDES: */ /*else*/ { __pyx_v_info->shape = NULL; } __pyx_L3:; /* "View.MemoryView":515 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); if (__pyx_t_1) { /* "View.MemoryView":516 * * if flags & PyBUF_STRIDES: * info.strides = self.view.strides # <<<<<<<<<<<<<< * else: * info.strides = NULL */ __pyx_t_2 = __pyx_v_self->view.strides; __pyx_v_info->strides = __pyx_t_2; /* "View.MemoryView":515 * info.shape = NULL * * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< * info.strides = self.view.strides * else: */ goto __pyx_L4; } /* "View.MemoryView":518 * info.strides = self.view.strides * else: * info.strides = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_INDIRECT: */ /*else*/ { __pyx_v_info->strides = NULL; } __pyx_L4:; /* "View.MemoryView":520 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); if (__pyx_t_1) { /* "View.MemoryView":521 * * if flags & PyBUF_INDIRECT: * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< * else: * info.suboffsets = NULL */ __pyx_t_2 = __pyx_v_self->view.suboffsets; __pyx_v_info->suboffsets = __pyx_t_2; /* "View.MemoryView":520 * info.strides = NULL * * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< * info.suboffsets = self.view.suboffsets * else: */ goto __pyx_L5; } /* "View.MemoryView":523 * info.suboffsets = self.view.suboffsets * else: * info.suboffsets = NULL # <<<<<<<<<<<<<< * * if flags & PyBUF_FORMAT: */ /*else*/ { __pyx_v_info->suboffsets = NULL; } __pyx_L5:; /* "View.MemoryView":525 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); if (__pyx_t_1) { /* "View.MemoryView":526 * * if flags & PyBUF_FORMAT: * info.format = self.view.format # <<<<<<<<<<<<<< * else: * info.format = NULL */ __pyx_t_3 = __pyx_v_self->view.format; __pyx_v_info->format = __pyx_t_3; /* "View.MemoryView":525 * info.suboffsets = NULL * * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< * info.format = self.view.format * else: */ goto __pyx_L6; } /* "View.MemoryView":528 * info.format = self.view.format * else: * info.format = NULL # <<<<<<<<<<<<<< * * info.buf = self.view.buf */ /*else*/ { __pyx_v_info->format = NULL; } __pyx_L6:; /* "View.MemoryView":530 * info.format = NULL * * info.buf = self.view.buf # <<<<<<<<<<<<<< * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize */ __pyx_t_4 = __pyx_v_self->view.buf; __pyx_v_info->buf = __pyx_t_4; /* "View.MemoryView":531 * * info.buf = self.view.buf * info.ndim = self.view.ndim # <<<<<<<<<<<<<< * info.itemsize = self.view.itemsize * info.len = self.view.len */ __pyx_t_5 = __pyx_v_self->view.ndim; __pyx_v_info->ndim = __pyx_t_5; /* "View.MemoryView":532 * info.buf = self.view.buf * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< * info.len = self.view.len * info.readonly = 0 */ __pyx_t_6 = __pyx_v_self->view.itemsize; __pyx_v_info->itemsize = __pyx_t_6; /* "View.MemoryView":533 * info.ndim = self.view.ndim * info.itemsize = self.view.itemsize * info.len = self.view.len # <<<<<<<<<<<<<< * info.readonly = 0 * info.obj = self */ __pyx_t_6 = __pyx_v_self->view.len; __pyx_v_info->len = __pyx_t_6; /* "View.MemoryView":534 * info.itemsize = 
self.view.itemsize * info.len = self.view.len * info.readonly = 0 # <<<<<<<<<<<<<< * info.obj = self * */ __pyx_v_info->readonly = 0; /* "View.MemoryView":535 * info.len = self.view.len * info.readonly = 0 * info.obj = self # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); __Pyx_GOTREF(__pyx_v_info->obj); __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = ((PyObject *)__pyx_v_self); /* "View.MemoryView":509 * * @cname('getbuffer') * def __getbuffer__(self, Py_buffer *info, int flags): # <<<<<<<<<<<<<< * if flags & PyBUF_STRIDES: * info.shape = self.view.shape */ /* function exit code */ __pyx_r = 0; if (__pyx_v_info != NULL && __pyx_v_info->obj == Py_None) { __Pyx_GOTREF(Py_None); __Pyx_DECREF(Py_None); __pyx_v_info->obj = NULL; } __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":541 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":542 * @property * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< * transpose_memslice(&result.from_slice) * return result */ __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 542, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 542, __pyx_L1_error) __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":543 * def T(self): * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< * return result * */ __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 543, __pyx_L1_error) /* "View.MemoryView":544 * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) * return result # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":541 * * @property * def T(self): # <<<<<<<<<<<<<< * cdef _memoryviewslice result = memoryview_copy(self) * transpose_memslice(&result.from_slice) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":547 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":548 * @property * def base(self): * return self.obj # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->obj); __pyx_r = __pyx_v_self->obj; goto __pyx_L0; /* "View.MemoryView":547 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.obj * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":551 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_length; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":552 * @property * def shape(self): * return tuple([length for length in self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { __pyx_t_2 = __pyx_t_4; __pyx_v_length = (__pyx_t_2[0]); __pyx_t_5 = PyInt_FromSsize_t(__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 552, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":551 * * @property * def shape(self): # <<<<<<<<<<<<<< * return tuple([length for length in self.view.shape[:self.view.ndim]]) * */ /* function exit code */ __pyx_L1_error:; 
__Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":555 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_stride; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":556 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ __pyx_t_1 = ((__pyx_v_self->view.strides == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":558 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 558, __pyx_L1_error) /* "View.MemoryView":556 * @property * def strides(self): * if self.view.strides == NULL: # <<<<<<<<<<<<<< * * raise ValueError("Buffer view does not expose strides") */ } /* "View.MemoryView":560 * raise ValueError("Buffer view does not expose strides") * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_v_stride = (__pyx_t_3[0]); __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 560, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_6; __pyx_t_6 = 0; goto __pyx_L0; /* "View.MemoryView":555 * * @property * def strides(self): # <<<<<<<<<<<<<< * if self.view.strides == NULL: * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, 
__pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":563 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; Py_ssize_t *__pyx_t_6; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":564 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ __pyx_t_1 = ((__pyx_v_self->view.suboffsets == NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":565 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_tuple__11, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":564 * @property * def suboffsets(self): * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< * return (-1,) * self.view.ndim * */ } /* "View.MemoryView":567 * return (-1,) * self.view.ndim * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_5 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); for (__pyx_t_6 = __pyx_v_self->view.suboffsets; __pyx_t_6 < __pyx_t_5; __pyx_t_6++) { __pyx_t_4 = __pyx_t_6; __pyx_v_suboffset = (__pyx_t_4[0]); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_suboffset); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); if (unlikely(__Pyx_ListComp_Append(__pyx_t_3, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; } __pyx_t_2 = PyList_AsTuple(((PyObject*)__pyx_t_3)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 567, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":563 * * @property * def suboffsets(self): # <<<<<<<<<<<<<< * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); 
__Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":570 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":571 * @property * def ndim(self): * return self.view.ndim # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 571, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":570 * * @property * def ndim(self): # <<<<<<<<<<<<<< * return self.view.ndim * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":574 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":575 * @property * def itemsize(self): * return self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 575, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":574 * * @property * def itemsize(self): # <<<<<<<<<<<<<< * return self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":578 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* 
Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":579 * @property * def nbytes(self): * return self.size * self.view.itemsize # <<<<<<<<<<<<<< * * @property */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 579, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":578 * * @property * def nbytes(self): # <<<<<<<<<<<<<< * return self.size * self.view.itemsize * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":582 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_v_result = NULL; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; Py_ssize_t *__pyx_t_3; Py_ssize_t *__pyx_t_4; Py_ssize_t *__pyx_t_5; PyObject *__pyx_t_6 = NULL; __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":583 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ __pyx_t_1 = (__pyx_v_self->_size == Py_None); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":584 * def size(self): * if self._size is None: * result = 1 # <<<<<<<<<<<<<< * * for length in self.view.shape[:self.view.ndim]: */ __Pyx_INCREF(__pyx_int_1); __pyx_v_result = __pyx_int_1; /* "View.MemoryView":586 * result = 1 * * for length in 
self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< * result *= length * */ __pyx_t_4 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); for (__pyx_t_5 = __pyx_v_self->view.shape; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { __pyx_t_3 = __pyx_t_5; __pyx_t_6 = PyInt_FromSsize_t((__pyx_t_3[0])); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 586, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_6); __pyx_t_6 = 0; /* "View.MemoryView":587 * * for length in self.view.shape[:self.view.ndim]: * result *= length # <<<<<<<<<<<<<< * * self._size = result */ __pyx_t_6 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 587, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_6); __pyx_t_6 = 0; } /* "View.MemoryView":589 * result *= length * * self._size = result # <<<<<<<<<<<<<< * * return self._size */ __Pyx_INCREF(__pyx_v_result); __Pyx_GIVEREF(__pyx_v_result); __Pyx_GOTREF(__pyx_v_self->_size); __Pyx_DECREF(__pyx_v_self->_size); __pyx_v_self->_size = __pyx_v_result; /* "View.MemoryView":583 * @property * def size(self): * if self._size is None: # <<<<<<<<<<<<<< * result = 1 * */ } /* "View.MemoryView":591 * self._size = result * * return self._size # <<<<<<<<<<<<<< * * def __len__(self): */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->_size); __pyx_r = __pyx_v_self->_size; goto __pyx_L0; /* "View.MemoryView":582 * * @property * def size(self): # <<<<<<<<<<<<<< * if self._size is None: * result = 1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":593 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* Python wrapper */ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { Py_ssize_t __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("__len__", 0); /* "View.MemoryView":594 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ __pyx_t_1 = ((__pyx_v_self->view.ndim >= 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":595 * def __len__(self): * if self.view.ndim >= 1: * return self.view.shape[0] # <<<<<<<<<<<<<< * * return 0 */ __pyx_r = (__pyx_v_self->view.shape[0]); goto __pyx_L0; /* "View.MemoryView":594 * * def __len__(self): * if self.view.ndim >= 1: # <<<<<<<<<<<<<< * return self.view.shape[0] * */ } /* "View.MemoryView":597 * return self.view.shape[0] * * return 0 # <<<<<<<<<<<<<< * * def __repr__(self): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":593 * return self._size * * def __len__(self): # <<<<<<<<<<<<<< * if self.view.ndim >= 1: * return self.view.shape[0] */ /* function exit code */ __pyx_L0:; 
__Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":599 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* Python wrapper */ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("__repr__", 0); /* "View.MemoryView":600 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":601 * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) # <<<<<<<<<<<<<< * * def __str__(self): */ __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 601, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(((PyObject *)__pyx_v_self)); __Pyx_GIVEREF(((PyObject *)__pyx_v_self)); PyTuple_SET_ITEM(__pyx_t_2, 0, ((PyObject *)__pyx_v_self)); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_id, __pyx_t_2, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 601, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":600 * * def __repr__(self): * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< * id(self)) * */ __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 600, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_3; __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":599 * return 0 * * def __repr__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, * id(self)) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"View.MemoryView":603 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* Python wrapper */ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("__str__", 0); /* "View.MemoryView":604 * * def __str__(self): * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1); __pyx_t_1 = 0; __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 604, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":603 * id(self)) * * def __str__(self): # <<<<<<<<<<<<<< * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":607 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_c_contig", 0); /* 
"View.MemoryView":610 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'C', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":611 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< * * def is_f_contig(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 611, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":607 * * * def is_c_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":613 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* Python wrapper */ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice *__pyx_v_mslice; __Pyx_memviewslice __pyx_v_tmp; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("is_f_contig", 0); /* "View.MemoryView":616 * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< * return slice_is_contig(mslice[0], 'F', self.view.ndim) * */ __pyx_v_mslice = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); /* "View.MemoryView":617 * cdef __Pyx_memviewslice tmp * mslice = get_slice_from_memview(self, &tmp) * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< * * def copy(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 617, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":613 * return slice_is_contig(mslice[0], 'C', self.view.ndim) * * def is_f_contig(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice *mslice * cdef __Pyx_memviewslice tmp */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":619 * return 
slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_mslice; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy", 0); /* "View.MemoryView":621 * def copy(self): * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &mslice) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_F_CONTIGUOUS)); /* "View.MemoryView":623 * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS * * slice_copy(self, &mslice) # <<<<<<<<<<<<<< * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); /* "View.MemoryView":624 * * slice_copy(self, &mslice) * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_C_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 624, __pyx_L1_error) __pyx_v_mslice = __pyx_t_1; /* "View.MemoryView":629 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< * * def copy_fortran(self): */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 629, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":619 * return slice_is_contig(mslice[0], 'F', self.view.ndim) * * def copy(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice mslice * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":631 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* Python wrapper */ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct 
__pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; int __pyx_v_flags; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_memviewslice __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("copy_fortran", 0); /* "View.MemoryView":633 * def copy_fortran(self): * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< * * slice_copy(self, &src) */ __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); /* "View.MemoryView":635 * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS * * slice_copy(self, &src) # <<<<<<<<<<<<<< * dst = slice_copy_contig(&src, "fortran", self.view.ndim, * self.view.itemsize, */ __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); /* "View.MemoryView":636 * * slice_copy(self, &src) * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< * self.view.itemsize, * flags|PyBUF_F_CONTIGUOUS, */ __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 636, __pyx_L1_error) __pyx_v_dst = __pyx_t_1; /* "View.MemoryView":641 * self.dtype_is_object) * * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 641, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":631 * return memoryview_copy_from_slice(self, &mslice) * * def copy_fortran(self): # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice src, dst * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no 
default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":645 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { struct __pyx_memoryview_obj *__pyx_v_result = 0; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 
= NULL; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); /* "View.MemoryView":646 * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< * result.typeinfo = typeinfo * return result */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_o); __Pyx_GIVEREF(__pyx_v_o); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 646, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":647 * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo # <<<<<<<<<<<<<< * return result * */ __pyx_v_result->typeinfo = __pyx_v_typeinfo; /* "View.MemoryView":648 * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_check') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":645 * * @cname('__pyx_memoryview_new') * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< * cdef memoryview result = memoryview(o, flags, dtype_is_object) * result.typeinfo = typeinfo */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":651 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; __Pyx_RefNannySetupContext("memoryview_check", 0); /* "View.MemoryView":652 * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): * return isinstance(o, memoryview) # <<<<<<<<<<<<<< * * cdef tuple _unellipsify(object index, int ndim): */ __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); __pyx_r = __pyx_t_1; goto __pyx_L0; /* "View.MemoryView":651 * * @cname('__pyx_memoryview_check') * cdef inline bint memoryview_check(object o): # <<<<<<<<<<<<<< * return isinstance(o, memoryview) * */ /* function exit code */ __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":654 * return isinstance(o, memoryview) * * cdef tuple 
_unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { PyObject *__pyx_v_tup = NULL; PyObject *__pyx_v_result = NULL; int __pyx_v_have_slices; int __pyx_v_seen_ellipsis; CYTHON_UNUSED PyObject *__pyx_v_idx = NULL; PyObject *__pyx_v_item = NULL; Py_ssize_t __pyx_v_nslices; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; Py_ssize_t __pyx_t_5; PyObject *(*__pyx_t_6)(PyObject *); PyObject *__pyx_t_7 = NULL; Py_ssize_t __pyx_t_8; int __pyx_t_9; int __pyx_t_10; PyObject *__pyx_t_11 = NULL; __Pyx_RefNannySetupContext("_unellipsify", 0); /* "View.MemoryView":659 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ __pyx_t_1 = PyTuple_Check(__pyx_v_index); __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":660 * """ * if not isinstance(index, tuple): * tup = (index,) # <<<<<<<<<<<<<< * else: * tup = index */ __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 660, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_index); __Pyx_GIVEREF(__pyx_v_index); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index); __pyx_v_tup = __pyx_t_3; __pyx_t_3 = 0; /* "View.MemoryView":659 * full slices. * """ * if not isinstance(index, tuple): # <<<<<<<<<<<<<< * tup = (index,) * else: */ goto __pyx_L3; } /* "View.MemoryView":662 * tup = (index,) * else: * tup = index # <<<<<<<<<<<<<< * * result = [] */ /*else*/ { __Pyx_INCREF(__pyx_v_index); __pyx_v_tup = __pyx_v_index; } __pyx_L3:; /* "View.MemoryView":664 * tup = index * * result = [] # <<<<<<<<<<<<<< * have_slices = False * seen_ellipsis = False */ __pyx_t_3 = PyList_New(0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 664, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_v_result = ((PyObject*)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":665 * * result = [] * have_slices = False # <<<<<<<<<<<<<< * seen_ellipsis = False * for idx, item in enumerate(tup): */ __pyx_v_have_slices = 0; /* "View.MemoryView":666 * result = [] * have_slices = False * seen_ellipsis = False # <<<<<<<<<<<<<< * for idx, item in enumerate(tup): * if item is Ellipsis: */ __pyx_v_seen_ellipsis = 0; /* "View.MemoryView":667 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ __Pyx_INCREF(__pyx_int_0); __pyx_t_3 = __pyx_int_0; if (likely(PyList_CheckExact(__pyx_v_tup)) || PyTuple_CheckExact(__pyx_v_tup)) { __pyx_t_4 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_4); __pyx_t_5 = 0; __pyx_t_6 = NULL; } else { __pyx_t_5 = -1; __pyx_t_4 = PyObject_GetIter(__pyx_v_tup); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_6 = Py_TYPE(__pyx_t_4)->tp_iternext; if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 667, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_6)) { if (likely(PyList_CheckExact(__pyx_t_4))) { if (__pyx_t_5 >= PyList_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyList_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 667, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } else { if (__pyx_t_5 >= 
PyTuple_GET_SIZE(__pyx_t_4)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_7 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_5); __Pyx_INCREF(__pyx_t_7); __pyx_t_5++; if (unlikely(0 < 0)) __PYX_ERR(1, 667, __pyx_L1_error) #else __pyx_t_7 = PySequence_ITEM(__pyx_t_4, __pyx_t_5); __pyx_t_5++; if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); #endif } } else { __pyx_t_7 = __pyx_t_6(__pyx_t_4); if (unlikely(!__pyx_t_7)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 667, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_7); } __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_7); __pyx_t_7 = 0; __Pyx_INCREF(__pyx_t_3); __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_3); __pyx_t_7 = __Pyx_PyInt_AddObjC(__pyx_t_3, __pyx_int_1, 1, 0); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 667, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = __pyx_t_7; __pyx_t_7 = 0; /* "View.MemoryView":668 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":669 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ __pyx_t_1 = ((!(__pyx_v_seen_ellipsis != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":670 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_t_8 = PyObject_Length(__pyx_v_tup); if (unlikely(__pyx_t_8 == ((Py_ssize_t)-1))) __PYX_ERR(1, 670, __pyx_L1_error) __pyx_t_7 = PyList_New(1 * ((((__pyx_v_ndim - __pyx_t_8) + 1)<0) ? 
0:((__pyx_v_ndim - __pyx_t_8) + 1))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 670, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < ((__pyx_v_ndim - __pyx_t_8) + 1); __pyx_temp++) { __Pyx_INCREF(__pyx_slice__14); __Pyx_GIVEREF(__pyx_slice__14); PyList_SET_ITEM(__pyx_t_7, __pyx_temp, __pyx_slice__14); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_7); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 670, __pyx_L1_error) __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; /* "View.MemoryView":671 * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True # <<<<<<<<<<<<<< * else: * result.append(slice(None)) */ __pyx_v_seen_ellipsis = 1; /* "View.MemoryView":669 * for idx, item in enumerate(tup): * if item is Ellipsis: * if not seen_ellipsis: # <<<<<<<<<<<<<< * result.extend([slice(None)] * (ndim - len(tup) + 1)) * seen_ellipsis = True */ goto __pyx_L7; } /* "View.MemoryView":673 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ /*else*/ { __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_slice__15); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 673, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":674 * else: * result.append(slice(None)) * have_slices = True # <<<<<<<<<<<<<< * else: * if not isinstance(item, slice) and not PyIndex_Check(item): */ __pyx_v_have_slices = 1; /* "View.MemoryView":668 * seen_ellipsis = False * for idx, item in enumerate(tup): * if item is Ellipsis: # <<<<<<<<<<<<<< * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) */ goto __pyx_L6; } /* "View.MemoryView":676 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ /*else*/ { __pyx_t_2 = PySlice_Check(__pyx_v_item); __pyx_t_10 = ((!(__pyx_t_2 != 0)) != 0); if (__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = ((!(PyIndex_Check(__pyx_v_item) != 0)) != 0); __pyx_t_1 = __pyx_t_10; __pyx_L9_bool_binop_done:; if (__pyx_t_1) { /* "View.MemoryView":677 * else: * if not isinstance(item, slice) and not PyIndex_Check(item): * raise TypeError("Cannot index with type '%s'" % type(item)) # <<<<<<<<<<<<<< * * have_slices = have_slices or isinstance(item, slice) */ __pyx_t_7 = __Pyx_PyString_Format(__pyx_kp_s_Cannot_index_with_type_s, ((PyObject *)Py_TYPE(__pyx_v_item))); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __pyx_t_11 = PyTuple_New(1); if (unlikely(!__pyx_t_11)) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_11); __Pyx_GIVEREF(__pyx_t_7); PyTuple_SET_ITEM(__pyx_t_11, 0, __pyx_t_7); __pyx_t_7 = 0; __pyx_t_7 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_t_11, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 677, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; __Pyx_Raise(__pyx_t_7, 0, 0, 0); __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __PYX_ERR(1, 677, __pyx_L1_error) /* "View.MemoryView":676 * have_slices = True * else: * if not isinstance(item, slice) and not PyIndex_Check(item): # <<<<<<<<<<<<<< * raise TypeError("Cannot index with type '%s'" % type(item)) * */ } /* "View.MemoryView":679 * raise TypeError("Cannot index with type '%s'" % type(item)) * * have_slices = have_slices or isinstance(item, slice) # <<<<<<<<<<<<<< * result.append(item) * */ __pyx_t_10 = (__pyx_v_have_slices != 0); if 
(!__pyx_t_10) { } else { __pyx_t_1 = __pyx_t_10; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = PySlice_Check(__pyx_v_item); __pyx_t_2 = (__pyx_t_10 != 0); __pyx_t_1 = __pyx_t_2; __pyx_L11_bool_binop_done:; __pyx_v_have_slices = __pyx_t_1; /* "View.MemoryView":680 * * have_slices = have_slices or isinstance(item, slice) * result.append(item) # <<<<<<<<<<<<<< * * nslices = ndim - len(result) */ __pyx_t_9 = __Pyx_PyList_Append(__pyx_v_result, __pyx_v_item); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 680, __pyx_L1_error) } __pyx_L6:; /* "View.MemoryView":667 * have_slices = False * seen_ellipsis = False * for idx, item in enumerate(tup): # <<<<<<<<<<<<<< * if item is Ellipsis: * if not seen_ellipsis: */ } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":682 * result.append(item) * * nslices = ndim - len(result) # <<<<<<<<<<<<<< * if nslices: * result.extend([slice(None)] * nslices) */ __pyx_t_5 = PyList_GET_SIZE(__pyx_v_result); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 682, __pyx_L1_error) __pyx_v_nslices = (__pyx_v_ndim - __pyx_t_5); /* "View.MemoryView":683 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ __pyx_t_1 = (__pyx_v_nslices != 0); if (__pyx_t_1) { /* "View.MemoryView":684 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_t_3 = PyList_New(1 * ((__pyx_v_nslices<0) ? 0:__pyx_v_nslices)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); { Py_ssize_t __pyx_temp; for (__pyx_temp=0; __pyx_temp < __pyx_v_nslices; __pyx_temp++) { __Pyx_INCREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); PyList_SET_ITEM(__pyx_t_3, __pyx_temp, __pyx_slice__16); } } __pyx_t_9 = __Pyx_PyList_Extend(__pyx_v_result, __pyx_t_3); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":683 * * nslices = ndim - len(result) * if nslices: # <<<<<<<<<<<<<< * result.extend([slice(None)] * nslices) * */ } /* "View.MemoryView":686 * result.extend([slice(None)] * nslices) * * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): */ __Pyx_XDECREF(__pyx_r); if (!__pyx_v_have_slices) { } else { __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; goto __pyx_L14_bool_binop_done; } __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __pyx_t_4; __pyx_t_4 = 0; __pyx_L14_bool_binop_done:; __pyx_t_4 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_7 = PyTuple_New(2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 686, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_7, 1, __pyx_t_4); __pyx_t_3 = 0; __pyx_t_4 = 0; __pyx_r = ((PyObject*)__pyx_t_7); __pyx_t_7 = 0; goto __pyx_L0; /* "View.MemoryView":654 * return isinstance(o, memoryview) * * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< * """ * Replace all ellipses with full slices and fill incomplete indices with */ /* function exit 
code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_11); __Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF(__pyx_v_tup); __Pyx_XDECREF(__pyx_v_result); __Pyx_XDECREF(__pyx_v_idx); __Pyx_XDECREF(__pyx_v_item); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":688 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ static PyObject *assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; Py_ssize_t *__pyx_t_2; Py_ssize_t *__pyx_t_3; int __pyx_t_4; PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); /* "View.MemoryView":689 * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") */ __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { __pyx_t_1 = __pyx_t_3; __pyx_v_suboffset = (__pyx_t_1[0]); /* "View.MemoryView":690 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ __pyx_t_4 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_4) { /* "View.MemoryView":691 * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_Raise(__pyx_t_5, 0, 0, 0); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; __PYX_ERR(1, 691, __pyx_L1_error) /* "View.MemoryView":690 * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * raise ValueError("Indirect dimensions not supported") * */ } } /* "View.MemoryView":688 * return have_slices or nslices, tuple(result) * * cdef assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim): # <<<<<<<<<<<<<< * for suboffset in suboffsets[:ndim]: * if suboffset >= 0: */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":698 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { int __pyx_v_new_ndim; int __pyx_v_suboffset_dim; int __pyx_v_dim; __Pyx_memviewslice __pyx_v_src; __Pyx_memviewslice __pyx_v_dst; __Pyx_memviewslice *__pyx_v_p_src; struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; __Pyx_memviewslice *__pyx_v_p_dst; 
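/* Illustrative sketch (hand-written comment, not produced by Cython; the
 * names mv/arr/sub below are hypothetical): the generated indexing loop in
 * this function dispatches on each entry of `indices`, which at the Cython
 * level corresponds to user code such as
 *
 *     cdef double[:, :, :] mv = arr
 *     sub = mv[2, 1:8:2, None]
 *
 * An integer index (2) calls slice_memviewslice() with have_start/stop/step
 * all zero and is_slice false, so that dimension is consumed rather than
 * kept; a slice (1:8:2) passes its start/stop/step through for clamping in
 * slice_memviewslice(); and None inserts a new length-1 dimension by writing
 * shape=1, stride=0, suboffset=-1 into the destination slice and bumping
 * new_ndim. */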
int *__pyx_v_p_suboffset_dim; Py_ssize_t __pyx_v_start; Py_ssize_t __pyx_v_stop; Py_ssize_t __pyx_v_step; int __pyx_v_have_start; int __pyx_v_have_stop; int __pyx_v_have_step; PyObject *__pyx_v_index = NULL; struct __pyx_memoryview_obj *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; struct __pyx_memoryview_obj *__pyx_t_4; char *__pyx_t_5; int __pyx_t_6; Py_ssize_t __pyx_t_7; PyObject *(*__pyx_t_8)(PyObject *); PyObject *__pyx_t_9 = NULL; Py_ssize_t __pyx_t_10; int __pyx_t_11; Py_ssize_t __pyx_t_12; __Pyx_RefNannySetupContext("memview_slice", 0); /* "View.MemoryView":699 * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< * cdef bint negative_step * cdef __Pyx_memviewslice src, dst */ __pyx_v_new_ndim = 0; __pyx_v_suboffset_dim = -1; /* "View.MemoryView":706 * * * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< * * cdef _memoryviewslice memviewsliceobj */ memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst))); /* "View.MemoryView":710 * cdef _memoryviewslice memviewsliceobj * * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ #ifndef CYTHON_WITHOUT_ASSERTIONS if (unlikely(!Py_OptimizeFlag)) { if (unlikely(!((__pyx_v_memview->view.ndim > 0) != 0))) { PyErr_SetNone(PyExc_AssertionError); __PYX_ERR(1, 710, __pyx_L1_error) } } #endif /* "View.MemoryView":712 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":713 * * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview # <<<<<<<<<<<<<< * p_src = &memviewsliceobj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 713, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":714 * if isinstance(memview, _memoryviewslice): * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, &src) */ __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); /* "View.MemoryView":712 * assert memview.view.ndim > 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * memviewsliceobj = memview * p_src = &memviewsliceobj.from_slice */ goto __pyx_L3; } /* "View.MemoryView":716 * p_src = &memviewsliceobj.from_slice * else: * slice_copy(memview, &src) # <<<<<<<<<<<<<< * p_src = &src * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); /* "View.MemoryView":717 * else: * slice_copy(memview, &src) * p_src = &src # <<<<<<<<<<<<<< * * */ __pyx_v_p_src = (&__pyx_v_src); } __pyx_L3:; /* "View.MemoryView":723 * * * dst.memview = p_src.memview # <<<<<<<<<<<<<< * dst.data = p_src.data * */ __pyx_t_4 = __pyx_v_p_src->memview; __pyx_v_dst.memview = __pyx_t_4; /* "View.MemoryView":724 * * dst.memview = p_src.memview * dst.data = p_src.data # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_v_p_src->data; __pyx_v_dst.data = __pyx_t_5; /* "View.MemoryView":729 * * * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< * cdef int *p_suboffset_dim = 
&suboffset_dim * cdef Py_ssize_t start, stop, step */ __pyx_v_p_dst = (&__pyx_v_dst); /* "View.MemoryView":730 * * cdef __Pyx_memviewslice *p_dst = &dst * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< * cdef Py_ssize_t start, stop, step * cdef bint have_start, have_stop, have_step */ __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); /* "View.MemoryView":734 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ __pyx_t_6 = 0; if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { __pyx_t_3 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_3); __pyx_t_7 = 0; __pyx_t_8 = NULL; } else { __pyx_t_7 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_8 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 734, __pyx_L1_error) } for (;;) { if (likely(!__pyx_t_8)) { if (likely(PyList_CheckExact(__pyx_t_3))) { if (__pyx_t_7 >= PyList_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 734, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } else { if (__pyx_t_7 >= PyTuple_GET_SIZE(__pyx_t_3)) break; #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS __pyx_t_9 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_7); __Pyx_INCREF(__pyx_t_9); __pyx_t_7++; if (unlikely(0 < 0)) __PYX_ERR(1, 734, __pyx_L1_error) #else __pyx_t_9 = PySequence_ITEM(__pyx_t_3, __pyx_t_7); __pyx_t_7++; if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 734, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); #endif } } else { __pyx_t_9 = __pyx_t_8(__pyx_t_3); if (unlikely(!__pyx_t_9)) { PyObject* exc_type = PyErr_Occurred(); if (exc_type) { if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); else __PYX_ERR(1, 734, __pyx_L1_error) } break; } __Pyx_GOTREF(__pyx_t_9); } __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_9); __pyx_t_9 = 0; __pyx_v_dim = __pyx_t_6; __pyx_t_6 = (__pyx_t_6 + 1); /* "View.MemoryView":735 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ __pyx_t_2 = (PyIndex_Check(__pyx_v_index) != 0); if (__pyx_t_2) { /* "View.MemoryView":739 * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, * index, 0, 0, # start, stop, step # <<<<<<<<<<<<<< * 0, 0, 0, # have_{start,stop,step} * False) */ __pyx_t_10 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_10 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 739, __pyx_L1_error) /* "View.MemoryView":736 * for dim, index in enumerate(indices): * if PyIndex_Check(index): * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_t_10, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 736, 
__pyx_L1_error) /* "View.MemoryView":735 * * for dim, index in enumerate(indices): * if PyIndex_Check(index): # <<<<<<<<<<<<<< * slice_memviewslice( * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], */ goto __pyx_L6; } /* "View.MemoryView":742 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ __pyx_t_2 = (__pyx_v_index == Py_None); __pyx_t_1 = (__pyx_t_2 != 0); if (__pyx_t_1) { /* "View.MemoryView":743 * False) * elif index is None: * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 */ (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; /* "View.MemoryView":744 * elif index is None: * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 */ (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; /* "View.MemoryView":745 * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< * new_ndim += 1 * else: */ (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; /* "View.MemoryView":746 * p_dst.strides[new_ndim] = 0 * p_dst.suboffsets[new_ndim] = -1 * new_ndim += 1 # <<<<<<<<<<<<<< * else: * start = index.start or 0 */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); /* "View.MemoryView":742 * 0, 0, 0, # have_{start,stop,step} * False) * elif index is None: # <<<<<<<<<<<<<< * p_dst.shape[new_ndim] = 1 * p_dst.strides[new_ndim] = 0 */ goto __pyx_L6; } /* "View.MemoryView":748 * new_ndim += 1 * else: * start = index.start or 0 # <<<<<<<<<<<<<< * stop = index.stop or 0 * step = index.step or 0 */ /*else*/ { __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 748, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 748, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 748, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L7_bool_binop_done; } __pyx_t_10 = 0; __pyx_L7_bool_binop_done:; __pyx_v_start = __pyx_t_10; /* "View.MemoryView":749 * else: * start = index.start or 0 * stop = index.stop or 0 # <<<<<<<<<<<<<< * step = index.step or 0 * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 749, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 749, __pyx_L1_error) if (!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 749, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L9_bool_binop_done; } __pyx_t_10 = 0; __pyx_L9_bool_binop_done:; __pyx_v_stop = __pyx_t_10; /* "View.MemoryView":750 * start = index.start or 0 * stop = index.stop or 0 * step = index.step or 0 # <<<<<<<<<<<<<< * * have_start = index.start is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 750, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_9); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 750, __pyx_L1_error) if 
(!__pyx_t_1) { __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } else { __pyx_t_12 = __Pyx_PyIndex_AsSsize_t(__pyx_t_9); if (unlikely((__pyx_t_12 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 750, __pyx_L1_error) __pyx_t_10 = __pyx_t_12; __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; goto __pyx_L11_bool_binop_done; } __pyx_t_10 = 0; __pyx_L11_bool_binop_done:; __pyx_v_step = __pyx_t_10; /* "View.MemoryView":752 * step = index.step or 0 * * have_start = index.start is not None # <<<<<<<<<<<<<< * have_stop = index.stop is not None * have_step = index.step is not None */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 752, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_start = __pyx_t_1; /* "View.MemoryView":753 * * have_start = index.start is not None * have_stop = index.stop is not None # <<<<<<<<<<<<<< * have_step = index.step is not None * */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 753, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_stop = __pyx_t_1; /* "View.MemoryView":754 * have_start = index.start is not None * have_stop = index.stop is not None * have_step = index.step is not None # <<<<<<<<<<<<<< * * slice_memviewslice( */ __pyx_t_9 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 754, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __pyx_t_1 = (__pyx_t_9 != Py_None); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; __pyx_v_have_step = __pyx_t_1; /* "View.MemoryView":756 * have_step = index.step is not None * * slice_memviewslice( # <<<<<<<<<<<<<< * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], * dim, new_ndim, p_suboffset_dim, */ __pyx_t_11 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_11 == ((int)-1))) __PYX_ERR(1, 756, __pyx_L1_error) /* "View.MemoryView":762 * have_start, have_stop, have_step, * True) * new_ndim += 1 # <<<<<<<<<<<<<< * * if isinstance(memview, _memoryviewslice): */ __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); } __pyx_L6:; /* "View.MemoryView":734 * cdef bint have_start, have_stop, have_step * * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< * if PyIndex_Check(index): * slice_memviewslice( */ } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":764 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":765 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":766 * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< * memviewsliceobj.to_dtype_func, * memview.dtype_is_object) */ if 
(unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 766, __pyx_L1_error) } /* "View.MemoryView":767 * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< * memview.dtype_is_object) * else: */ if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 767, __pyx_L1_error) } /* "View.MemoryView":765 * * if isinstance(memview, _memoryviewslice): * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< * memviewsliceobj.to_object_func, * memviewsliceobj.to_dtype_func, */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 765, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 765, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; /* "View.MemoryView":764 * new_ndim += 1 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * return memoryview_fromslice(dst, new_ndim, * memviewsliceobj.to_object_func, */ } /* "View.MemoryView":770 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ /*else*/ { __Pyx_XDECREF(((PyObject *)__pyx_r)); /* "View.MemoryView":771 * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 770, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); /* "View.MemoryView":770 * memview.dtype_is_object) * else: * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< * memview.dtype_is_object) * */ if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 770, __pyx_L1_error) __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_3); __pyx_t_3 = 0; goto __pyx_L0; } /* "View.MemoryView":698 * * @cname('__pyx_memview_slice') * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< * cdef int new_ndim = 0, suboffset_dim = -1, dim * cdef bint negative_step */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); __Pyx_XDECREF(__pyx_v_index); __Pyx_XGIVEREF((PyObject *)__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":795 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { Py_ssize_t __pyx_v_new_shape; int __pyx_v_negative_step; int 
__pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":815 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ __pyx_t_1 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_1) { /* "View.MemoryView":817 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ __pyx_t_1 = ((__pyx_v_start < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":818 * * if start < 0: * start += shape # <<<<<<<<<<<<<< * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) */ __pyx_v_start = (__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":817 * if not is_slice: * * if start < 0: # <<<<<<<<<<<<<< * start += shape * if not 0 <= start < shape: */ } /* "View.MemoryView":819 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ __pyx_t_1 = (0 <= __pyx_v_start); if (__pyx_t_1) { __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); } __pyx_t_2 = ((!(__pyx_t_1 != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":820 * start += shape * if not 0 <= start < shape: * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< * else: * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"Index out of bounds (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 820, __pyx_L1_error) /* "View.MemoryView":819 * if start < 0: * start += shape * if not 0 <= start < shape: # <<<<<<<<<<<<<< * _err_dim(IndexError, "Index out of bounds (axis %d)", dim) * else: */ } /* "View.MemoryView":815 * cdef bint negative_step * * if not is_slice: # <<<<<<<<<<<<<< * * if start < 0: */ goto __pyx_L3; } /* "View.MemoryView":823 * else: * * negative_step = have_step != 0 and step < 0 # <<<<<<<<<<<<<< * * if have_step and step == 0: */ /*else*/ { __pyx_t_1 = ((__pyx_v_have_step != 0) != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L6_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step < 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L6_bool_binop_done:; __pyx_v_negative_step = __pyx_t_2; /* "View.MemoryView":825 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ __pyx_t_1 = (__pyx_v_have_step != 0); if (__pyx_t_1) { } else { __pyx_t_2 = __pyx_t_1; goto __pyx_L9_bool_binop_done; } __pyx_t_1 = ((__pyx_v_step == 0) != 0); __pyx_t_2 = __pyx_t_1; __pyx_L9_bool_binop_done:; if (__pyx_t_2) { /* "View.MemoryView":826 * * if have_step and step == 0: * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Step may not be zero (axis %d)"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 826, __pyx_L1_error) /* "View.MemoryView":825 * negative_step = have_step != 0 and step < 0 * * if have_step and step == 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Step may not be zero (axis %d)", dim) * */ } /* "View.MemoryView":829 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ __pyx_t_2 = (__pyx_v_have_start != 0); if (__pyx_t_2) { /* "View.MemoryView":830 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":831 * if have_start: * if start < 0: * start += shape # <<<<<<<<<<<<<< * if start < 0: * start = 0 */ __pyx_v_start = 
(__pyx_v_start + __pyx_v_shape); /* "View.MemoryView":832 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ __pyx_t_2 = ((__pyx_v_start < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":833 * start += shape * if start < 0: * start = 0 # <<<<<<<<<<<<<< * elif start >= shape: * if negative_step: */ __pyx_v_start = 0; /* "View.MemoryView":832 * if start < 0: * start += shape * if start < 0: # <<<<<<<<<<<<<< * start = 0 * elif start >= shape: */ } /* "View.MemoryView":830 * * if have_start: * if start < 0: # <<<<<<<<<<<<<< * start += shape * if start < 0: */ goto __pyx_L12; } /* "View.MemoryView":834 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ __pyx_t_2 = ((__pyx_v_start >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":835 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":836 * elif start >= shape: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = shape */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":835 * start = 0 * elif start >= shape: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L14; } /* "View.MemoryView":838 * start = shape - 1 * else: * start = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ /*else*/ { __pyx_v_start = __pyx_v_shape; } __pyx_L14:; /* "View.MemoryView":834 * if start < 0: * start = 0 * elif start >= shape: # <<<<<<<<<<<<<< * if negative_step: * start = shape - 1 */ } __pyx_L12:; /* "View.MemoryView":829 * * * if have_start: # <<<<<<<<<<<<<< * if start < 0: * start += shape */ goto __pyx_L11; } /* "View.MemoryView":840 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":841 * else: * if negative_step: * start = shape - 1 # <<<<<<<<<<<<<< * else: * start = 0 */ __pyx_v_start = (__pyx_v_shape - 1); /* "View.MemoryView":840 * start = shape * else: * if negative_step: # <<<<<<<<<<<<<< * start = shape - 1 * else: */ goto __pyx_L15; } /* "View.MemoryView":843 * start = shape - 1 * else: * start = 0 # <<<<<<<<<<<<<< * * if have_stop: */ /*else*/ { __pyx_v_start = 0; } __pyx_L15:; } __pyx_L11:; /* "View.MemoryView":845 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ __pyx_t_2 = (__pyx_v_have_stop != 0); if (__pyx_t_2) { /* "View.MemoryView":846 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":847 * if have_stop: * if stop < 0: * stop += shape # <<<<<<<<<<<<<< * if stop < 0: * stop = 0 */ __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); /* "View.MemoryView":848 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ __pyx_t_2 = ((__pyx_v_stop < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":849 * stop += shape * if stop < 0: * stop = 0 # <<<<<<<<<<<<<< * elif stop > shape: * stop = shape */ __pyx_v_stop = 0; /* "View.MemoryView":848 * if stop < 0: * stop += shape * if stop < 0: # <<<<<<<<<<<<<< * stop = 0 * elif stop > shape: */ } /* "View.MemoryView":846 * * if have_stop: * if stop < 0: # <<<<<<<<<<<<<< * stop += shape * if stop < 0: */ goto __pyx_L17; } /* "View.MemoryView":850 * if stop < 0: * stop = 0 * elif stop > shape: # 
<<<<<<<<<<<<<< * stop = shape * else: */ __pyx_t_2 = ((__pyx_v_stop > __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":851 * stop = 0 * elif stop > shape: * stop = shape # <<<<<<<<<<<<<< * else: * if negative_step: */ __pyx_v_stop = __pyx_v_shape; /* "View.MemoryView":850 * if stop < 0: * stop = 0 * elif stop > shape: # <<<<<<<<<<<<<< * stop = shape * else: */ } __pyx_L17:; /* "View.MemoryView":845 * start = 0 * * if have_stop: # <<<<<<<<<<<<<< * if stop < 0: * stop += shape */ goto __pyx_L16; } /* "View.MemoryView":853 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ /*else*/ { __pyx_t_2 = (__pyx_v_negative_step != 0); if (__pyx_t_2) { /* "View.MemoryView":854 * else: * if negative_step: * stop = -1 # <<<<<<<<<<<<<< * else: * stop = shape */ __pyx_v_stop = -1L; /* "View.MemoryView":853 * stop = shape * else: * if negative_step: # <<<<<<<<<<<<<< * stop = -1 * else: */ goto __pyx_L19; } /* "View.MemoryView":856 * stop = -1 * else: * stop = shape # <<<<<<<<<<<<<< * * if not have_step: */ /*else*/ { __pyx_v_stop = __pyx_v_shape; } __pyx_L19:; } __pyx_L16:; /* "View.MemoryView":858 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ __pyx_t_2 = ((!(__pyx_v_have_step != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":859 * * if not have_step: * step = 1 # <<<<<<<<<<<<<< * * */ __pyx_v_step = 1; /* "View.MemoryView":858 * stop = shape * * if not have_step: # <<<<<<<<<<<<<< * step = 1 * */ } /* "View.MemoryView":863 * * with cython.cdivision(True): * new_shape = (stop - start) // step # <<<<<<<<<<<<<< * * if (stop - start) - step * new_shape: */ __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); /* "View.MemoryView":865 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); if (__pyx_t_2) { /* "View.MemoryView":866 * * if (stop - start) - step * new_shape: * new_shape += 1 # <<<<<<<<<<<<<< * * if new_shape < 0: */ __pyx_v_new_shape = (__pyx_v_new_shape + 1); /* "View.MemoryView":865 * new_shape = (stop - start) // step * * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< * new_shape += 1 * */ } /* "View.MemoryView":868 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ __pyx_t_2 = ((__pyx_v_new_shape < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":869 * * if new_shape < 0: * new_shape = 0 # <<<<<<<<<<<<<< * * */ __pyx_v_new_shape = 0; /* "View.MemoryView":868 * new_shape += 1 * * if new_shape < 0: # <<<<<<<<<<<<<< * new_shape = 0 * */ } /* "View.MemoryView":872 * * * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset */ (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); /* "View.MemoryView":873 * * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< * dst.suboffsets[new_ndim] = suboffset * */ (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; /* "View.MemoryView":874 * dst.strides[new_ndim] = stride * step * dst.shape[new_ndim] = new_shape * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< * * */ (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; } __pyx_L3:; /* "View.MemoryView":877 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ __pyx_t_2 = (((__pyx_v_suboffset_dim[0]) < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":878 * * 
if suboffset_dim[0] < 0: * dst.data += start * stride # <<<<<<<<<<<<<< * else: * dst.suboffsets[suboffset_dim[0]] += start * stride */ __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); /* "View.MemoryView":877 * * * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< * dst.data += start * stride * else: */ goto __pyx_L23; } /* "View.MemoryView":880 * dst.data += start * stride * else: * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< * * if suboffset >= 0: */ /*else*/ { __pyx_t_3 = (__pyx_v_suboffset_dim[0]); (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); } __pyx_L23:; /* "View.MemoryView":882 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":883 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ __pyx_t_2 = ((!(__pyx_v_is_slice != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":884 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ __pyx_t_2 = ((__pyx_v_new_ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":885 * if not is_slice: * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset # <<<<<<<<<<<<<< * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " */ __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); /* "View.MemoryView":884 * if suboffset >= 0: * if not is_slice: * if new_ndim == 0: # <<<<<<<<<<<<<< * dst.data = (<char **> dst.data)[0] + suboffset * else: */ goto __pyx_L26; } /* "View.MemoryView":887 * dst.data = (<char **> dst.data)[0] + suboffset * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< * "must be indexed and not sliced", dim) * else: */ /*else*/ { /* "View.MemoryView":888 * else: * _err_dim(IndexError, "All dimensions preceding dimension %d " * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< * else: * suboffset_dim[0] = new_ndim */ __pyx_t_3 = __pyx_memoryview_err_dim(__pyx_builtin_IndexError, ((char *)"All dimensions preceding dimension %d must be indexed and not sliced"), __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 887, __pyx_L1_error) } __pyx_L26:; /* "View.MemoryView":883 * * if suboffset >= 0: * if not is_slice: # <<<<<<<<<<<<<< * if new_ndim == 0: * dst.data = (<char **> dst.data)[0] + suboffset */ goto __pyx_L25; } /* "View.MemoryView":890 * "must be indexed and not sliced", dim) * else: * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< * * return 0 */ /*else*/ { (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; } __pyx_L25:; /* "View.MemoryView":882 * dst.suboffsets[suboffset_dim[0]] += start * stride * * if suboffset >= 0: # <<<<<<<<<<<<<< * if not is_slice: * if new_ndim == 0: */ } /* "View.MemoryView":892 * suboffset_dim[0] = new_ndim * * return 0 # <<<<<<<<<<<<<< * * */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":795 * * @cname('__pyx_memoryview_slice_memviewslice') * cdef int slice_memviewslice( # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", 
__pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":898 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { Py_ssize_t __pyx_v_shape; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_suboffset; Py_ssize_t __pyx_v_itemsize; char *__pyx_v_resultp; char *__pyx_r; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; __Pyx_RefNannySetupContext("pybuffer_index", 0); /* "View.MemoryView":900 * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< * cdef Py_ssize_t itemsize = view.itemsize * cdef char *resultp */ __pyx_v_suboffset = -1L; /* "View.MemoryView":901 * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< * cdef char *resultp * */ __pyx_t_1 = __pyx_v_view->itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":904 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ __pyx_t_2 = ((__pyx_v_view->ndim == 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":905 * * if view.ndim == 0: * shape = view.len / itemsize # <<<<<<<<<<<<<< * stride = itemsize * else: */ if (unlikely(__pyx_v_itemsize == 0)) { PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); __PYX_ERR(1, 905, __pyx_L1_error) } else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); __PYX_ERR(1, 905, __pyx_L1_error) } __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); /* "View.MemoryView":906 * if view.ndim == 0: * shape = view.len / itemsize * stride = itemsize # <<<<<<<<<<<<<< * else: * shape = view.shape[dim] */ __pyx_v_stride = __pyx_v_itemsize; /* "View.MemoryView":904 * cdef char *resultp * * if view.ndim == 0: # <<<<<<<<<<<<<< * shape = view.len / itemsize * stride = itemsize */ goto __pyx_L3; } /* "View.MemoryView":908 * stride = itemsize * else: * shape = view.shape[dim] # <<<<<<<<<<<<<< * stride = view.strides[dim] * if view.suboffsets != NULL: */ /*else*/ { __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); /* "View.MemoryView":909 * else: * shape = view.shape[dim] * stride = view.strides[dim] # <<<<<<<<<<<<<< * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] */ __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); /* "View.MemoryView":910 * shape = view.shape[dim] * stride = view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ __pyx_t_2 = ((__pyx_v_view->suboffsets != NULL) != 0); if (__pyx_t_2) { /* "View.MemoryView":911 * stride = view.strides[dim] * if view.suboffsets != NULL: * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< * * if index < 0: */ __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); /* "View.MemoryView":910 * shape = view.shape[dim] * stride = 
view.strides[dim] * if view.suboffsets != NULL: # <<<<<<<<<<<<<< * suboffset = view.suboffsets[dim] * */ } } __pyx_L3:; /* "View.MemoryView":913 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":914 * * if index < 0: * index += view.shape[dim] # <<<<<<<<<<<<<< * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) */ __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); /* "View.MemoryView":915 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index < 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":916 * index += view.shape[dim] * if index < 0: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * if index >= shape: */ __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_3, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 916, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_4, 0, 0, 0); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __PYX_ERR(1, 916, __pyx_L1_error) /* "View.MemoryView":915 * if index < 0: * index += view.shape[dim] * if index < 0: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":913 * suboffset = view.suboffsets[dim] * * if index < 0: # <<<<<<<<<<<<<< * index += view.shape[dim] * if index < 0: */ } /* "View.MemoryView":918 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index >= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ __pyx_t_2 = ((__pyx_v_index >= __pyx_v_shape) != 0); if (__pyx_t_2) { /* "View.MemoryView":919 * * if index >= shape: * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) # <<<<<<<<<<<<<< * * resultp = bufp + index * stride */ __pyx_t_4 = PyInt_FromSsize_t(__pyx_v_dim); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_IndexError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 919, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 919, __pyx_L1_error) /* "View.MemoryView":918 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * if index 
>= shape: # <<<<<<<<<<<<<< * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * */ } /* "View.MemoryView":921 * raise IndexError("Out of bounds on buffer access (axis %d)" % dim) * * resultp = bufp + index * stride # <<<<<<<<<<<<<< * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset */ __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); /* "View.MemoryView":922 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ __pyx_t_2 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":923 * resultp = bufp + index * stride * if suboffset >= 0: * resultp = (<char **> resultp)[0] + suboffset # <<<<<<<<<<<<<< * * return resultp */ __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); /* "View.MemoryView":922 * * resultp = bufp + index * stride * if suboffset >= 0: # <<<<<<<<<<<<<< * resultp = (<char **> resultp)[0] + suboffset * */ } /* "View.MemoryView":925 * resultp = (<char **> resultp)[0] + suboffset * * return resultp # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_resultp; goto __pyx_L0; /* "View.MemoryView":898 * * @cname('__pyx_pybuffer_index') * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< * Py_ssize_t dim) except NULL: * cdef Py_ssize_t shape, stride, suboffset = -1 */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":931 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { int __pyx_v_ndim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; int __pyx_v_i; int __pyx_v_j; int __pyx_r; int __pyx_t_1; Py_ssize_t *__pyx_t_2; long __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; int __pyx_t_6; int __pyx_t_7; int __pyx_t_8; /* "View.MemoryView":932 * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< * * cdef Py_ssize_t *shape = memslice.shape */ __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; __pyx_v_ndim = __pyx_t_1; /* "View.MemoryView":934 * cdef int ndim = memslice.memview.view.ndim * * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< * cdef Py_ssize_t *strides = memslice.strides * */ __pyx_t_2 = __pyx_v_memslice->shape; __pyx_v_shape = __pyx_t_2; /* "View.MemoryView":935 * * cdef Py_ssize_t *shape = memslice.shape * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< * * */ __pyx_t_2 = __pyx_v_memslice->strides; __pyx_v_strides = __pyx_t_2; /* "View.MemoryView":939 * * cdef int i, j * for i in range(ndim / 2): # <<<<<<<<<<<<<< * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] */ __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_3; __pyx_t_1+=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":940 * cdef int i, j * for i in range(ndim / 2): * j = ndim - 1 - i # <<<<<<<<<<<<<< * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] */ __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); /* "View.MemoryView":941 * for i in range(ndim 
/ 2): * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< * shape[i], shape[j] = shape[j], shape[i] * */ __pyx_t_4 = (__pyx_v_strides[__pyx_v_j]); __pyx_t_5 = (__pyx_v_strides[__pyx_v_i]); (__pyx_v_strides[__pyx_v_i]) = __pyx_t_4; (__pyx_v_strides[__pyx_v_j]) = __pyx_t_5; /* "View.MemoryView":942 * j = ndim - 1 - i * strides[i], strides[j] = strides[j], strides[i] * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: */ __pyx_t_5 = (__pyx_v_shape[__pyx_v_j]); __pyx_t_4 = (__pyx_v_shape[__pyx_v_i]); (__pyx_v_shape[__pyx_v_i]) = __pyx_t_5; (__pyx_v_shape[__pyx_v_j]) = __pyx_t_4; /* "View.MemoryView":944 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0) != 0); if (!__pyx_t_7) { } else { __pyx_t_6 = __pyx_t_7; goto __pyx_L6_bool_binop_done; } __pyx_t_7 = (((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0) != 0); __pyx_t_6 = __pyx_t_7; __pyx_L6_bool_binop_done:; if (__pyx_t_6) { /* "View.MemoryView":945 * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< * * return 1 */ __pyx_t_8 = __pyx_memoryview_err(__pyx_builtin_ValueError, ((char *)"Cannot transpose memoryview with indirect dimensions")); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(1, 945, __pyx_L1_error) /* "View.MemoryView":944 * shape[i], shape[j] = shape[j], shape[i] * * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * */ } } /* "View.MemoryView":947 * _err(ValueError, "Cannot transpose memoryview with indirect dimensions") * * return 1 # <<<<<<<<<<<<<< * * */ __pyx_r = 1; goto __pyx_L0; /* "View.MemoryView":931 * * @cname('__pyx_memslice_transpose') * cdef int transpose_memslice(__Pyx_memviewslice *memslice) nogil except 0: # <<<<<<<<<<<<<< * cdef int ndim = memslice.memview.view.ndim * */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = 0; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":964 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* Python wrapper */ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); } static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__dealloc__", 0); /* "View.MemoryView":965 * * def __dealloc__(self): * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< * * cdef 
convert_item_to_object(self, char *itemp): */ __PYX_XDEC_MEMVIEW((&__pyx_v_self->from_slice), 1); /* "View.MemoryView":964 * cdef int (*to_dtype_func)(char *, object) except 0 * * def __dealloc__(self): # <<<<<<<<<<<<<< * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":967 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; __Pyx_RefNannySetupContext("convert_item_to_object", 0); /* "View.MemoryView":968 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ __pyx_t_1 = ((__pyx_v_self->to_object_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":969 * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: * return self.to_object_func(itemp) # <<<<<<<<<<<<<< * else: * return memoryview.convert_item_to_object(self, itemp) */ __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 969, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; /* "View.MemoryView":968 * * cdef convert_item_to_object(self, char *itemp): * if self.to_object_func != NULL: # <<<<<<<<<<<<<< * return self.to_object_func(itemp) * else: */ } /* "View.MemoryView":971 * return self.to_object_func(itemp) * else: * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< * * cdef assign_item_from_object(self, char *itemp, object value): */ /*else*/ { __Pyx_XDECREF(__pyx_r); __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 971, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_r = __pyx_t_2; __pyx_t_2 = 0; goto __pyx_L0; } /* "View.MemoryView":967 * __PYX_XDEC_MEMVIEW(&self.from_slice, 1) * * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< * if self.to_object_func != NULL: * return self.to_object_func(itemp) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":973 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("assign_item_from_object", 0); /* "View.MemoryView":974 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ __pyx_t_1 = ((__pyx_v_self->to_dtype_func != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":975 * cdef 
assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< * else: * memoryview.assign_item_from_object(self, itemp, value) */ __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 975, __pyx_L1_error) /* "View.MemoryView":974 * * cdef assign_item_from_object(self, char *itemp, object value): * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< * self.to_dtype_func(itemp, value) * else: */ goto __pyx_L3; } /* "View.MemoryView":977 * self.to_dtype_func(itemp, value) * else: * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< * * @property */ /*else*/ { __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 977, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } __pyx_L3:; /* "View.MemoryView":973 * return memoryview.convert_item_to_object(self, itemp) * * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< * if self.to_dtype_func != NULL: * self.to_dtype_func(itemp, value) */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":980 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ static PyObject *__pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(PyObject *__pyx_v_self) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); __pyx_r = __pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView_16_memoryviewslice_4base___get__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__get__", 0); /* "View.MemoryView":981 * @property * def base(self): * return self.from_object # <<<<<<<<<<<<<< * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v_self->from_object); __pyx_r = __pyx_v_self->from_object; goto __pyx_L0; /* "View.MemoryView":980 * * @property * def base(self): # <<<<<<<<<<<<<< * return self.from_object * */ /* function exit code */ __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations 
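/* Descriptive note: this is the Python-visible wrapper for
   _memoryviewslice.__reduce_cython__. It only casts `self` to the
   _memoryviewslice struct and forwards to the implementation function below,
   which always raises TypeError because objects with a non-trivial __cinit__
   cannot be pickled through the default __reduce__. The __Pyx_RefNanny*
   macros are reference-count debugging hooks that normally compile to
   no-ops. */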
__Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__reduce_cython__", 0); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 2, __pyx_L1_error) /* "(tree fragment)":1 * def __reduce_cython__(self): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* Python wrapper */ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("__setstate_cython__", 0); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":3 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); 
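/* Standard Cython error-exit path: temporaries are released, a traceback
   frame naming the Cython-level function is pushed via __Pyx_AddTraceback,
   and NULL is returned so the caller sees the pending Python exception. */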
__Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":987 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; Py_ssize_t __pyx_v_suboffset; PyObject *__pyx_v_length = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; __Pyx_TypeInfo *__pyx_t_4; Py_buffer __pyx_t_5; Py_ssize_t *__pyx_t_6; Py_ssize_t *__pyx_t_7; Py_ssize_t *__pyx_t_8; Py_ssize_t __pyx_t_9; __Pyx_RefNannySetupContext("memoryview_fromslice", 0); /* "View.MemoryView":995 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ __pyx_t_1 = ((((PyObject *)__pyx_v_memviewslice.memview) == Py_None) != 0); if (__pyx_t_1) { /* "View.MemoryView":996 * * if <PyObject *> memviewslice.memview == Py_None: * return None # <<<<<<<<<<<<<< * * */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(Py_None); __pyx_r = Py_None; goto __pyx_L0; /* "View.MemoryView":995 * cdef _memoryviewslice result * * if <PyObject *> memviewslice.memview == Py_None: # <<<<<<<<<<<<<< * return None * */ } /* "View.MemoryView":1001 * * * result = _memoryviewslice(None, 0, dtype_is_object) # <<<<<<<<<<<<<< * * result.from_slice = memviewslice */ __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(Py_None); __Pyx_GIVEREF(Py_None); PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None); __Pyx_INCREF(__pyx_int_0); __Pyx_GIVEREF(__pyx_int_0); PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1001, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1003 * result = _memoryviewslice(None, 0, dtype_is_object) * * result.from_slice = memviewslice # <<<<<<<<<<<<<< * __PYX_INC_MEMVIEW(&memviewslice, 1) * */ __pyx_v_result->from_slice = __pyx_v_memviewslice; /* "View.MemoryView":1004 * * result.from_slice = memviewslice * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< * * result.from_object = (<memoryview> memviewslice.memview).base */ __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); /* "View.MemoryView":1006 * __PYX_INC_MEMVIEW(&memviewslice, 1) * * result.from_object = (<memoryview> memviewslice.memview).base # <<<<<<<<<<<<<< * result.typeinfo = memviewslice.memview.typeinfo * */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_memviewslice.memview), __pyx_n_s_base); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1006, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_GIVEREF(__pyx_t_2); __Pyx_GOTREF(__pyx_v_result->from_object); 
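/* Instrumented form of the assignment
   `result.from_object = (<memoryview> memviewslice.memview).base`:
   ownership of the freshly fetched `base` reference is transferred to the
   struct field once the old from_object reference has been dropped. */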
__Pyx_DECREF(__pyx_v_result->from_object); __pyx_v_result->from_object = __pyx_t_2; __pyx_t_2 = 0; /* "View.MemoryView":1007 * * result.from_object = (<memoryview> memviewslice.memview).base * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< * * result.view = memviewslice.memview.view */ __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; /* "View.MemoryView":1009 * result.typeinfo = memviewslice.memview.typeinfo * * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim */ __pyx_t_5 = __pyx_v_memviewslice.memview->view; __pyx_v_result->__pyx_base.view = __pyx_t_5; /* "View.MemoryView":1010 * * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data # <<<<<<<<<<<<<< * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None */ __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); /* "View.MemoryView":1011 * result.view = memviewslice.memview.view * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim # <<<<<<<<<<<<<< * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) */ __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; /* "View.MemoryView":1012 * result.view.buf = <void *> memviewslice.data * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< * Py_INCREF(Py_None) * */ ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; /* "View.MemoryView":1013 * result.view.ndim = ndim * (<__pyx_buffer *> &result.view).obj = Py_None * Py_INCREF(Py_None) # <<<<<<<<<<<<<< * * result.flags = PyBUF_RECORDS */ Py_INCREF(Py_None); /* "View.MemoryView":1015 * Py_INCREF(Py_None) * * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< * * result.view.shape = <Py_ssize_t *> result.from_slice.shape */ __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; /* "View.MemoryView":1017 * result.flags = PyBUF_RECORDS * * result.view.shape = <Py_ssize_t *> result.from_slice.shape # <<<<<<<<<<<<<< * result.view.strides = <Py_ssize_t *> result.from_slice.strides * */ __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); /* "View.MemoryView":1018 * * result.view.shape = <Py_ssize_t *> result.from_slice.shape * result.view.strides = <Py_ssize_t *> result.from_slice.strides # <<<<<<<<<<<<<< * * */ __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); /* "View.MemoryView":1021 * * * result.view.suboffsets = NULL # <<<<<<<<<<<<<< * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: */ __pyx_v_result->__pyx_base.view.suboffsets = NULL; /* "View.MemoryView":1022 * * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets */ __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_v_suboffset = (__pyx_t_6[0]); /* "View.MemoryView":1023 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ __pyx_t_1 = ((__pyx_v_suboffset >= 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1024 * for suboffset in result.from_slice.suboffsets[:ndim]: * if 
suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets # <<<<<<<<<<<<<< * break * */ __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); /* "View.MemoryView":1025 * if suboffset >= 0: * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break # <<<<<<<<<<<<<< * * result.view.len = result.view.itemsize */ goto __pyx_L5_break; /* "View.MemoryView":1023 * result.view.suboffsets = NULL * for suboffset in result.from_slice.suboffsets[:ndim]: * if suboffset >= 0: # <<<<<<<<<<<<<< * result.view.suboffsets = <Py_ssize_t *> result.from_slice.suboffsets * break */ } } __pyx_L5_break:; /* "View.MemoryView":1027 * break * * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< * for length in result.view.shape[:ndim]: * result.view.len *= length */ __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; /* "View.MemoryView":1028 * * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< * result.view.len *= length * */ __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { __pyx_t_6 = __pyx_t_8; __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1028, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); __pyx_t_2 = 0; /* "View.MemoryView":1029 * result.view.len = result.view.itemsize * for length in result.view.shape[:ndim]: * result.view.len *= length # <<<<<<<<<<<<<< * * result.to_object_func = to_object_func */ __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1029, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1029, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __pyx_v_result->__pyx_base.view.len = __pyx_t_9; } /* "View.MemoryView":1031 * result.view.len *= length * * result.to_object_func = to_object_func # <<<<<<<<<<<<<< * result.to_dtype_func = to_dtype_func * */ __pyx_v_result->to_object_func = __pyx_v_to_object_func; /* "View.MemoryView":1032 * * result.to_object_func = to_object_func * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< * * return result */ __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; /* "View.MemoryView":1034 * result.to_dtype_func = to_dtype_func * * return result # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_get_slice_from_memoryview') */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(((PyObject *)__pyx_v_result)); __pyx_r = ((PyObject *)__pyx_v_result); goto __pyx_L0; /* "View.MemoryView":987 * * @cname('__pyx_memoryview_fromslice') * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< * int ndim, * object (*to_object_func)(char *), */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_result); __Pyx_XDECREF(__pyx_v_length); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* 
"View.MemoryView":1037 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; __Pyx_memviewslice *__pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *__pyx_t_3 = NULL; __Pyx_RefNannySetupContext("get_slice_from_memview", 0); /* "View.MemoryView":1040 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1041 * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): * obj = memview # <<<<<<<<<<<<<< * return &obj.from_slice * else: */ if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1041, __pyx_L1_error) __pyx_t_3 = ((PyObject *)__pyx_v_memview); __Pyx_INCREF(__pyx_t_3); __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_3); __pyx_t_3 = 0; /* "View.MemoryView":1042 * if isinstance(memview, _memoryviewslice): * obj = memview * return &obj.from_slice # <<<<<<<<<<<<<< * else: * slice_copy(memview, mslice) */ __pyx_r = (&__pyx_v_obj->from_slice); goto __pyx_L0; /* "View.MemoryView":1040 * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * obj = memview * return &obj.from_slice */ } /* "View.MemoryView":1044 * return &obj.from_slice * else: * slice_copy(memview, mslice) # <<<<<<<<<<<<<< * return mslice * */ /*else*/ { __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); /* "View.MemoryView":1045 * else: * slice_copy(memview, mslice) * return mslice # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_slice_copy') */ __pyx_r = __pyx_v_mslice; goto __pyx_L0; } /* "View.MemoryView":1037 * * @cname('__pyx_memoryview_get_slice_from_memoryview') * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< * __Pyx_memviewslice *mslice): * cdef _memoryviewslice obj */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_3); __Pyx_WriteUnraisable("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename, 1, 0); __pyx_r = 0; __pyx_L0:; __Pyx_XDECREF((PyObject *)__pyx_v_obj); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1048 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { int __pyx_v_dim; Py_ssize_t *__pyx_v_shape; Py_ssize_t *__pyx_v_strides; Py_ssize_t *__pyx_v_suboffsets; __Pyx_RefNannyDeclarations Py_ssize_t *__pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; __Pyx_RefNannySetupContext("slice_copy", 0); /* "View.MemoryView":1052 * cdef (Py_ssize_t*) shape, strides, suboffsets * * shape = memview.view.shape # <<<<<<<<<<<<<< * strides = memview.view.strides * suboffsets = memview.view.suboffsets */ __pyx_t_1 = 
__pyx_v_memview->view.shape; __pyx_v_shape = __pyx_t_1; /* "View.MemoryView":1053 * * shape = memview.view.shape * strides = memview.view.strides # <<<<<<<<<<<<<< * suboffsets = memview.view.suboffsets * */ __pyx_t_1 = __pyx_v_memview->view.strides; __pyx_v_strides = __pyx_t_1; /* "View.MemoryView":1054 * shape = memview.view.shape * strides = memview.view.strides * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< * * dst.memview = <__pyx_memoryview *> memview */ __pyx_t_1 = __pyx_v_memview->view.suboffsets; __pyx_v_suboffsets = __pyx_t_1; /* "View.MemoryView":1056 * suboffsets = memview.view.suboffsets * * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< * dst.data = <char *> memview.view.buf * */ __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); /* "View.MemoryView":1057 * * dst.memview = <__pyx_memoryview *> memview * dst.data = <char *> memview.view.buf # <<<<<<<<<<<<<< * * for dim in range(memview.view.ndim): */ __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); /* "View.MemoryView":1059 * dst.data = <char *> memview.view.buf * * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] */ __pyx_t_2 = __pyx_v_memview->view.ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_dim = __pyx_t_3; /* "View.MemoryView":1060 * * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 */ (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); /* "View.MemoryView":1061 * for dim in range(memview.view.ndim): * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 * */ (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); /* "View.MemoryView":1062 * dst.shape[dim] = shape[dim] * dst.strides[dim] = strides[dim] * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object') */ if ((__pyx_v_suboffsets != 0)) { __pyx_t_4 = (__pyx_v_suboffsets[__pyx_v_dim]); } else { __pyx_t_4 = -1L; } (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_4; } /* "View.MemoryView":1048 * * @cname('__pyx_memoryview_slice_copy') * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst): # <<<<<<<<<<<<<< * cdef int dim * cdef (Py_ssize_t*) shape, strides, suboffsets */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1065 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { __Pyx_memviewslice __pyx_v_memviewslice; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; __Pyx_RefNannySetupContext("memoryview_copy", 0); /* "View.MemoryView":1068 * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< * return memoryview_copy_from_slice(memview, &memviewslice) * */ __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); /* "View.MemoryView":1069 * cdef __Pyx_memviewslice memviewslice * slice_copy(memview, &memviewslice) * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_object_from_slice') 
*/ __Pyx_XDECREF(__pyx_r); __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1069, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_r = __pyx_t_1; __pyx_t_1 = 0; goto __pyx_L0; /* "View.MemoryView":1065 * * @cname('__pyx_memoryview_copy_object') * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< * "Create a new memoryview object" * cdef __Pyx_memviewslice memviewslice */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1072 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { PyObject *(*__pyx_v_to_object_func)(char *); int (*__pyx_v_to_dtype_func)(char *, PyObject *); PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; int __pyx_t_2; PyObject *(*__pyx_t_3)(char *); int (*__pyx_t_4)(char *, PyObject *); PyObject *__pyx_t_5 = NULL; __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); /* "View.MemoryView":1079 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); __pyx_t_2 = (__pyx_t_1 != 0); if (__pyx_t_2) { /* "View.MemoryView":1080 * * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: */ __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; __pyx_v_to_object_func = __pyx_t_3; /* "View.MemoryView":1081 * if isinstance(memview, _memoryviewslice): * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< * else: * to_object_func = NULL */ __pyx_t_4 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; __pyx_v_to_dtype_func = __pyx_t_4; /* "View.MemoryView":1079 * cdef int (*to_dtype_func)(char *, object) except 0 * * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< * to_object_func = (<_memoryviewslice> memview).to_object_func * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func */ goto __pyx_L3; } /* "View.MemoryView":1083 * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func * else: * to_object_func = NULL # <<<<<<<<<<<<<< * to_dtype_func = NULL * */ /*else*/ { __pyx_v_to_object_func = NULL; /* "View.MemoryView":1084 * else: * to_object_func = NULL * to_dtype_func = NULL # <<<<<<<<<<<<<< * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, */ __pyx_v_to_dtype_func = NULL; } __pyx_L3:; /* "View.MemoryView":1086 * to_dtype_func = NULL * * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< * to_object_func, to_dtype_func, * memview.dtype_is_object) */ __Pyx_XDECREF(__pyx_r); /* "View.MemoryView":1088 * 
return memoryview_fromslice(memviewslice[0], memview.view.ndim, * to_object_func, to_dtype_func, * memview.dtype_is_object) # <<<<<<<<<<<<<< * * */ __pyx_t_5 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1086, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __pyx_r = __pyx_t_5; __pyx_t_5 = 0; goto __pyx_L0; /* "View.MemoryView":1072 * * @cname('__pyx_memoryview_copy_object_from_slice') * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< * """ * Create a new memoryview object from a given memoryview object and slice. */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_5); __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "View.MemoryView":1094 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { Py_ssize_t __pyx_r; int __pyx_t_1; /* "View.MemoryView":1095 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ __pyx_t_1 = ((__pyx_v_arg < 0) != 0); if (__pyx_t_1) { /* "View.MemoryView":1096 * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: * return -arg # <<<<<<<<<<<<<< * else: * return arg */ __pyx_r = (-__pyx_v_arg); goto __pyx_L0; /* "View.MemoryView":1095 * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: * if arg < 0: # <<<<<<<<<<<<<< * return -arg * else: */ } /* "View.MemoryView":1098 * return -arg * else: * return arg # <<<<<<<<<<<<<< * * @cname('__pyx_get_best_slice_order') */ /*else*/ { __pyx_r = __pyx_v_arg; goto __pyx_L0; } /* "View.MemoryView":1094 * * * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) nogil: # <<<<<<<<<<<<<< * if arg < 0: * return -arg */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1101 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_c_stride; Py_ssize_t __pyx_v_f_stride; char __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1106 * """ * cdef int i * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< * cdef Py_ssize_t f_stride = 0 * */ __pyx_v_c_stride = 0; /* "View.MemoryView":1107 * cdef int i * cdef Py_ssize_t c_stride = 0 * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_f_stride = 0; /* "View.MemoryView":1109 * cdef Py_ssize_t f_stride = 0 * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1110 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1111 * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1112 * if mslice.shape[i] > 1: * c_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * for i in range(ndim): */ goto __pyx_L4_break; /* "View.MemoryView":1110 * * for i in range(ndim - 1, -1, -1): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * c_stride = mslice.strides[i] * break */ } } __pyx_L4_break:; /* "View.MemoryView":1114 * break * * for i in range(ndim): # <<<<<<<<<<<<<< * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] */ __pyx_t_1 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_1; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1115 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ __pyx_t_2 = (((__pyx_v_mslice->shape[__pyx_v_i]) > 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1116 * for i in range(ndim): * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< * break * */ __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1117 * if mslice.shape[i] > 1: * f_stride = mslice.strides[i] * break # <<<<<<<<<<<<<< * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): */ goto __pyx_L7_break; /* "View.MemoryView":1115 * * for i in range(ndim): * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< * f_stride = mslice.strides[i] * break */ } } __pyx_L7_break:; /* "View.MemoryView":1119 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ __pyx_t_2 = ((abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1120 * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): * return 'C' # <<<<<<<<<<<<<< * else: * return 'F' */ __pyx_r = 'C'; goto __pyx_L0; /* "View.MemoryView":1119 * break * * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< * return 'C' * else: */ } /* "View.MemoryView":1122 * return 'C' * else: * return 'F' # <<<<<<<<<<<<<< * * @cython.cdivision(True) */ /*else*/ { __pyx_r = 'F'; goto __pyx_L0; } /* "View.MemoryView":1101 * * @cname('__pyx_get_best_slice_order') * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) nogil: # <<<<<<<<<<<<<< * """ * Figure out the best memory access order for a given slice. 
*/ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1125 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; Py_ssize_t __pyx_v_dst_extent; Py_ssize_t __pyx_v_src_stride; Py_ssize_t __pyx_v_dst_stride; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; Py_ssize_t __pyx_t_4; Py_ssize_t __pyx_t_5; /* "View.MemoryView":1132 * * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] */ __pyx_v_src_extent = (__pyx_v_src_shape[0]); /* "View.MemoryView":1133 * cdef Py_ssize_t i * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] */ __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); /* "View.MemoryView":1134 * cdef Py_ssize_t src_extent = src_shape[0] * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t dst_stride = dst_strides[0] * */ __pyx_v_src_stride = (__pyx_v_src_strides[0]); /* "View.MemoryView":1135 * cdef Py_ssize_t dst_extent = dst_shape[0] * cdef Py_ssize_t src_stride = src_strides[0] * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); /* "View.MemoryView":1137 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1138 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ __pyx_t_2 = ((__pyx_v_src_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } __pyx_t_2 = ((__pyx_v_dst_stride > 0) != 0); if (__pyx_t_2) { } else { __pyx_t_1 = __pyx_t_2; goto __pyx_L5_bool_binop_done; } /* "View.MemoryView":1139 * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize * dst_extent) * else: */ __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); if (__pyx_t_2) { __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); } __pyx_t_3 = (__pyx_t_2 != 0); __pyx_t_1 = __pyx_t_3; __pyx_L5_bool_binop_done:; /* "View.MemoryView":1138 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ if (__pyx_t_1) { /* "View.MemoryView":1140 * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ 
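/* Contiguous fast path: when both 1-D strides are positive and equal to the
   item size, the source and destination runs are dense, so the whole extent
   is copied with a single memcpy of itemsize * dst_extent bytes (e.g. 100
   8-byte items -> one 800-byte copy) instead of the per-item loop in the
   else branch below. */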
memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent)); /* "View.MemoryView":1138 * * if ndim == 1: * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< * <size_t> src_stride == itemsize == <size_t> dst_stride): * memcpy(dst_data, src_data, itemsize * dst_extent) */ goto __pyx_L4; } /* "View.MemoryView":1142 * memcpy(dst_data, src_data, itemsize * dst_extent) * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * memcpy(dst_data, src_data, itemsize) * src_data += src_stride */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1143 * else: * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< * src_data += src_stride * dst_data += dst_stride */ memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize); /* "View.MemoryView":1144 * for i in range(dst_extent): * memcpy(dst_data, src_data, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * else: */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1145 * memcpy(dst_data, src_data, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * else: * for i in range(dst_extent): */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L4:; /* "View.MemoryView":1137 * cdef Py_ssize_t dst_stride = dst_strides[0] * * if ndim == 1: # <<<<<<<<<<<<<< * if (src_stride > 0 and dst_stride > 0 and * <size_t> src_stride == itemsize == <size_t> dst_stride): */ goto __pyx_L3; } /* "View.MemoryView":1147 * dst_data += dst_stride * else: * for i in range(dst_extent): # <<<<<<<<<<<<<< * _copy_strided_to_strided(src_data, src_strides + 1, * dst_data, dst_strides + 1, */ /*else*/ { __pyx_t_4 = __pyx_v_dst_extent; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1148 * else: * for i in range(dst_extent): * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< * dst_data, dst_strides + 1, * src_shape + 1, dst_shape + 1, */ _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); /* "View.MemoryView":1152 * src_shape + 1, dst_shape + 1, * ndim - 1, itemsize) * src_data += src_stride # <<<<<<<<<<<<<< * dst_data += dst_stride * */ __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); /* "View.MemoryView":1153 * ndim - 1, itemsize) * src_data += src_stride * dst_data += dst_stride # <<<<<<<<<<<<<< * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, */ __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); } } __pyx_L3:; /* "View.MemoryView":1125 * * @cython.cdivision(True) * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< * char *dst_data, Py_ssize_t *dst_strides, * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, */ /* function exit code */ } /* "View.MemoryView":1155 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { /* "View.MemoryView":1158 * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: * _copy_strided_to_strided(src.data, src.strides, dst.data, 
dst.strides, # <<<<<<<<<<<<<< * src.shape, dst.shape, ndim, itemsize) * */ _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1155 * dst_data += dst_stride * * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *dst, * int ndim, size_t itemsize) nogil: */ /* function exit code */ } /* "View.MemoryView":1162 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { int __pyx_v_i; Py_ssize_t __pyx_v_size; Py_ssize_t __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1165 * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i * cdef Py_ssize_t size = src.memview.view.itemsize # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_size = __pyx_t_1; /* "View.MemoryView":1167 * cdef Py_ssize_t size = src.memview.view.itemsize * * for i in range(ndim): # <<<<<<<<<<<<<< * size *= src.shape[i] * */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1168 * * for i in range(ndim): * size *= src.shape[i] # <<<<<<<<<<<<<< * * return size */ __pyx_v_size = (__pyx_v_size * (__pyx_v_src->shape[__pyx_v_i])); } /* "View.MemoryView":1170 * size *= src.shape[i] * * return size # <<<<<<<<<<<<<< * * @cname('__pyx_fill_contig_strides_array') */ __pyx_r = __pyx_v_size; goto __pyx_L0; /* "View.MemoryView":1162 * * @cname('__pyx_memoryview_slice_get_size') * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) nogil: # <<<<<<<<<<<<<< * "Return the size of the memory occupied by the slice in number of bytes" * cdef int i */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1173 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { int __pyx_v_idx; Py_ssize_t __pyx_r; int __pyx_t_1; int __pyx_t_2; int __pyx_t_3; /* "View.MemoryView":1182 * cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ __pyx_t_1 = ((__pyx_v_order == 'F') != 0); if (__pyx_t_1) { /* "View.MemoryView":1183 * * if order == 'F': * for idx in range(ndim): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ __pyx_t_2 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_idx = __pyx_t_3; /* "View.MemoryView":1184 * if order == 'F': * for idx in range(ndim): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * else: */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1185 * for idx in range(ndim): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * else: * for idx in range(ndim - 1, -1, -1): */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } /* "View.MemoryView":1182 * 
cdef int idx * * if order == 'F': # <<<<<<<<<<<<<< * for idx in range(ndim): * strides[idx] = stride */ goto __pyx_L3; } /* "View.MemoryView":1187 * stride = stride * shape[idx] * else: * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * strides[idx] = stride * stride = stride * shape[idx] */ /*else*/ { for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1L; __pyx_t_2-=1) { __pyx_v_idx = __pyx_t_2; /* "View.MemoryView":1188 * else: * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride # <<<<<<<<<<<<<< * stride = stride * shape[idx] * */ (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; /* "View.MemoryView":1189 * for idx in range(ndim - 1, -1, -1): * strides[idx] = stride * stride = stride * shape[idx] # <<<<<<<<<<<<<< * * return stride */ __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); } } __pyx_L3:; /* "View.MemoryView":1191 * stride = stride * shape[idx] * * return stride # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_data_to_temp') */ __pyx_r = __pyx_v_stride; goto __pyx_L0; /* "View.MemoryView":1173 * * @cname('__pyx_fill_contig_strides_array') * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, * int ndim, char order) nogil: */ /* function exit code */ __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1194 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { int __pyx_v_i; void *__pyx_v_result; size_t __pyx_v_itemsize; size_t __pyx_v_size; void *__pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; struct __pyx_memoryview_obj *__pyx_t_4; int __pyx_t_5; /* "View.MemoryView":1205 * cdef void *result * * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef size_t size = slice_get_size(src, ndim) * */ __pyx_t_1 = __pyx_v_src->memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1206 * * cdef size_t itemsize = src.memview.view.itemsize * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< * * result = malloc(size) */ __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); /* "View.MemoryView":1208 * cdef size_t size = slice_get_size(src, ndim) * * result = malloc(size) # <<<<<<<<<<<<<< * if not result: * _err(MemoryError, NULL) */ __pyx_v_result = malloc(__pyx_v_size); /* "View.MemoryView":1209 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ __pyx_t_2 = ((!(__pyx_v_result != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1210 * result = malloc(size) * if not result: * _err(MemoryError, NULL) # <<<<<<<<<<<<<< * * */ __pyx_t_3 = __pyx_memoryview_err(__pyx_builtin_MemoryError, NULL); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1210, __pyx_L1_error) /* "View.MemoryView":1209 * * result = malloc(size) * if not result: # <<<<<<<<<<<<<< * _err(MemoryError, NULL) * */ } /* "View.MemoryView":1213 * * * tmpslice.data = <char *> result # <<<<<<<<<<<<<< * tmpslice.memview = src.memview * for i in range(ndim): */ __pyx_v_tmpslice->data = ((char *)__pyx_v_result); /* "View.MemoryView":1214 * * tmpslice.data = <char *> result * tmpslice.memview = src.memview # <<<<<<<<<<<<<< * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] */ __pyx_t_4 = __pyx_v_src->memview; __pyx_v_tmpslice->memview = 
__pyx_t_4; /* "View.MemoryView":1215 * tmpslice.data = <char *> result * tmpslice.memview = src.memview * for i in range(ndim): # <<<<<<<<<<<<<< * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1216 * tmpslice.memview = src.memview * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< * tmpslice.suboffsets[i] = -1 * */ (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); /* "View.MemoryView":1217 * for i in range(ndim): * tmpslice.shape[i] = src.shape[i] * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, */ (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1219 * tmpslice.suboffsets[i] = -1 * * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, # <<<<<<<<<<<<<< * ndim, order) * */ __pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order); /* "View.MemoryView":1223 * * * for i in range(ndim): # <<<<<<<<<<<<<< * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 */ __pyx_t_3 = __pyx_v_ndim; for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_3; __pyx_t_5+=1) { __pyx_v_i = __pyx_t_5; /* "View.MemoryView":1224 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ __pyx_t_2 = (((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1225 * for i in range(ndim): * if tmpslice.shape[i] == 1: * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< * * if slice_is_contig(src[0], order, ndim): */ (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; /* "View.MemoryView":1224 * * for i in range(ndim): * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< * tmpslice.strides[i] = 0 * */ } } /* "View.MemoryView":1227 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ __pyx_t_2 = (__pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1228 * * if slice_is_contig(src[0], order, ndim): * memcpy(result, src.data, size) # <<<<<<<<<<<<<< * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) */ memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size); /* "View.MemoryView":1227 * tmpslice.strides[i] = 0 * * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< * memcpy(result, src.data, size) * else: */ goto __pyx_L9; } /* "View.MemoryView":1230 * memcpy(result, src.data, size) * else: * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< * * return result */ /*else*/ { copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); } __pyx_L9:; /* "View.MemoryView":1232 * copy_strided_to_strided(src, tmpslice, ndim, itemsize) * * return result # <<<<<<<<<<<<<< * * */ __pyx_r = __pyx_v_result; goto __pyx_L0; /* "View.MemoryView":1194 * * @cname('__pyx_memoryview_copy_data_to_temp') * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< * __Pyx_memviewslice *tmpslice, * char order, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD 
__Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = NULL; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1237 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_extents", 0); /* "View.MemoryView":1240 * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % * (i, extent1, extent2)) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err_dim') */ __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_i); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_extent1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_extent2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1240, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_1); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_2); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3); __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; /* "View.MemoryView":1239 * cdef int _err_extents(int i, Py_ssize_t extent1, * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % # <<<<<<<<<<<<<< * (i, extent1, extent2)) * */ __pyx_t_3 = __Pyx_PyString_Format(__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1239, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 1239, __pyx_L1_error) /* "View.MemoryView":1237 * * @cname('__pyx_memoryview_err_extents') * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< * Py_ssize_t extent2) except -1 with gil: * raise ValueError("got differing extents in dimension %d (got %d and %d)" % */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1243 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') 
% dim) * */ static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, char *__pyx_v_msg, int __pyx_v_dim) { int __pyx_r; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err_dim", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1244 * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: * raise error(msg.decode('ascii') % dim) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_err') */ __pyx_t_2 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __pyx_t_4 = PyUnicode_Format(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_INCREF(__pyx_v_error); __pyx_t_3 = __pyx_v_error; __pyx_t_2 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_3); if (likely(__pyx_t_2)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); __Pyx_INCREF(__pyx_t_2); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_3, function); } } if (!__pyx_t_2) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_3)) { PyObject *__pyx_temp[2] = {__pyx_t_2, __pyx_t_4}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_3, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_5 = PyTuple_New(1+1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_5); __Pyx_GIVEREF(__pyx_t_2); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_2); __pyx_t_2 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_5, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_5, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1244, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; } } __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_Raise(__pyx_t_1, 0, 0, 0); __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; __PYX_ERR(1, 1244, __pyx_L1_error) /* "View.MemoryView":1243 * * @cname('__pyx_memoryview_err_dim') * cdef int _err_dim(object error, char *msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii') % dim) * */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); 
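/* Annotation: error exit of _err_dim.  The temporaries are released above, the traceback
   is recorded, and -1 is returned to satisfy the `except -1 with gil` declaration, so the
   nogil callers can test the return value and propagate the Python exception. */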
__Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1247 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ static int __pyx_memoryview_err(PyObject *__pyx_v_error, char *__pyx_v_msg) { int __pyx_r; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("_err", 0); __Pyx_INCREF(__pyx_v_error); /* "View.MemoryView":1248 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ __pyx_t_1 = ((__pyx_v_msg != NULL) != 0); if (__pyx_t_1) { /* "View.MemoryView":1249 * cdef int _err(object error, char *msg) except -1 with gil: * if msg != NULL: * raise error(msg.decode('ascii')) # <<<<<<<<<<<<<< * else: * raise error */ __pyx_t_3 = __Pyx_decode_c_string(__pyx_v_msg, 0, strlen(__pyx_v_msg), NULL, NULL, PyUnicode_DecodeASCII); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_INCREF(__pyx_v_error); __pyx_t_4 = __pyx_v_error; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_4, function); } } if (!__pyx_t_5) { __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __Pyx_GOTREF(__pyx_t_2); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_3}; __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_3); __pyx_t_3 = 0; __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1249, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_Raise(__pyx_t_2, 0, 0, 0); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __PYX_ERR(1, 1249, __pyx_L1_error) /* "View.MemoryView":1248 * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) 
except -1 with gil: * if msg != NULL: # <<<<<<<<<<<<<< * raise error(msg.decode('ascii')) * else: */ } /* "View.MemoryView":1251 * raise error(msg.decode('ascii')) * else: * raise error # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_copy_contents') */ /*else*/ { __Pyx_Raise(__pyx_v_error, 0, 0, 0); __PYX_ERR(1, 1251, __pyx_L1_error) } /* "View.MemoryView":1247 * * @cname('__pyx_memoryview_err') * cdef int _err(object error, char *msg) except -1 with gil: # <<<<<<<<<<<<<< * if msg != NULL: * raise error(msg.decode('ascii')) */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = -1; __Pyx_XDECREF(__pyx_v_error); __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif return __pyx_r; } /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { void *__pyx_v_tmpdata; size_t __pyx_v_itemsize; int __pyx_v_i; char __pyx_v_order; int __pyx_v_broadcasting; int __pyx_v_direct_copy; __Pyx_memviewslice __pyx_v_tmp; int __pyx_v_ndim; int __pyx_r; Py_ssize_t __pyx_t_1; int __pyx_t_2; int __pyx_t_3; int __pyx_t_4; int __pyx_t_5; void *__pyx_t_6; int __pyx_t_7; /* "View.MemoryView":1262 * Check for overlapping memory and verify the shapes. * """ * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< * cdef size_t itemsize = src.memview.view.itemsize * cdef int i */ __pyx_v_tmpdata = NULL; /* "View.MemoryView":1263 * """ * cdef void *tmpdata = NULL * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< * cdef int i * cdef char order = get_best_order(&src, src_ndim) */ __pyx_t_1 = __pyx_v_src.memview->view.itemsize; __pyx_v_itemsize = __pyx_t_1; /* "View.MemoryView":1265 * cdef size_t itemsize = src.memview.view.itemsize * cdef int i * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< * cdef bint broadcasting = False * cdef bint direct_copy = False */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); /* "View.MemoryView":1266 * cdef int i * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False # <<<<<<<<<<<<<< * cdef bint direct_copy = False * cdef __Pyx_memviewslice tmp */ __pyx_v_broadcasting = 0; /* "View.MemoryView":1267 * cdef char order = get_best_order(&src, src_ndim) * cdef bint broadcasting = False * cdef bint direct_copy = False # <<<<<<<<<<<<<< * cdef __Pyx_memviewslice tmp * */ __pyx_v_direct_copy = 0; /* "View.MemoryView":1270 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ __pyx_t_2 = ((__pyx_v_src_ndim < __pyx_v_dst_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1271 * * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); /* "View.MemoryView":1270 * cdef __Pyx_memviewslice tmp * * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< * 
broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: */ goto __pyx_L3; } /* "View.MemoryView":1272 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ __pyx_t_2 = ((__pyx_v_dst_ndim < __pyx_v_src_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1273 * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< * * cdef int ndim = max(src_ndim, dst_ndim) */ __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); /* "View.MemoryView":1272 * if src_ndim < dst_ndim: * broadcast_leading(&src, src_ndim, dst_ndim) * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< * broadcast_leading(&dst, dst_ndim, src_ndim) * */ } __pyx_L3:; /* "View.MemoryView":1275 * broadcast_leading(&dst, dst_ndim, src_ndim) * * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< * * for i in range(ndim): */ __pyx_t_3 = __pyx_v_dst_ndim; __pyx_t_4 = __pyx_v_src_ndim; if (((__pyx_t_3 > __pyx_t_4) != 0)) { __pyx_t_5 = __pyx_t_3; } else { __pyx_t_5 = __pyx_t_4; } __pyx_v_ndim = __pyx_t_5; /* "View.MemoryView":1277 * cdef int ndim = max(src_ndim, dst_ndim) * * for i in range(ndim): # <<<<<<<<<<<<<< * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: */ __pyx_t_5 = __pyx_v_ndim; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_5; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1278 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])) != 0); if (__pyx_t_2) { /* "View.MemoryView":1279 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ __pyx_t_2 = (((__pyx_v_src.shape[__pyx_v_i]) == 1) != 0); if (__pyx_t_2) { /* "View.MemoryView":1280 * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: * broadcasting = True # <<<<<<<<<<<<<< * src.strides[i] = 0 * else: */ __pyx_v_broadcasting = 1; /* "View.MemoryView":1281 * if src.shape[i] == 1: * broadcasting = True * src.strides[i] = 0 # <<<<<<<<<<<<<< * else: * _err_extents(i, dst.shape[i], src.shape[i]) */ (__pyx_v_src.strides[__pyx_v_i]) = 0; /* "View.MemoryView":1279 * for i in range(ndim): * if src.shape[i] != dst.shape[i]: * if src.shape[i] == 1: # <<<<<<<<<<<<<< * broadcasting = True * src.strides[i] = 0 */ goto __pyx_L7; } /* "View.MemoryView":1283 * src.strides[i] = 0 * else: * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< * * if src.suboffsets[i] >= 0: */ /*else*/ { __pyx_t_4 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1283, __pyx_L1_error) } __pyx_L7:; /* "View.MemoryView":1278 * * for i in range(ndim): * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< * if src.shape[i] == 1: * broadcasting = True */ } /* "View.MemoryView":1285 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ __pyx_t_2 = (((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0) != 0); if (__pyx_t_2) { /* "View.MemoryView":1286 * * if src.suboffsets[i] >= 0: * _err_dim(ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< * * if slices_overlap(&src, &dst, ndim, itemsize): */ __pyx_t_4 = 
__pyx_memoryview_err_dim(__pyx_builtin_ValueError, ((char *)"Dimension %d is not direct"), __pyx_v_i); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 1286, __pyx_L1_error) /* "View.MemoryView":1285 * _err_extents(i, dst.shape[i], src.shape[i]) * * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< * _err_dim(ValueError, "Dimension %d is not direct", i) * */ } } /* "View.MemoryView":1288 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ __pyx_t_2 = (__pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize) != 0); if (__pyx_t_2) { /* "View.MemoryView":1290 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ __pyx_t_2 = ((!(__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim) != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1291 * * if not slice_is_contig(src, order, ndim): * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) */ __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); /* "View.MemoryView":1290 * if slices_overlap(&src, &dst, ndim, itemsize): * * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< * order = get_best_order(&dst, ndim) * */ } /* "View.MemoryView":1293 * order = get_best_order(&dst, ndim) * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< * src = tmp * */ __pyx_t_6 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_6 == ((void *)NULL))) __PYX_ERR(1, 1293, __pyx_L1_error) __pyx_v_tmpdata = __pyx_t_6; /* "View.MemoryView":1294 * * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) * src = tmp # <<<<<<<<<<<<<< * * if not broadcasting: */ __pyx_v_src = __pyx_v_tmp; /* "View.MemoryView":1288 * _err_dim(ValueError, "Dimension %d is not direct", i) * * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< * * if not slice_is_contig(src, order, ndim): */ } /* "View.MemoryView":1296 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ __pyx_t_2 = ((!(__pyx_v_broadcasting != 0)) != 0); if (__pyx_t_2) { /* "View.MemoryView":1299 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1300 * * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< * elif slice_is_contig(src, 'F', ndim): * direct_copy = slice_is_contig(dst, 'F', ndim) */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); /* "View.MemoryView":1299 * * * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): */ goto __pyx_L12; } /* "View.MemoryView":1301 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ __pyx_t_2 = (__pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim) != 0); if (__pyx_t_2) { /* "View.MemoryView":1302 * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): * direct_copy = 
slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< * * if direct_copy: */ __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); /* "View.MemoryView":1301 * if slice_is_contig(src, 'C', ndim): * direct_copy = slice_is_contig(dst, 'C', ndim) * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< * direct_copy = slice_is_contig(dst, 'F', ndim) * */ } __pyx_L12:; /* "View.MemoryView":1304 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_2 = (__pyx_v_direct_copy != 0); if (__pyx_t_2) { /* "View.MemoryView":1306 * if direct_copy: * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1307 * * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) */ memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim)); /* "View.MemoryView":1308 * refcount_copying(&dst, dtype_is_object, ndim, False) * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * free(tmpdata) * return 0 */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1309 * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1310 * refcount_copying(&dst, dtype_is_object, ndim, True) * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * if order == 'F' == get_best_order(&dst, ndim): */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1304 * direct_copy = slice_is_contig(dst, 'F', ndim) * * if direct_copy: # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ } /* "View.MemoryView":1296 * src = tmp * * if not broadcasting: # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1312 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ __pyx_t_2 = (__pyx_v_order == 'F'); if (__pyx_t_2) { __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); } __pyx_t_7 = (__pyx_t_2 != 0); if (__pyx_t_7) { /* "View.MemoryView":1315 * * * transpose_memslice(&src) # <<<<<<<<<<<<<< * transpose_memslice(&dst) * */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1315, __pyx_L1_error) /* "View.MemoryView":1316 * * transpose_memslice(&src) * transpose_memslice(&dst) # <<<<<<<<<<<<<< * * refcount_copying(&dst, dtype_is_object, ndim, False) */ __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)0))) __PYX_ERR(1, 1316, __pyx_L1_error) /* "View.MemoryView":1312 * return 0 * * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< * * */ } /* "View.MemoryView":1318 * transpose_memslice(&dst) * * refcount_copying(&dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* 
"View.MemoryView":1319 * * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< * refcount_copying(&dst, dtype_is_object, ndim, True) * */ copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); /* "View.MemoryView":1320 * refcount_copying(&dst, dtype_is_object, ndim, False) * copy_strided_to_strided(&src, &dst, ndim, itemsize) * refcount_copying(&dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * free(tmpdata) */ __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1322 * refcount_copying(&dst, dtype_is_object, ndim, True) * * free(tmpdata) # <<<<<<<<<<<<<< * return 0 * */ free(__pyx_v_tmpdata); /* "View.MemoryView":1323 * * free(tmpdata) * return 0 # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_broadcast_leading') */ __pyx_r = 0; goto __pyx_L0; /* "View.MemoryView":1254 * * @cname('__pyx_memoryview_copy_contents') * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< * __Pyx_memviewslice dst, * int src_ndim, int dst_ndim, */ /* function exit code */ __pyx_L1_error:; { #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif } __pyx_r = -1; __pyx_L0:; return __pyx_r; } /* "View.MemoryView":1326 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { int __pyx_v_i; int __pyx_v_offset; int __pyx_t_1; int __pyx_t_2; /* "View.MemoryView":1330 * int ndim_other) nogil: * cdef int i * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< * * for i in range(ndim - 1, -1, -1): */ __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); /* "View.MemoryView":1332 * cdef int offset = ndim_other - ndim * * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] */ for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1L; __pyx_t_1-=1) { __pyx_v_i = __pyx_t_1; /* "View.MemoryView":1333 * * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] */ (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); /* "View.MemoryView":1334 * for i in range(ndim - 1, -1, -1): * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * */ (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->strides[__pyx_v_i]); /* "View.MemoryView":1335 * mslice.shape[i + offset] = mslice.shape[i] * mslice.strides[i + offset] = mslice.strides[i] * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< * * for i in range(offset): */ (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); } /* "View.MemoryView":1337 * mslice.suboffsets[i + offset] = mslice.suboffsets[i] * * for i in range(offset): # <<<<<<<<<<<<<< * mslice.shape[i] = 1 * mslice.strides[i] = 
mslice.strides[0] */ __pyx_t_1 = __pyx_v_offset; for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1338 * * for i in range(offset): * mslice.shape[i] = 1 # <<<<<<<<<<<<<< * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 */ (__pyx_v_mslice->shape[__pyx_v_i]) = 1; /* "View.MemoryView":1339 * for i in range(offset): * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< * mslice.suboffsets[i] = -1 * */ (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); /* "View.MemoryView":1340 * mslice.shape[i] = 1 * mslice.strides[i] = mslice.strides[0] * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< * * */ (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; } /* "View.MemoryView":1326 * * @cname('__pyx_memoryview_broadcast_leading') * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< * int ndim, * int ndim_other) nogil: */ /* function exit code */ } /* "View.MemoryView":1348 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { int __pyx_t_1; /* "View.MemoryView":1352 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ __pyx_t_1 = (__pyx_v_dtype_is_object != 0); if (__pyx_t_1) { /* "View.MemoryView":1353 * * if dtype_is_object: * refcount_objects_in_slice_with_gil(dst.data, dst.shape, # <<<<<<<<<<<<<< * dst.strides, ndim, inc) * */ __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1352 * * * if dtype_is_object: # <<<<<<<<<<<<<< * refcount_objects_in_slice_with_gil(dst.data, dst.shape, * dst.strides, ndim, inc) */ } /* "View.MemoryView":1348 * * @cname('__pyx_memoryview_refcount_copying') * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, # <<<<<<<<<<<<<< * int ndim, bint inc) nogil: * */ /* function exit code */ } /* "View.MemoryView":1357 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { __Pyx_RefNannyDeclarations #ifdef WITH_THREAD PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); #endif __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); /* "View.MemoryView":1360 * Py_ssize_t *strides, int ndim, * bint inc) with gil: * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< * * @cname('__pyx_memoryview_refcount_objects_in_slice') */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); /* "View.MemoryView":1357 * * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * bint inc) with gil: */ /* function exit code */ __Pyx_RefNannyFinishContext(); #ifdef WITH_THREAD __Pyx_PyGILState_Release(__pyx_gilstate_save); #endif 
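/* Annotation: this with-gil wrapper only re-acquires the GIL around the call; the actual
   per-element Py_INCREF/Py_DECREF walk is done by refcount_objects_in_slice, defined next,
   which recurses one dimension at a time. */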
} /* "View.MemoryView":1363 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; __Pyx_RefNannyDeclarations Py_ssize_t __pyx_t_1; Py_ssize_t __pyx_t_2; int __pyx_t_3; __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); /* "View.MemoryView":1367 * cdef Py_ssize_t i * * for i in range(shape[0]): # <<<<<<<<<<<<<< * if ndim == 1: * if inc: */ __pyx_t_1 = (__pyx_v_shape[0]); for (__pyx_t_2 = 0; __pyx_t_2 < __pyx_t_1; __pyx_t_2+=1) { __pyx_v_i = __pyx_t_2; /* "View.MemoryView":1368 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ __pyx_t_3 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_3) { /* "View.MemoryView":1369 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ __pyx_t_3 = (__pyx_v_inc != 0); if (__pyx_t_3) { /* "View.MemoryView":1370 * if ndim == 1: * if inc: * Py_INCREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * Py_DECREF((<PyObject **> data)[0]) */ Py_INCREF((((PyObject **)__pyx_v_data)[0])); /* "View.MemoryView":1369 * for i in range(shape[0]): * if ndim == 1: * if inc: # <<<<<<<<<<<<<< * Py_INCREF((<PyObject **> data)[0]) * else: */ goto __pyx_L6; } /* "View.MemoryView":1372 * Py_INCREF((<PyObject **> data)[0]) * else: * Py_DECREF((<PyObject **> data)[0]) # <<<<<<<<<<<<<< * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, */ /*else*/ { Py_DECREF((((PyObject **)__pyx_v_data)[0])); } __pyx_L6:; /* "View.MemoryView":1368 * * for i in range(shape[0]): * if ndim == 1: # <<<<<<<<<<<<<< * if inc: * Py_INCREF((<PyObject **> data)[0]) */ goto __pyx_L5; } /* "View.MemoryView":1374 * Py_DECREF((<PyObject **> data)[0]) * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, inc) * */ /*else*/ { /* "View.MemoryView":1375 * else: * refcount_objects_in_slice(data, shape + 1, strides + 1, * ndim - 1, inc) # <<<<<<<<<<<<<< * * data += strides[0] */ __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); } __pyx_L5:; /* "View.MemoryView":1377 * ndim - 1, inc) * * data += strides[0] # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + (__pyx_v_strides[0])); } /* "View.MemoryView":1363 * * @cname('__pyx_memoryview_refcount_objects_in_slice') * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, bint inc): * cdef Py_ssize_t i */ /* function exit code */ __Pyx_RefNannyFinishContext(); } /* "View.MemoryView":1383 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { /* "View.MemoryView":1386 * size_t itemsize, void *item, * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) # <<<<<<<<<<<<<< * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) 
*/ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); /* "View.MemoryView":1387 * bint dtype_is_object) nogil: * refcount_copying(dst, dtype_is_object, ndim, False) * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, # <<<<<<<<<<<<<< * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) */ __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1389 * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, * itemsize, item) * refcount_copying(dst, dtype_is_object, ndim, True) # <<<<<<<<<<<<<< * * */ __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); /* "View.MemoryView":1383 * * @cname('__pyx_memoryview_slice_assign_scalar') * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< * size_t itemsize, void *item, * bint dtype_is_object) nogil: */ /* function exit code */ } /* "View.MemoryView":1393 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { CYTHON_UNUSED Py_ssize_t __pyx_v_i; Py_ssize_t __pyx_v_stride; Py_ssize_t __pyx_v_extent; int __pyx_t_1; Py_ssize_t __pyx_t_2; Py_ssize_t __pyx_t_3; /* "View.MemoryView":1397 * size_t itemsize, void *item) nogil: * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< * cdef Py_ssize_t extent = shape[0] * */ __pyx_v_stride = (__pyx_v_strides[0]); /* "View.MemoryView":1398 * cdef Py_ssize_t i * cdef Py_ssize_t stride = strides[0] * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< * * if ndim == 1: */ __pyx_v_extent = (__pyx_v_shape[0]); /* "View.MemoryView":1400 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ __pyx_t_1 = ((__pyx_v_ndim == 1) != 0); if (__pyx_t_1) { /* "View.MemoryView":1401 * * if ndim == 1: * for i in range(extent): # <<<<<<<<<<<<<< * memcpy(data, item, itemsize) * data += stride */ __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1402 * if ndim == 1: * for i in range(extent): * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< * data += stride * else: */ memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize); /* "View.MemoryView":1403 * for i in range(extent): * memcpy(data, item, itemsize) * data += stride # <<<<<<<<<<<<<< * else: * for i in range(extent): */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } /* "View.MemoryView":1400 * cdef Py_ssize_t extent = shape[0] * * if ndim == 1: # <<<<<<<<<<<<<< * for i in range(extent): * memcpy(data, item, itemsize) */ goto __pyx_L3; } /* "View.MemoryView":1405 * data += stride * else: * for i in range(extent): # <<<<<<<<<<<<<< * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) */ /*else*/ { __pyx_t_2 = __pyx_v_extent; for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { __pyx_v_i = __pyx_t_3; /* "View.MemoryView":1406 * else: * for i in range(extent): * _slice_assign_scalar(data, shape + 1, strides + 1, # <<<<<<<<<<<<<< * ndim - 1, itemsize, item) * data += stride */ 
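/* Annotation: multi-dimensional case of _slice_assign_scalar.  For each index of the
   outermost dimension the function recurses with shape + 1 / strides + 1 and ndim - 1,
   then advances data by the outer stride; the ndim == 1 base case above memcpy's the
   scalar item into every element. */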
__pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); /* "View.MemoryView":1408 * _slice_assign_scalar(data, shape + 1, strides + 1, * ndim - 1, itemsize, item) * data += stride # <<<<<<<<<<<<<< * * */ __pyx_v_data = (__pyx_v_data + __pyx_v_stride); } } __pyx_L3:; /* "View.MemoryView":1393 * * @cname('__pyx_memoryview__slice_assign_scalar') * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< * Py_ssize_t *strides, int ndim, * size_t itemsize, void *item) nogil: */ /* function exit code */ } /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ /* Python wrapper */ static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, METH_VARARGS|METH_KEYWORDS, 0}; static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { PyObject *__pyx_v___pyx_type = 0; long __pyx_v___pyx_checksum; PyObject *__pyx_v___pyx_state = 0; PyObject *__pyx_r = 0; __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); { static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; PyObject* values[3] = {0,0,0}; if (unlikely(__pyx_kwds)) { Py_ssize_t kw_args; const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); switch (pos_args) { case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); CYTHON_FALLTHROUGH; case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); CYTHON_FALLTHROUGH; case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); CYTHON_FALLTHROUGH; case 0: break; default: goto __pyx_L5_argtuple_error; } kw_args = PyDict_Size(__pyx_kwds); switch (pos_args) { case 0: if (likely((values[0] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_pyx_type)) != 0)) kw_args--; else goto __pyx_L5_argtuple_error; CYTHON_FALLTHROUGH; case 1: if (likely((values[1] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_pyx_checksum)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) } CYTHON_FALLTHROUGH; case 2: if (likely((values[2] = PyDict_GetItem(__pyx_kwds, __pyx_n_s_pyx_state)) != 0)) kw_args--; else { __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) } } if (unlikely(kw_args > 0)) { if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) } } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { goto __pyx_L5_argtuple_error; } else { values[0] = PyTuple_GET_ITEM(__pyx_args, 0); values[1] = PyTuple_GET_ITEM(__pyx_args, 1); values[2] = PyTuple_GET_ITEM(__pyx_args, 2); } __pyx_v___pyx_type = values[0]; __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) __pyx_v___pyx_state = values[2]; } goto __pyx_L4_argument_unpacking_done; __pyx_L5_argtuple_error:; __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 1, __pyx_L3_error) 
__pyx_L3_error:; __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __Pyx_RefNannyFinishContext(); return NULL; __pyx_L4_argument_unpacking_done:; __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); /* function exit code */ __Pyx_RefNannyFinishContext(); return __pyx_r; } static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_v___pyx_PickleError = NULL; PyObject *__pyx_v___pyx_result = NULL; PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations int __pyx_t_1; PyObject *__pyx_t_2 = NULL; PyObject *__pyx_t_3 = NULL; PyObject *__pyx_t_4 = NULL; PyObject *__pyx_t_5 = NULL; PyObject *__pyx_t_6 = NULL; int __pyx_t_7; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); /* "(tree fragment)":2 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ __pyx_t_1 = ((__pyx_v___pyx_checksum != 0xb068931) != 0); if (__pyx_t_1) { /* "(tree fragment)":3 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) */ __pyx_t_2 = PyList_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_n_s_PickleError); __Pyx_GIVEREF(__pyx_n_s_PickleError); PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_PickleError); __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_2, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 3, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __Pyx_INCREF(__pyx_t_2); __pyx_v___pyx_PickleError = __pyx_t_2; __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":4 * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) # <<<<<<<<<<<<<< * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: */ __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_4 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_INCREF(__pyx_v___pyx_PickleError); __pyx_t_2 = __pyx_v___pyx_PickleError; __pyx_t_5 = NULL; if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_2))) { __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_5)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_5); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } if (!__pyx_t_5) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, 
__pyx_L1_error) __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_5, __pyx_t_4}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } else #endif { __pyx_t_6 = PyTuple_New(1+1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_5); __pyx_t_5 = NULL; __Pyx_GIVEREF(__pyx_t_4); PyTuple_SET_ITEM(__pyx_t_6, 0+1, __pyx_t_4); __pyx_t_4 = 0; __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_6, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __Pyx_Raise(__pyx_t_3, 0, 0, 0); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; __PYX_ERR(1, 4, __pyx_L1_error) /* "(tree fragment)":2 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): * if __pyx_checksum != 0xb068931: # <<<<<<<<<<<<<< * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) */ } /* "(tree fragment)":5 * from pickle import PickleError as __pyx_PickleError * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) */ __pyx_t_2 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_2); __pyx_t_6 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_2); if (likely(__pyx_t_6)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); __Pyx_INCREF(__pyx_t_6); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_2, function); } } if (!__pyx_t_6) { __pyx_t_3 = __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_v___pyx_type); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v___pyx_type}; __pyx_t_3 = __Pyx_PyFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_2)) { PyObject *__pyx_temp[2] = {__pyx_t_6, __pyx_v___pyx_type}; __pyx_t_3 = __Pyx_PyCFunction_FastCall(__pyx_t_2, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_3); } else #endif { __pyx_t_4 = PyTuple_New(1+1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_4); __Pyx_GIVEREF(__pyx_t_6); 
PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_6); __pyx_t_6 = NULL; __Pyx_INCREF(__pyx_v___pyx_type); __Pyx_GIVEREF(__pyx_v___pyx_type); PyTuple_SET_ITEM(__pyx_t_4, 0+1, __pyx_v___pyx_type); __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_4, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; } } __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; __pyx_v___pyx_result = __pyx_t_3; __pyx_t_3 = 0; /* "(tree fragment)":6 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ __pyx_t_1 = (__pyx_v___pyx_state != Py_None); __pyx_t_7 = (__pyx_t_1 != 0); if (__pyx_t_7) { /* "(tree fragment)":7 * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) # <<<<<<<<<<<<<< * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): */ if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None)||(PyErr_Format(PyExc_TypeError, "Expected %.16s, got %.200s", "tuple", Py_TYPE(__pyx_v___pyx_state)->tp_name), 0))) __PYX_ERR(1, 7, __pyx_L1_error) __pyx_t_3 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_3); __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; /* "(tree fragment)":6 * raise __pyx_PickleError("Incompatible checksums (%s vs 0xb068931 = (name))" % __pyx_checksum) * __pyx_result = Enum.__new__(__pyx_type) * if __pyx_state is not None: # <<<<<<<<<<<<<< * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result */ } /* "(tree fragment)":8 * if __pyx_state is not None: * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result # <<<<<<<<<<<<<< * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] */ __Pyx_XDECREF(__pyx_r); __Pyx_INCREF(__pyx_v___pyx_result); __pyx_r = __pyx_v___pyx_result; goto __pyx_L0; /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ /* function exit code */ __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_2); __Pyx_XDECREF(__pyx_t_3); __Pyx_XDECREF(__pyx_t_4); __Pyx_XDECREF(__pyx_t_5); __Pyx_XDECREF(__pyx_t_6); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = NULL; __pyx_L0:; __Pyx_XDECREF(__pyx_v___pyx_PickleError); __Pyx_XDECREF(__pyx_v___pyx_result); __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { PyObject *__pyx_r = NULL; __Pyx_RefNannyDeclarations PyObject *__pyx_t_1 = NULL; int __pyx_t_2; Py_ssize_t __pyx_t_3; int __pyx_t_4; 
int __pyx_t_5; PyObject *__pyx_t_6 = NULL; PyObject *__pyx_t_7 = NULL; PyObject *__pyx_t_8 = NULL; PyObject *__pyx_t_9 = NULL; __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); /* "(tree fragment)":10 * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 10, __pyx_L1_error) } __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 10, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __Pyx_GOTREF(__pyx_v___pyx_result->name); __Pyx_DECREF(__pyx_v___pyx_result->name); __pyx_v___pyx_result->name = __pyx_t_1; __pyx_t_1 = 0; /* "(tree fragment)":11 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); __PYX_ERR(1, 11, __pyx_L1_error) } __pyx_t_3 = PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 11, __pyx_L1_error) __pyx_t_4 = ((__pyx_t_3 > 1) != 0); if (__pyx_t_4) { } else { __pyx_t_2 = __pyx_t_4; goto __pyx_L4_bool_binop_done; } __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 11, __pyx_L1_error) __pyx_t_5 = (__pyx_t_4 != 0); __pyx_t_2 = __pyx_t_5; __pyx_L4_bool_binop_done:; if (__pyx_t_2) { /* "(tree fragment)":12 * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< */ __pyx_t_6 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_update); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_7); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; if (unlikely(__pyx_v___pyx_state == Py_None)) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); __PYX_ERR(1, 12, __pyx_L1_error) } __pyx_t_6 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_6); __pyx_t_8 = NULL; if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { __pyx_t_8 = PyMethod_GET_SELF(__pyx_t_7); if (likely(__pyx_t_8)) { PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); __Pyx_INCREF(__pyx_t_8); __Pyx_INCREF(function); __Pyx_DECREF_SET(__pyx_t_7, function); } } if (!__pyx_t_8) { __pyx_t_1 = __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_t_6); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; __Pyx_GOTREF(__pyx_t_1); } else { #if CYTHON_FAST_PYCALL if (PyFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_6}; __pyx_t_1 = __Pyx_PyFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, 
__pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif #if CYTHON_FAST_PYCCALL if (__Pyx_PyFastCFunction_Check(__pyx_t_7)) { PyObject *__pyx_temp[2] = {__pyx_t_8, __pyx_t_6}; __pyx_t_1 = __Pyx_PyCFunction_FastCall(__pyx_t_7, __pyx_temp+1-1, 1+1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; } else #endif { __pyx_t_9 = PyTuple_New(1+1); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_9); __Pyx_GIVEREF(__pyx_t_8); PyTuple_SET_ITEM(__pyx_t_9, 0, __pyx_t_8); __pyx_t_8 = NULL; __Pyx_GIVEREF(__pyx_t_6); PyTuple_SET_ITEM(__pyx_t_9, 0+1, __pyx_t_6); __pyx_t_6 = 0; __pyx_t_1 = __Pyx_PyObject_Call(__pyx_t_7, __pyx_t_9, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; } } __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":11 * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< * __pyx_result.__dict__.update(__pyx_state[1]) */ } /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /* function exit code */ __pyx_r = Py_None; __Pyx_INCREF(Py_None); goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); __Pyx_XDECREF(__pyx_t_6); __Pyx_XDECREF(__pyx_t_7); __Pyx_XDECREF(__pyx_t_8); __Pyx_XDECREF(__pyx_t_9); __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); __pyx_r = 0; __pyx_L0:; __Pyx_XGIVEREF(__pyx_r); __Pyx_RefNannyFinishContext(); return __pyx_r; } static struct __pyx_vtabstruct_array __pyx_vtable_array; static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_array_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_array_obj *)o); p->__pyx_vtab = __pyx_vtabptr_array; p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_array(PyObject *o) { struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_array___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->mode); Py_CLEAR(p->_format); (*Py_TYPE(o)->tp_free)(o); } static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int 
__pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_array___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { PyObject *v = PyObject_GenericGetAttr(o, n); if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); v = __pyx_array___getattr__(o, n); } return v; } static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); } static PyMethodDef __pyx_methods_array[] = { {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_array_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_array_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_array[] = { {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_array = { __pyx_array___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_array, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_array = { __pyx_array___len__, /*mp_length*/ __pyx_array___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_array = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_array_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_array = { PyVarObject_HEAD_INIT(0, 0) "thread_demo.array", /*tp_name*/ sizeof(struct __pyx_array_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_array, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif 0, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ __pyx_tp_getattro_array, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ 0, /*tp_doc*/ 0, /*tp_traverse*/ 0, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_array, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_array, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_array, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { struct __pyx_MemviewEnum_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) 
PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_MemviewEnum_obj *)o); p->name = Py_None; Py_INCREF(Py_None); return o; } static void __pyx_tp_dealloc_Enum(PyObject *o) { struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); Py_CLEAR(p->name); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { int e; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; if (p->name) { e = (*v)(p->name, a); if (e) return e; } return 0; } static int __pyx_tp_clear_Enum(PyObject *o) { PyObject* tmp; struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; tmp = ((PyObject*)p->name); p->name = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); return 0; } static PyMethodDef __pyx_methods_Enum[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_MemviewEnum = { PyVarObject_HEAD_INIT(0, 0) "thread_demo.Enum", /*tp_name*/ sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_Enum, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_MemviewEnum___repr__, /*tp_repr*/ 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ 0, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_Enum, /*tp_traverse*/ __pyx_tp_clear_Enum, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_Enum, /*tp_methods*/ 0, /*tp_members*/ 0, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ __pyx_MemviewEnum___init__, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_Enum, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryview_obj *p; PyObject *o; if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { o = (*t->tp_alloc)(t, 0); } else { o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); } if (unlikely(!o)) return 0; p = ((struct __pyx_memoryview_obj *)o); p->__pyx_vtab = __pyx_vtabptr_memoryview; p->obj = Py_None; Py_INCREF(Py_None); p->_size = Py_None; Py_INCREF(Py_None); p->_array_interface = Py_None; Py_INCREF(Py_None); p->view.obj = NULL; if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; return o; bad: Py_DECREF(o); o = 0; return NULL; } static void __pyx_tp_dealloc_memoryview(PyObject *o) { struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; #if CYTHON_USE_TP_FINALIZE if 
(unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryview___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->obj); Py_CLEAR(p->_size); Py_CLEAR(p->_array_interface); (*Py_TYPE(o)->tp_free)(o); } static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; if (p->obj) { e = (*v)(p->obj, a); if (e) return e; } if (p->_size) { e = (*v)(p->_size, a); if (e) return e; } if (p->_array_interface) { e = (*v)(p->_array_interface, a); if (e) return e; } if (p->view.obj) { e = (*v)(p->view.obj, a); if (e) return e; } return 0; } static int __pyx_tp_clear_memoryview(PyObject *o) { PyObject* tmp; struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; tmp = ((PyObject*)p->obj); p->obj = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_size); p->_size = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); tmp = ((PyObject*)p->_array_interface); p->_array_interface = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); Py_CLEAR(p->view.obj); return 0; } static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { PyObject *r; PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); Py_DECREF(x); return r; } static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { if (v) { return __pyx_memoryview___setitem__(o, i, v); } else { PyErr_Format(PyExc_NotImplementedError, "Subscript deletion not supported by %.200s", Py_TYPE(o)->tp_name); return -1; } } static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); } static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); } static PyMethodDef __pyx_methods_memoryview[] = { {"is_c_contig", (PyCFunction)__pyx_memoryview_is_c_contig, METH_NOARGS, 0}, {"is_f_contig", (PyCFunction)__pyx_memoryview_is_f_contig, METH_NOARGS, 0}, {"copy", 
(PyCFunction)__pyx_memoryview_copy, METH_NOARGS, 0}, {"copy_fortran", (PyCFunction)__pyx_memoryview_copy_fortran, METH_NOARGS, 0}, {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryview_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets_memoryview[] = { {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PySequenceMethods __pyx_tp_as_sequence_memoryview = { __pyx_memoryview___len__, /*sq_length*/ 0, /*sq_concat*/ 0, /*sq_repeat*/ __pyx_sq_item_memoryview, /*sq_item*/ 0, /*sq_slice*/ 0, /*sq_ass_item*/ 0, /*sq_ass_slice*/ 0, /*sq_contains*/ 0, /*sq_inplace_concat*/ 0, /*sq_inplace_repeat*/ }; static PyMappingMethods __pyx_tp_as_mapping_memoryview = { __pyx_memoryview___len__, /*mp_length*/ __pyx_memoryview___getitem__, /*mp_subscript*/ __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ }; static PyBufferProcs __pyx_tp_as_buffer_memoryview = { #if PY_MAJOR_VERSION < 3 0, /*bf_getreadbuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getwritebuffer*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getsegcount*/ #endif #if PY_MAJOR_VERSION < 3 0, /*bf_getcharbuffer*/ #endif __pyx_memoryview_getbuffer, /*bf_getbuffer*/ 0, /*bf_releasebuffer*/ }; static PyTypeObject __pyx_type___pyx_memoryview = { PyVarObject_HEAD_INIT(0, 0) "thread_demo.memoryview", /*tp_name*/ sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif __pyx_memoryview___repr__, /*tp_repr*/ 0, /*tp_as_number*/ &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ __pyx_memoryview___str__, /*tp_str*/ 0, /*tp_getattro*/ 0, /*tp_setattro*/ &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ 0, /*tp_doc*/ __pyx_tp_traverse_memoryview, /*tp_traverse*/ __pyx_tp_clear_memoryview, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods_memoryview, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets_memoryview, /*tp_getset*/ 0, /*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new_memoryview, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; static PyObject 
*__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { struct __pyx_memoryviewslice_obj *p; PyObject *o = __pyx_tp_new_memoryview(t, a, k); if (unlikely(!o)) return 0; p = ((struct __pyx_memoryviewslice_obj *)o); p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; p->from_object = Py_None; Py_INCREF(Py_None); p->from_slice.memview = NULL; return o; } static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; #if CYTHON_USE_TP_FINALIZE if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { if (PyObject_CallFinalizerFromDealloc(o)) return; } #endif PyObject_GC_UnTrack(o); { PyObject *etype, *eval, *etb; PyErr_Fetch(&etype, &eval, &etb); ++Py_REFCNT(o); __pyx_memoryviewslice___dealloc__(o); --Py_REFCNT(o); PyErr_Restore(etype, eval, etb); } Py_CLEAR(p->from_object); PyObject_GC_Track(o); __pyx_tp_dealloc_memoryview(o); } static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { int e; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; if (p->from_object) { e = (*v)(p->from_object, a); if (e) return e; } return 0; } static int __pyx_tp_clear__memoryviewslice(PyObject *o) { PyObject* tmp; struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; __pyx_tp_clear_memoryview(o); tmp = ((PyObject*)p->from_object); p->from_object = Py_None; Py_INCREF(Py_None); Py_XDECREF(tmp); __PYX_XDEC_MEMVIEW(&p->from_slice, 1); return 0; } static PyObject *__pyx_getprop___pyx_memoryviewslice_base(PyObject *o, CYTHON_UNUSED void *x) { return __pyx_pw_15View_dot_MemoryView_16_memoryviewslice_4base_1__get__(o); } static PyMethodDef __pyx_methods__memoryviewslice[] = { {"__reduce_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, METH_NOARGS, 0}, {"__setstate_cython__", (PyCFunction)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, METH_O, 0}, {0, 0, 0, 0} }; static struct PyGetSetDef __pyx_getsets__memoryviewslice[] = { {(char *)"base", __pyx_getprop___pyx_memoryviewslice_base, 0, (char *)0, 0}, {0, 0, 0, 0, 0} }; static PyTypeObject __pyx_type___pyx_memoryviewslice = { PyVarObject_HEAD_INIT(0, 0) "thread_demo._memoryviewslice", /*tp_name*/ sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ 0, /*tp_itemsize*/ __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ 0, /*tp_print*/ 0, /*tp_getattr*/ 0, /*tp_setattr*/ #if PY_MAJOR_VERSION < 3 0, /*tp_compare*/ #endif #if PY_MAJOR_VERSION >= 3 0, /*tp_as_async*/ #endif #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___repr__, /*tp_repr*/ #else 0, /*tp_repr*/ #endif 0, /*tp_as_number*/ 0, /*tp_as_sequence*/ 0, /*tp_as_mapping*/ 0, /*tp_hash*/ 0, /*tp_call*/ #if CYTHON_COMPILING_IN_PYPY __pyx_memoryview___str__, /*tp_str*/ #else 0, /*tp_str*/ #endif 0, /*tp_getattro*/ 0, /*tp_setattro*/ 0, /*tp_as_buffer*/ Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ "Internal class for passing memoryview slices to Python", /*tp_doc*/ __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ __pyx_tp_clear__memoryviewslice, /*tp_clear*/ 0, /*tp_richcompare*/ 0, /*tp_weaklistoffset*/ 0, /*tp_iter*/ 0, /*tp_iternext*/ __pyx_methods__memoryviewslice, /*tp_methods*/ 0, /*tp_members*/ __pyx_getsets__memoryviewslice, /*tp_getset*/ 0, 
/*tp_base*/ 0, /*tp_dict*/ 0, /*tp_descr_get*/ 0, /*tp_descr_set*/ 0, /*tp_dictoffset*/ 0, /*tp_init*/ 0, /*tp_alloc*/ __pyx_tp_new__memoryviewslice, /*tp_new*/ 0, /*tp_free*/ 0, /*tp_is_gc*/ 0, /*tp_bases*/ 0, /*tp_mro*/ 0, /*tp_cache*/ 0, /*tp_subclasses*/ 0, /*tp_weaklist*/ 0, /*tp_del*/ 0, /*tp_version_tag*/ #if PY_VERSION_HEX >= 0x030400a1 0, /*tp_finalize*/ #endif }; static PyMethodDef __pyx_methods[] = { {0, 0, 0, 0} }; #if PY_MAJOR_VERSION >= 3 #if CYTHON_PEP489_MULTI_PHASE_INIT static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ static int __pyx_pymod_exec_thread_demo(PyObject* module); /*proto*/ static PyModuleDef_Slot __pyx_moduledef_slots[] = { {Py_mod_create, (void*)__pyx_pymod_create}, {Py_mod_exec, (void*)__pyx_pymod_exec_thread_demo}, {0, NULL} }; #endif static struct PyModuleDef __pyx_moduledef = { PyModuleDef_HEAD_INIT, "thread_demo", __pyx_k_Created_on_Tue_Mar_20_09_22_22, /* m_doc */ #if CYTHON_PEP489_MULTI_PHASE_INIT 0, /* m_size */ #else -1, /* m_size */ #endif __pyx_methods /* m_methods */, #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_moduledef_slots, /* m_slots */ #else NULL, /* m_reload */ #endif NULL, /* m_traverse */ NULL, /* m_clear */ NULL /* m_free */ }; #endif static __Pyx_StringTabEntry __pyx_string_tab[] = { {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, {&__pyx_kp_s_Cannot_index_with_type_s, __pyx_k_Cannot_index_with_type_s, sizeof(__pyx_k_Cannot_index_with_type_s), 0, 0, 1, 0}, {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, {&__pyx_kp_s_Incompatible_checksums_s_vs_0xb0, __pyx_k_Incompatible_checksums_s_vs_0xb0, sizeof(__pyx_k_Incompatible_checksums_s_vs_0xb0), 0, 0, 1, 0}, {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 0, 1, 0}, {&__pyx_kp_s_Invalid_shape_in_axis_d_d, __pyx_k_Invalid_shape_in_axis_d_d, sizeof(__pyx_k_Invalid_shape_in_axis_d_d), 0, 0, 1, 0}, {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, {&__pyx_n_s_N, __pyx_k_N, sizeof(__pyx_k_N), 0, 0, 1, 1}, {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, {&__pyx_kp_s_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 0, 1, 0}, {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, 
sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, {&__pyx_n_s_X, __pyx_k_X, sizeof(__pyx_k_X), 0, 0, 1, 1}, {&__pyx_n_s_Y, __pyx_k_Y, sizeof(__pyx_k_Y), 0, 0, 1, 1}, {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, {&__pyx_n_s_c_array_f, __pyx_k_c_array_f, sizeof(__pyx_k_c_array_f), 0, 0, 1, 1}, {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, {&__pyx_n_s_enumerate, __pyx_k_enumerate, sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, {&__pyx_kp_s_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 0, 1, 0}, {&__pyx_n_s_i, __pyx_k_i, sizeof(__pyx_k_i), 0, 0, 1, 1}, {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, 
{&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_getbuffer, __pyx_k_pyx_getbuffer, sizeof(__pyx_k_pyx_getbuffer), 0, 0, 1, 1}, {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 0, 1, 0}, {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, {&__pyx_n_s_thread_demo, __pyx_k_thread_demo, sizeof(__pyx_k_thread_demo), 0, 0, 1, 1}, {&__pyx_kp_s_thread_demo_pyx, __pyx_k_thread_demo_pyx, sizeof(__pyx_k_thread_demo_pyx), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, {0, 0, 0, 0, 0, 0, 0} }; static int __Pyx_InitCachedBuiltins(void) { __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 131, __pyx_L1_error) __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 146, __pyx_L1_error) __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 149, __pyx_L1_error) __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(1, 178, __pyx_L1_error) __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) __pyx_builtin_Ellipsis = 
__Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 398, __pyx_L1_error) __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 601, __pyx_L1_error) __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 820, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } static int __Pyx_InitCachedConstants(void) { __Pyx_RefNannyDeclarations __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); /* "View.MemoryView":131 * * if not self.ndim: * raise ValueError("Empty shape tuple for cython.array") # <<<<<<<<<<<<<< * * if itemsize <= 0: */ __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_Empty_shape_tuple_for_cython_arr); if (unlikely(!__pyx_tuple_)) __PYX_ERR(1, 131, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple_); __Pyx_GIVEREF(__pyx_tuple_); /* "View.MemoryView":134 * * if itemsize <= 0: * raise ValueError("itemsize <= 0 for cython.array") # <<<<<<<<<<<<<< * * if not isinstance(format, bytes): */ __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_itemsize_0_for_cython_array); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(1, 134, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__2); __Pyx_GIVEREF(__pyx_tuple__2); /* "View.MemoryView":137 * * if not isinstance(format, bytes): * format = format.encode('ASCII') # <<<<<<<<<<<<<< * self._format = format # keep a reference to the byte string * self.format = self._format */ __pyx_tuple__3 = PyTuple_Pack(1, __pyx_n_s_ASCII); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 137, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__3); __Pyx_GIVEREF(__pyx_tuple__3); /* "View.MemoryView":146 * * if not self._shape: * raise MemoryError("unable to allocate shape and strides.") # <<<<<<<<<<<<<< * * */ __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_shape_and_str); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 146, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__4); __Pyx_GIVEREF(__pyx_tuple__4); /* "View.MemoryView":174 * self.data = <char *>malloc(self.len) * if not self.data: * raise MemoryError("unable to allocate array data.") # <<<<<<<<<<<<<< * * if self.dtype_is_object: */ __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_s_unable_to_allocate_array_data); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(1, 174, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__5); __Pyx_GIVEREF(__pyx_tuple__5); /* "View.MemoryView":190 * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS * if not (flags & bufmode): * raise ValueError("Can only create a buffer that is contiguous in memory.") # <<<<<<<<<<<<<< * info.buf = self.data * info.len = self.len */ __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_s_Can_only_create_a_buffer_that_is); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 190, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__6); __Pyx_GIVEREF(__pyx_tuple__6); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__7); __Pyx_GIVEREF(__pyx_tuple__7); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__8 = PyTuple_Pack(1, 
__pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__8); __Pyx_GIVEREF(__pyx_tuple__8); /* "View.MemoryView":486 * result = struct.unpack(self.view.format, bytesitem) * except struct.error: * raise ValueError("Unable to convert item to object") # <<<<<<<<<<<<<< * else: * if len(self.view.format) == 1: */ __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_Unable_to_convert_item_to_object); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(1, 486, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__9); __Pyx_GIVEREF(__pyx_tuple__9); /* "View.MemoryView":558 * if self.view.strides == NULL: * * raise ValueError("Buffer view does not expose strides") # <<<<<<<<<<<<<< * * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) */ __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_Buffer_view_does_not_expose_stri); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(1, 558, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__10); __Pyx_GIVEREF(__pyx_tuple__10); /* "View.MemoryView":565 * def suboffsets(self): * if self.view.suboffsets == NULL: * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< * * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) */ __pyx_tuple__11 = PyTuple_New(1); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 565, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__11); __Pyx_INCREF(__pyx_int_neg_1); __Pyx_GIVEREF(__pyx_int_neg_1); PyTuple_SET_ITEM(__pyx_tuple__11, 0, __pyx_int_neg_1); __Pyx_GIVEREF(__pyx_tuple__11); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__12); __Pyx_GIVEREF(__pyx_tuple__12); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__13); __Pyx_GIVEREF(__pyx_tuple__13); /* "View.MemoryView":670 * if item is Ellipsis: * if not seen_ellipsis: * result.extend([slice(None)] * (ndim - len(tup) + 1)) # <<<<<<<<<<<<<< * seen_ellipsis = True * else: */ __pyx_slice__14 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__14)) __PYX_ERR(1, 670, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__14); __Pyx_GIVEREF(__pyx_slice__14); /* "View.MemoryView":673 * seen_ellipsis = True * else: * result.append(slice(None)) # <<<<<<<<<<<<<< * have_slices = True * else: */ __pyx_slice__15 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__15)) __PYX_ERR(1, 673, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__15); __Pyx_GIVEREF(__pyx_slice__15); /* "View.MemoryView":684 * nslices = ndim - len(result) * if nslices: * result.extend([slice(None)] * nslices) # <<<<<<<<<<<<<< * * return have_slices or nslices, tuple(result) */ __pyx_slice__16 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__16)) __PYX_ERR(1, 684, __pyx_L1_error) __Pyx_GOTREF(__pyx_slice__16); __Pyx_GIVEREF(__pyx_slice__16); /* "View.MemoryView":691 * for suboffset in suboffsets[:ndim]: * if 
suboffset >= 0: * raise ValueError("Indirect dimensions not supported") # <<<<<<<<<<<<<< * * */ __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_Indirect_dimensions_not_supporte); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 691, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__17); __Pyx_GIVEREF(__pyx_tuple__17); /* "(tree fragment)":2 * def __reduce_cython__(self): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") */ __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 2, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__18); __Pyx_GIVEREF(__pyx_tuple__18); /* "(tree fragment)":4 * raise TypeError("no default __reduce__ due to non-trivial __cinit__") * def __setstate_cython__(self, __pyx_state): * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< */ __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 4, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__19); __Pyx_GIVEREF(__pyx_tuple__19); /* "thread_demo.pyx":14 * from libc.math cimport exp as c_exp * * def c_array_f(double[:] X): # <<<<<<<<<<<<<< * * cdef int N = X.shape[0] */ __pyx_tuple__20 = PyTuple_Pack(5, __pyx_n_s_X, __pyx_n_s_X, __pyx_n_s_N, __pyx_n_s_Y, __pyx_n_s_i); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__20); __Pyx_GIVEREF(__pyx_tuple__20); __pyx_codeobj__21 = (PyObject*)__Pyx_PyCode_New(1, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__20, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_thread_demo_pyx, __pyx_n_s_c_array_f, 14, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__21)) __PYX_ERR(0, 14, __pyx_L1_error) /* "View.MemoryView":284 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_tuple__22 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(1, 284, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__22); __Pyx_GIVEREF(__pyx_tuple__22); /* "View.MemoryView":285 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__23); __Pyx_GIVEREF(__pyx_tuple__23); /* "View.MemoryView":286 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__24 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__24); __Pyx_GIVEREF(__pyx_tuple__24); /* "View.MemoryView":289 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__25); __Pyx_GIVEREF(__pyx_tuple__25); /* 
"View.MemoryView":290 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 290, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__26); __Pyx_GIVEREF(__pyx_tuple__26); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ __pyx_tuple__27 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_tuple__27); __Pyx_GIVEREF(__pyx_tuple__27); __pyx_codeobj__28 = (PyObject*)__Pyx_PyCode_New(3, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__27, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__28)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_RefNannyFinishContext(); return 0; __pyx_L1_error:; __Pyx_RefNannyFinishContext(); return -1; } static int __Pyx_InitGlobals(void) { /* InitThreads.init */ #ifdef WITH_THREAD PyEval_InitThreads(); #endif if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(0, 1, __pyx_L1_error); __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) return 0; __pyx_L1_error:; return -1; } #if PY_MAJOR_VERSION < 3 PyMODINIT_FUNC initthread_demo(void); /*proto*/ PyMODINIT_FUNC initthread_demo(void) #else PyMODINIT_FUNC PyInit_thread_demo(void); /*proto*/ PyMODINIT_FUNC PyInit_thread_demo(void) #if CYTHON_PEP489_MULTI_PHASE_INIT { return PyModuleDef_Init(&__pyx_moduledef); } static int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name) { PyObject *value = PyObject_GetAttrString(spec, from_name); int result = 0; if (likely(value)) { result = PyDict_SetItemString(moddict, to_name, value); Py_DECREF(value); } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Clear(); } else { result = -1; } return result; } static PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { PyObject *module = NULL, *moddict, *modname; if (__pyx_m) return __Pyx_NewRef(__pyx_m); modname = PyObject_GetAttrString(spec, "name"); if (unlikely(!modname)) goto bad; module = PyModule_NewObject(modname); Py_DECREF(modname); if (unlikely(!module)) goto bad; moddict = PyModule_GetDict(module); if (unlikely(!moddict)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__") < 0)) goto bad; if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__") < 0)) goto bad; return module; bad: Py_XDECREF(module); return NULL; } static int 
__pyx_pymod_exec_thread_demo(PyObject *__pyx_pyinit_module) #endif #endif { PyObject *__pyx_t_1 = NULL; static PyThread_type_lock __pyx_t_2[8]; __Pyx_RefNannyDeclarations #if CYTHON_PEP489_MULTI_PHASE_INIT if (__pyx_m && __pyx_m == __pyx_pyinit_module) return 0; #endif #if CYTHON_REFNANNY __Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); if (!__Pyx_RefNanny) { PyErr_Clear(); __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); if (!__Pyx_RefNanny) Py_FatalError("failed to import 'refnanny' module"); } #endif __Pyx_RefNannySetupContext("PyMODINIT_FUNC PyInit_thread_demo(void)", 0); if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) #ifdef __Pyx_CyFunction_USED if (__pyx_CyFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_FusedFunction_USED if (__pyx_FusedFunction_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Coroutine_USED if (__pyx_Coroutine_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_Generator_USED if (__pyx_Generator_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_AsyncGen_USED if (__pyx_AsyncGen_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif #ifdef __Pyx_StopAsyncIteration_USED if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /*--- Library function declarations ---*/ /*--- Threads initialization code ---*/ #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS #ifdef WITH_THREAD /* Python build with threading support? */ PyEval_InitThreads(); #endif #endif /*--- Module creation code ---*/ #if CYTHON_PEP489_MULTI_PHASE_INIT __pyx_m = __pyx_pyinit_module; Py_INCREF(__pyx_m); #else #if PY_MAJOR_VERSION < 3 __pyx_m = Py_InitModule4("thread_demo", __pyx_methods, __pyx_k_Created_on_Tue_Mar_20_09_22_22, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); #else __pyx_m = PyModule_Create(&__pyx_moduledef); #endif if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) #endif __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) Py_INCREF(__pyx_d); __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) #if CYTHON_COMPILING_IN_PYPY Py_INCREF(__pyx_b); #endif if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error); /*--- Initialize various global constants etc. 
---*/ if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif if (__pyx_module_is_main_thread_demo) { if (PyObject_SetAttrString(__pyx_m, "__name__", __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) } #if PY_MAJOR_VERSION >= 3 { PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) if (!PyDict_GetItemString(modules, "thread_demo")) { if (unlikely(PyDict_SetItemString(modules, "thread_demo", __pyx_m) < 0)) __PYX_ERR(0, 1, __pyx_L1_error) } } #endif /*--- Builtin init code ---*/ if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Constants init code ---*/ if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) /*--- Global init code ---*/ generic = Py_None; Py_INCREF(Py_None); strided = Py_None; Py_INCREF(Py_None); indirect = Py_None; Py_INCREF(Py_None); contiguous = Py_None; Py_INCREF(Py_None); indirect_contiguous = Py_None; Py_INCREF(Py_None); /*--- Variable export code ---*/ /*--- Function export code ---*/ /*--- Type init code ---*/ __pyx_vtabptr_array = &__pyx_vtable_array; __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; if (PyType_Ready(&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_type___pyx_array.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_array.tp_dict, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_array) < 0) __PYX_ERR(1, 103, __pyx_L1_error) __pyx_array_type = &__pyx_type___pyx_array; if (PyType_Ready(&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 277, __pyx_L1_error) __pyx_type___pyx_MemviewEnum.tp_print = 0; if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_MemviewEnum) < 0) __PYX_ERR(1, 277, __pyx_L1_error) __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_slice_assignment; __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; if (PyType_Ready(&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 328, __pyx_L1_error) __pyx_type___pyx_memoryview.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryview.tp_dict, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 328, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryview) < 0) __PYX_ERR(1, 328, __pyx_L1_error) __pyx_memoryview_type = 
&__pyx_type___pyx_memoryview; __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; __pyx_type___pyx_memoryviewslice.tp_base = __pyx_memoryview_type; if (PyType_Ready(&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 953, __pyx_L1_error) __pyx_type___pyx_memoryviewslice.tp_print = 0; if (__Pyx_SetVtable(__pyx_type___pyx_memoryviewslice.tp_dict, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 953, __pyx_L1_error) if (__Pyx_setup_reduce((PyObject*)&__pyx_type___pyx_memoryviewslice) < 0) __PYX_ERR(1, 953, __pyx_L1_error) __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; /*--- Type import code ---*/ /*--- Variable import code ---*/ /*--- Function import code ---*/ /*--- Execution code ---*/ #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) #endif /* "thread_demo.pyx":10 * #cython: boundscheck=False, wraparound=False, nonecheck=False * * import numpy as np # <<<<<<<<<<<<<< * from cython.parallel import prange * from libc.math cimport exp as c_exp */ __pyx_t_1 = __Pyx_Import(__pyx_n_s_numpy, 0, -1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 10, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_1) < 0) __PYX_ERR(0, 10, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thread_demo.pyx":14 * from libc.math cimport exp as c_exp * * def c_array_f(double[:] X): # <<<<<<<<<<<<<< * * cdef int N = X.shape[0] */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_11thread_demo_1c_array_f, NULL, __pyx_n_s_thread_demo); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_c_array_f, __pyx_t_1) < 0) __PYX_ERR(0, 14, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "thread_demo.pyx":1 * #!/usr/bin/env python2 # <<<<<<<<<<<<<< * # -*- coding: utf-8 -*- * """ */ __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(0, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":207 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_array_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * def __dealloc__(array self): */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_array_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_array_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 207, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_array_type); /* "View.MemoryView":284 * return self.name * * cdef generic = Enum("<strided and direct or indirect>") # <<<<<<<<<<<<<< * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__22, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 284, __pyx_L1_error) 
__Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(generic); __Pyx_DECREF_SET(generic, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":285 * * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default # <<<<<<<<<<<<<< * cdef indirect = Enum("<strided and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 285, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(strided); __Pyx_DECREF_SET(strided, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":286 * cdef generic = Enum("<strided and direct or indirect>") * cdef strided = Enum("<strided and direct>") # default * cdef indirect = Enum("<strided and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__24, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 286, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect); __Pyx_DECREF_SET(indirect, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":289 * * * cdef contiguous = Enum("<contiguous and direct>") # <<<<<<<<<<<<<< * cdef indirect_contiguous = Enum("<contiguous and indirect>") * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 289, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(contiguous); __Pyx_DECREF_SET(contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":290 * * cdef contiguous = Enum("<contiguous and direct>") * cdef indirect_contiguous = Enum("<contiguous and indirect>") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 290, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); __Pyx_XGOTREF(indirect_contiguous); __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_1); __Pyx_GIVEREF(__pyx_t_1); __pyx_t_1 = 0; /* "View.MemoryView":314 * * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ * PyThread_allocate_lock(), */ __pyx_memoryview_thread_locks_used = 0; /* "View.MemoryView":315 * DEF THREAD_LOCKS_PREALLOCATED = 8 * cdef int __pyx_memoryview_thread_locks_used = 0 * cdef PyThread_type_lock[THREAD_LOCKS_PREALLOCATED] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< * PyThread_allocate_lock(), * PyThread_allocate_lock(), */ __pyx_t_2[0] = PyThread_allocate_lock(); __pyx_t_2[1] = PyThread_allocate_lock(); __pyx_t_2[2] = PyThread_allocate_lock(); __pyx_t_2[3] = PyThread_allocate_lock(); __pyx_t_2[4] = PyThread_allocate_lock(); __pyx_t_2[5] = PyThread_allocate_lock(); __pyx_t_2[6] = PyThread_allocate_lock(); __pyx_t_2[7] = PyThread_allocate_lock(); memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_2, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); /* "View.MemoryView":537 * info.obj = self * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 537, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryview_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 537, __pyx_L1_error) 
__Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryview_type); /* "View.MemoryView":983 * return self.from_object * * __pyx_getbuffer = capsule(<void *> &__pyx_memoryview_getbuffer, "getbuffer(obj, view, flags)") # <<<<<<<<<<<<<< * * */ __pyx_t_1 = __pyx_capsule_create(((void *)(&__pyx_memoryview_getbuffer)), ((char *)"getbuffer(obj, view, flags)")); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem((PyObject *)__pyx_memoryviewslice_type->tp_dict, __pyx_n_s_pyx_getbuffer, __pyx_t_1) < 0) __PYX_ERR(1, 983, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; PyType_Modified(__pyx_memoryviewslice_type); /* "(tree fragment)":1 * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< * if __pyx_checksum != 0xb068931: * from pickle import PickleError as __pyx_PickleError */ __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_GOTREF(__pyx_t_1); if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; /* "(tree fragment)":9 * __pyx_unpickle_Enum__set_state(<Enum> __pyx_result, __pyx_state) * return __pyx_result * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< * __pyx_result.name = __pyx_state[0] * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): */ /*--- Wrapped vars code ---*/ goto __pyx_L0; __pyx_L1_error:; __Pyx_XDECREF(__pyx_t_1); if (__pyx_m) { if (__pyx_d) { __Pyx_AddTraceback("init thread_demo", 0, __pyx_lineno, __pyx_filename); } Py_DECREF(__pyx_m); __pyx_m = 0; } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_ImportError, "init thread_demo"); } __pyx_L0:; __Pyx_RefNannyFinishContext(); #if CYTHON_PEP489_MULTI_PHASE_INIT return (__pyx_m != NULL) ? 
0 : -1; #elif PY_MAJOR_VERSION >= 3 return __pyx_m; #else return; #endif } /* --- Runtime support code --- */ /* Refnanny */ #if CYTHON_REFNANNY static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { PyObject *m = NULL, *p = NULL; void *r = NULL; m = PyImport_ImportModule((char *)modname); if (!m) goto end; p = PyObject_GetAttrString(m, (char *)"RefNannyAPI"); if (!p) goto end; r = PyLong_AsVoidPtr(p); end: Py_XDECREF(p); Py_XDECREF(m); return (__Pyx_RefNannyAPIStruct *)r; } #endif /* GetBuiltinName */ static PyObject *__Pyx_GetBuiltinName(PyObject *name) { PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); if (unlikely(!result)) { PyErr_Format(PyExc_NameError, #if PY_MAJOR_VERSION >= 3 "name '%U' is not defined", name); #else "name '%.200s' is not defined", PyString_AS_STRING(name)); #endif } return result; } /* GetModuleGlobalName */ static CYTHON_INLINE PyObject *__Pyx_GetModuleGlobalName(PyObject *name) { PyObject *result; #if !CYTHON_AVOID_BORROWED_REFS result = PyDict_GetItem(__pyx_d, name); if (likely(result)) { Py_INCREF(result); } else { #else result = PyObject_GetItem(__pyx_d, name); if (!result) { PyErr_Clear(); #endif result = __Pyx_GetBuiltinName(name); } return result; } /* PyCFunctionFastCall */ #if CYTHON_FAST_PYCCALL static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { PyCFunctionObject *func = (PyCFunctionObject*)func_obj; PyCFunction meth = PyCFunction_GET_FUNCTION(func); PyObject *self = PyCFunction_GET_SELF(func); int flags = PyCFunction_GET_FLAGS(func); assert(PyCFunction_Check(func)); assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS))); assert(nargs >= 0); assert(nargs == 0 || args != NULL); /* _PyCFunction_FastCallDict() must not be called with an exception set, because it may clear it (directly or indirectly) and so the caller loses its exception */ assert(!PyErr_Occurred()); if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { return (*((__Pyx_PyCFunctionFastWithKeywords)meth)) (self, args, nargs, NULL); } else { return (*((__Pyx_PyCFunctionFast)meth)) (self, args, nargs); } } #endif /* PyFunctionFastCall */ #if CYTHON_FAST_PYCALL #include "frameobject.h" static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, PyObject *globals) { PyFrameObject *f; PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject **fastlocals; Py_ssize_t i; PyObject *result; assert(globals != NULL); /* XXX Perhaps we should create a specialized PyFrame_New() that doesn't take locals, but does take builtins without sanity checking them. */ assert(tstate != NULL); f = PyFrame_New(tstate, co, globals, NULL); if (f == NULL) { return NULL; } fastlocals = f->f_localsplus; for (i = 0; i < na; i++) { Py_INCREF(*args); fastlocals[i] = *args++; } result = PyEval_EvalFrameEx(f,0); ++tstate->recursion_depth; Py_DECREF(f); --tstate->recursion_depth; return result; } #if 1 || PY_VERSION_HEX < 0x030600B1 static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); PyObject *globals = PyFunction_GET_GLOBALS(func); PyObject *argdefs = PyFunction_GET_DEFAULTS(func); PyObject *closure; #if PY_MAJOR_VERSION >= 3 PyObject *kwdefs; #endif PyObject *kwtuple, **k; PyObject **d; Py_ssize_t nd; Py_ssize_t nk; PyObject *result; assert(kwargs == NULL || PyDict_Check(kwargs)); nk = kwargs ? 
PyDict_Size(kwargs) : 0; if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { return NULL; } if ( #if PY_MAJOR_VERSION >= 3 co->co_kwonlyargcount == 0 && #endif likely(kwargs == NULL || nk == 0) && co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { if (argdefs == NULL && co->co_argcount == nargs) { result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); goto done; } else if (nargs == 0 && argdefs != NULL && co->co_argcount == Py_SIZE(argdefs)) { /* function called with no arguments, but all parameters have a default value: use default values as arguments .*/ args = &PyTuple_GET_ITEM(argdefs, 0); result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); goto done; } } if (kwargs != NULL) { Py_ssize_t pos, i; kwtuple = PyTuple_New(2 * nk); if (kwtuple == NULL) { result = NULL; goto done; } k = &PyTuple_GET_ITEM(kwtuple, 0); pos = i = 0; while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { Py_INCREF(k[i]); Py_INCREF(k[i+1]); i += 2; } nk = i / 2; } else { kwtuple = NULL; k = NULL; } closure = PyFunction_GET_CLOSURE(func); #if PY_MAJOR_VERSION >= 3 kwdefs = PyFunction_GET_KW_DEFAULTS(func); #endif if (argdefs != NULL) { d = &PyTuple_GET_ITEM(argdefs, 0); nd = Py_SIZE(argdefs); } else { d = NULL; nd = 0; } #if PY_MAJOR_VERSION >= 3 result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, kwdefs, closure); #else result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, args, nargs, k, (int)nk, d, (int)nd, closure); #endif Py_XDECREF(kwtuple); done: Py_LeaveRecursiveCall(); return result; } #endif #endif /* PyObjectCall */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { PyObject *result; ternaryfunc call = func->ob_type->tp_call; if (unlikely(!call)) return PyObject_Call(func, arg, kw); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = (*call)(func, arg, kw); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallMethO */ #if CYTHON_COMPILING_IN_CPYTHON static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { PyObject *self, *result; PyCFunction cfunc; cfunc = PyCFunction_GET_FUNCTION(func); self = PyCFunction_GET_SELF(func); if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) return NULL; result = cfunc(self, arg); Py_LeaveRecursiveCall(); if (unlikely(!result) && unlikely(!PyErr_Occurred())) { PyErr_SetString( PyExc_SystemError, "NULL result without error in PyObject_Call"); } return result; } #endif /* PyObjectCallOneArg */ #if CYTHON_COMPILING_IN_CPYTHON static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_New(1); if (unlikely(!args)) return NULL; Py_INCREF(arg); PyTuple_SET_ITEM(args, 0, arg); result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { #if CYTHON_FAST_PYCALL if (PyFunction_Check(func)) { return __Pyx_PyFunction_FastCall(func, &arg, 1); } #endif if (likely(PyCFunction_Check(func))) { if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { return __Pyx_PyObject_CallMethO(func, arg); #if CYTHON_FAST_PYCCALL } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { return 
__Pyx_PyCFunction_FastCall(func, &arg, 1); #endif } } return __Pyx__PyObject_CallOneArg(func, arg); } #else static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { PyObject *result; PyObject *args = PyTuple_Pack(1, arg); if (unlikely(!args)) return NULL; result = __Pyx_PyObject_Call(func, args, NULL); Py_DECREF(args); return result; } #endif /* BufferIndexError */ static void __Pyx_RaiseBufferIndexError(int axis) { PyErr_Format(PyExc_IndexError, "Out of bounds on buffer access (axis %d)", axis); } /* BufferIndexErrorNogil */ static void __Pyx_RaiseBufferIndexErrorNogil(int axis) { #ifdef WITH_THREAD PyGILState_STATE gilstate = PyGILState_Ensure(); #endif __Pyx_RaiseBufferIndexError(axis); #ifdef WITH_THREAD PyGILState_Release(gilstate); #endif } /* PyErrFetchRestore */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; tmp_type = tstate->curexc_type; tmp_value = tstate->curexc_value; tmp_tb = tstate->curexc_traceback; tstate->curexc_type = type; tstate->curexc_value = value; tstate->curexc_traceback = tb; Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { *type = tstate->curexc_type; *value = tstate->curexc_value; *tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; } #endif /* MemviewSliceInit */ static int __Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, int ndim, __Pyx_memviewslice *memviewslice, int memview_is_new_reference) { __Pyx_RefNannyDeclarations int i, retval=-1; Py_buffer *buf = &memview->view; __Pyx_RefNannySetupContext("init_memviewslice", 0); if (!buf) { PyErr_SetString(PyExc_ValueError, "buf is NULL."); goto fail; } else if (memviewslice->memview || memviewslice->data) { PyErr_SetString(PyExc_ValueError, "memviewslice is already initialized!"); goto fail; } if (buf->strides) { for (i = 0; i < ndim; i++) { memviewslice->strides[i] = buf->strides[i]; } } else { Py_ssize_t stride = buf->itemsize; for (i = ndim - 1; i >= 0; i--) { memviewslice->strides[i] = stride; stride *= buf->shape[i]; } } for (i = 0; i < ndim; i++) { memviewslice->shape[i] = buf->shape[i]; if (buf->suboffsets) { memviewslice->suboffsets[i] = buf->suboffsets[i]; } else { memviewslice->suboffsets[i] = -1; } } memviewslice->memview = memview; memviewslice->data = (char *)buf->buf; if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { Py_INCREF(memview); } retval = 0; goto no_fail; fail: memviewslice->memview = 0; memviewslice->data = 0; retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } #ifndef Py_NO_RETURN #define Py_NO_RETURN #endif static void __pyx_fatalerror(const char *fmt, ...) 
Py_NO_RETURN { va_list vargs; char msg[200]; #ifdef HAVE_STDARG_PROTOTYPES va_start(vargs, fmt); #else va_start(vargs); #endif vsnprintf(msg, 200, fmt, vargs); va_end(vargs); Py_FatalError(msg); } static CYTHON_INLINE int __pyx_add_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)++; PyThread_release_lock(lock); return result; } static CYTHON_INLINE int __pyx_sub_acquisition_count_locked(__pyx_atomic_int *acquisition_count, PyThread_type_lock lock) { int result; PyThread_acquire_lock(lock, 1); result = (*acquisition_count)--; PyThread_release_lock(lock); return result; } static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int first_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview || (PyObject *) memview == Py_None) return; if (__pyx_get_slice_count(memview) < 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); first_time = __pyx_add_acquisition_count(memview) == 0; if (first_time) { if (have_gil) { Py_INCREF((PyObject *) memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_INCREF((PyObject *) memview); PyGILState_Release(_gilstate); } } } static CYTHON_INLINE void __Pyx_XDEC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) { int last_time; struct __pyx_memoryview_obj *memview = memslice->memview; if (!memview ) { return; } else if ((PyObject *) memview == Py_None) { memslice->memview = NULL; return; } if (__pyx_get_slice_count(memview) <= 0) __pyx_fatalerror("Acquisition count is %d (line %d)", __pyx_get_slice_count(memview), lineno); last_time = __pyx_sub_acquisition_count(memview) == 1; memslice->data = NULL; if (last_time) { if (have_gil) { Py_CLEAR(memslice->memview); } else { PyGILState_STATE _gilstate = PyGILState_Ensure(); Py_CLEAR(memslice->memview); PyGILState_Release(_gilstate); } } else { memslice->memview = NULL; } } /* RaiseArgTupleInvalid */ static void __Pyx_RaiseArgtupleInvalid( const char* func_name, int exact, Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found) { Py_ssize_t num_expected; const char *more_or_less; if (num_found < num_min) { num_expected = num_min; more_or_less = "at least"; } else { num_expected = num_max; more_or_less = "at most"; } if (exact) { more_or_less = "exactly"; } PyErr_Format(PyExc_TypeError, "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", func_name, more_or_less, num_expected, (num_expected == 1) ? 
"" : "s", num_found); } /* RaiseDoubleKeywords */ static void __Pyx_RaiseDoubleKeywordsError( const char* func_name, PyObject* kw_name) { PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION >= 3 "%s() got multiple values for keyword argument '%U'", func_name, kw_name); #else "%s() got multiple values for keyword argument '%s'", func_name, PyString_AsString(kw_name)); #endif } /* ParseKeywords */ static int __Pyx_ParseOptionalKeywords( PyObject *kwds, PyObject **argnames[], PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, const char* function_name) { PyObject *key = 0, *value = 0; Py_ssize_t pos = 0; PyObject*** name; PyObject*** first_kw_arg = argnames + num_pos_args; while (PyDict_Next(kwds, &pos, &key, &value)) { name = first_kw_arg; while (*name && (**name != key)) name++; if (*name) { values[name-argnames] = value; continue; } name = first_kw_arg; #if PY_MAJOR_VERSION < 3 if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { while (*name) { if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) && _PyString_Eq(**name, key)) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { if ((**argname == key) || ( (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) && _PyString_Eq(**argname, key))) { goto arg_passed_twice; } argname++; } } } else #endif if (likely(PyUnicode_Check(key))) { while (*name) { int cmp = (**name == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : #endif PyUnicode_Compare(**name, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) { values[name-argnames] = value; break; } name++; } if (*name) continue; else { PyObject*** argname = argnames; while (argname != first_kw_arg) { int cmp = (**argname == key) ? 0 : #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 
1 : #endif PyUnicode_Compare(**argname, key); if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; if (cmp == 0) goto arg_passed_twice; argname++; } } } else goto invalid_keyword_type; if (kwds2) { if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; } else { goto invalid_keyword; } } return 0; arg_passed_twice: __Pyx_RaiseDoubleKeywordsError(function_name, key); goto bad; invalid_keyword_type: PyErr_Format(PyExc_TypeError, "%.200s() keywords must be strings", function_name); goto bad; invalid_keyword: PyErr_Format(PyExc_TypeError, #if PY_MAJOR_VERSION < 3 "%.200s() got an unexpected keyword argument '%.200s'", function_name, PyString_AsString(key)); #else "%s() got an unexpected keyword argument '%U'", function_name, key); #endif bad: return -1; } /* ArgTypeTest */ static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } else if (exact) { #if PY_MAJOR_VERSION == 2 if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; #endif } else { if (likely(__Pyx_TypeCheck(obj, type))) return 1; } PyErr_Format(PyExc_TypeError, "Argument '%.200s' has incorrect type (expected %.200s, got %.200s)", name, type->tp_name, Py_TYPE(obj)->tp_name); return 0; } /* RaiseException */ #if PY_MAJOR_VERSION < 3 static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, CYTHON_UNUSED PyObject *cause) { __Pyx_PyThreadState_declare Py_XINCREF(type); if (!value || value == Py_None) value = NULL; else Py_INCREF(value); if (!tb || tb == Py_None) tb = NULL; else { Py_INCREF(tb); if (!PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto raise_error; } } if (PyType_Check(type)) { #if CYTHON_COMPILING_IN_PYPY if (!value) { Py_INCREF(Py_None); value = Py_None; } #endif PyErr_NormalizeException(&type, &value, &tb); } else { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto raise_error; } value = type; type = (PyObject*) Py_TYPE(type); Py_INCREF(type); if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto raise_error; } } __Pyx_PyThreadState_assign __Pyx_ErrRestore(type, value, tb); return; raise_error: Py_XDECREF(value); Py_XDECREF(type); Py_XDECREF(tb); return; } #else static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { PyObject* owned_instance = NULL; if (tb == Py_None) { tb = 0; } else if (tb && !PyTraceBack_Check(tb)) { PyErr_SetString(PyExc_TypeError, "raise: arg 3 must be a traceback or None"); goto bad; } if (value == Py_None) value = 0; if (PyExceptionInstance_Check(type)) { if (value) { PyErr_SetString(PyExc_TypeError, "instance exception may not have a separate value"); goto bad; } value = type; type = (PyObject*) Py_TYPE(value); } else if (PyExceptionClass_Check(type)) { PyObject *instance_class = NULL; if (value && PyExceptionInstance_Check(value)) { instance_class = (PyObject*) Py_TYPE(value); if (instance_class != type) { int is_subclass = PyObject_IsSubclass(instance_class, type); if (!is_subclass) { instance_class = NULL; } else if (unlikely(is_subclass == -1)) { goto bad; } else { type = instance_class; } } } if (!instance_class) { PyObject *args; if (!value) args = PyTuple_New(0); else if (PyTuple_Check(value)) { Py_INCREF(value); args = value; } else 
args = PyTuple_Pack(1, value); if (!args) goto bad; owned_instance = PyObject_Call(type, args, NULL); Py_DECREF(args); if (!owned_instance) goto bad; value = owned_instance; if (!PyExceptionInstance_Check(value)) { PyErr_Format(PyExc_TypeError, "calling %R should have returned an instance of " "BaseException, not %R", type, Py_TYPE(value)); goto bad; } } } else { PyErr_SetString(PyExc_TypeError, "raise: exception class must be a subclass of BaseException"); goto bad; } if (cause) { PyObject *fixed_cause; if (cause == Py_None) { fixed_cause = NULL; } else if (PyExceptionClass_Check(cause)) { fixed_cause = PyObject_CallObject(cause, NULL); if (fixed_cause == NULL) goto bad; } else if (PyExceptionInstance_Check(cause)) { fixed_cause = cause; Py_INCREF(fixed_cause); } else { PyErr_SetString(PyExc_TypeError, "exception causes must derive from " "BaseException"); goto bad; } PyException_SetCause(value, fixed_cause); } PyErr_SetObject(type, value); if (tb) { #if CYTHON_COMPILING_IN_PYPY PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); Py_INCREF(tb); PyErr_Restore(tmp_type, tmp_value, tb); Py_XDECREF(tmp_tb); #else PyThreadState *tstate = __Pyx_PyThreadState_Current; PyObject* tmp_tb = tstate->curexc_traceback; if (tb != tmp_tb) { Py_INCREF(tb); tstate->curexc_traceback = tb; Py_XDECREF(tmp_tb); } #endif } bad: Py_XDECREF(owned_instance); return; } #endif /* BytesEquals */ static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else if (s1 == s2) { return (equals == Py_EQ); } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { const char *ps1, *ps2; Py_ssize_t length = PyBytes_GET_SIZE(s1); if (length != PyBytes_GET_SIZE(s2)) return (equals == Py_NE); ps1 = PyBytes_AS_STRING(s1); ps2 = PyBytes_AS_STRING(s2); if (ps1[0] != ps2[0]) { return (equals == Py_NE); } else if (length == 1) { return (equals == Py_EQ); } else { int result; #if CYTHON_USE_UNICODE_INTERNALS Py_hash_t hash1, hash2; hash1 = ((PyBytesObject*)s1)->ob_shash; hash2 = ((PyBytesObject*)s2)->ob_shash; if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { return (equals == Py_NE); } #endif result = memcmp(ps1, ps2, (size_t)length); return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { return (equals == Py_NE); } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { return (equals == Py_NE); } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } #endif } /* UnicodeEquals */ static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { #if CYTHON_COMPILING_IN_PYPY return PyObject_RichCompareBool(s1, s2, equals); #else #if PY_MAJOR_VERSION < 3 PyObject* owned_ref = NULL; #endif int s1_is_unicode, s2_is_unicode; if (s1 == s2) { goto return_eq; } s1_is_unicode = PyUnicode_CheckExact(s1); s2_is_unicode = PyUnicode_CheckExact(s2); #if PY_MAJOR_VERSION < 3 if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { owned_ref = PyUnicode_FromObject(s2); if (unlikely(!owned_ref)) return -1; s2 = owned_ref; s2_is_unicode = 1; } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { owned_ref = PyUnicode_FromObject(s1); if (unlikely(!owned_ref)) return -1; s1 = owned_ref; s1_is_unicode = 1; } else if (((!s2_is_unicode) & (!s1_is_unicode))) { return __Pyx_PyBytes_Equals(s1, s2, equals); } #endif if (s1_is_unicode & s2_is_unicode) { Py_ssize_t length; int kind; void *data1, *data2; if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) return -1; length = __Pyx_PyUnicode_GET_LENGTH(s1); if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { goto return_ne; } #if CYTHON_USE_UNICODE_INTERNALS { Py_hash_t hash1, hash2; #if CYTHON_PEP393_ENABLED hash1 = ((PyASCIIObject*)s1)->hash; hash2 = ((PyASCIIObject*)s2)->hash; #else hash1 = ((PyUnicodeObject*)s1)->hash; hash2 = ((PyUnicodeObject*)s2)->hash; #endif if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { goto return_ne; } } #endif kind = __Pyx_PyUnicode_KIND(s1); if (kind != __Pyx_PyUnicode_KIND(s2)) { goto return_ne; } data1 = __Pyx_PyUnicode_DATA(s1); data2 = __Pyx_PyUnicode_DATA(s2); if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { goto return_ne; } else if (length == 1) { goto return_eq; } else { int result = memcmp(data1, data2, (size_t)(length * kind)); #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ) ? 
(result == 0) : (result != 0); } } else if ((s1 == Py_None) & s2_is_unicode) { goto return_ne; } else if ((s2 == Py_None) & s1_is_unicode) { goto return_ne; } else { int result; PyObject* py_result = PyObject_RichCompare(s1, s2, equals); if (!py_result) return -1; result = __Pyx_PyObject_IsTrue(py_result); Py_DECREF(py_result); return result; } return_eq: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_EQ); return_ne: #if PY_MAJOR_VERSION < 3 Py_XDECREF(owned_ref); #endif return (equals == Py_NE); #endif } /* None */ static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { Py_ssize_t q = a / b; Py_ssize_t r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* GetAttr */ static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { #if CYTHON_USE_TYPE_SLOTS #if PY_MAJOR_VERSION >= 3 if (likely(PyUnicode_Check(n))) #else if (likely(PyString_Check(n))) #endif return __Pyx_PyObject_GetAttrStr(o, n); #endif return PyObject_GetAttr(o, n); } /* decode_c_string */ static CYTHON_INLINE PyObject* __Pyx_decode_c_string( const char* cstring, Py_ssize_t start, Py_ssize_t stop, const char* encoding, const char* errors, PyObject* (*decode_func)(const char *s, Py_ssize_t size, const char *errors)) { Py_ssize_t length; if (unlikely((start < 0) | (stop < 0))) { size_t slen = strlen(cstring); if (unlikely(slen > (size_t) PY_SSIZE_T_MAX)) { PyErr_SetString(PyExc_OverflowError, "c-string too long to convert to Python"); return NULL; } length = (Py_ssize_t) slen; if (start < 0) { start += length; if (start < 0) start = 0; } if (stop < 0) stop += length; } length = stop - start; if (unlikely(length <= 0)) return PyUnicode_FromUnicode(NULL, 0); cstring += start; if (decode_func) { return decode_func(cstring, length, errors); } else { return PyUnicode_Decode(cstring, length, encoding, errors); } } /* PyErrExceptionMatches */ #if CYTHON_FAST_THREAD_STATE static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(tuple); #if PY_MAJOR_VERSION >= 3 for (i=0; i<n; i++) { if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; } #endif for (i=0; i<n; i++) { if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; } return 0; } static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { PyObject *exc_type = tstate->curexc_type; if (exc_type == err) return 1; if (unlikely(!exc_type)) return 0; if (unlikely(PyTuple_Check(err))) return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); } #endif /* GetAttr3 */ static PyObject *__Pyx_GetAttr3Default(PyObject *d) { __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) return NULL; __Pyx_PyErr_Clear(); Py_INCREF(d); return d; } static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { PyObject *r = __Pyx_GetAttr(o, n); return (likely(r)) ? r : __Pyx_GetAttr3Default(d); } /* RaiseTooManyValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { PyErr_Format(PyExc_ValueError, "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); } /* RaiseNeedMoreValuesToUnpack */ static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { PyErr_Format(PyExc_ValueError, "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", index, (index == 1) ? 
"" : "s"); } /* RaiseNoneIterError */ static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); } /* ExtTypeTest */ static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { if (unlikely(!type)) { PyErr_SetString(PyExc_SystemError, "Missing type object"); return 0; } if (likely(__Pyx_TypeCheck(obj, type))) return 1; PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", Py_TYPE(obj)->tp_name, type->tp_name); return 0; } /* SaveResetException */ #if CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #if PY_VERSION_HEX >= 0x030700A2 *type = tstate->exc_state.exc_type; *value = tstate->exc_state.exc_value; *tb = tstate->exc_state.exc_traceback; #else *type = tstate->exc_type; *value = tstate->exc_value; *tb = tstate->exc_traceback; #endif Py_XINCREF(*type); Py_XINCREF(*value); Py_XINCREF(*tb); } static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if PY_VERSION_HEX >= 0x030700A2 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; tstate->exc_state.exc_type = type; tstate->exc_state.exc_value = value; tstate->exc_state.exc_traceback = tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = type; tstate->exc_value = value; tstate->exc_traceback = tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); } #endif /* GetException */ #if CYTHON_FAST_THREAD_STATE static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { #else static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) { #endif PyObject *local_type, *local_value, *local_tb; #if CYTHON_FAST_THREAD_STATE PyObject *tmp_type, *tmp_value, *tmp_tb; local_type = tstate->curexc_type; local_value = tstate->curexc_value; local_tb = tstate->curexc_traceback; tstate->curexc_type = 0; tstate->curexc_value = 0; tstate->curexc_traceback = 0; #else PyErr_Fetch(&local_type, &local_value, &local_tb); #endif PyErr_NormalizeException(&local_type, &local_value, &local_tb); #if CYTHON_FAST_THREAD_STATE if (unlikely(tstate->curexc_type)) #else if (unlikely(PyErr_Occurred())) #endif goto bad; #if PY_MAJOR_VERSION >= 3 if (local_tb) { if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) goto bad; } #endif Py_XINCREF(local_tb); Py_XINCREF(local_type); Py_XINCREF(local_value); *type = local_type; *value = local_value; *tb = local_tb; #if CYTHON_FAST_THREAD_STATE #if PY_VERSION_HEX >= 0x030700A2 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; tstate->exc_state.exc_type = local_type; tstate->exc_state.exc_value = local_value; tstate->exc_state.exc_traceback = local_tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = local_type; tstate->exc_value = local_value; tstate->exc_traceback = local_tb; #endif Py_XDECREF(tmp_type); Py_XDECREF(tmp_value); Py_XDECREF(tmp_tb); #else PyErr_SetExcInfo(local_type, local_value, local_tb); #endif return 0; bad: *type = 0; *value = 0; *tb = 0; Py_XDECREF(local_type); Py_XDECREF(local_value); Py_XDECREF(local_tb); return -1; } /* SwapException */ #if 
CYTHON_FAST_THREAD_STATE static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; #if PY_VERSION_HEX >= 0x030700A2 tmp_type = tstate->exc_state.exc_type; tmp_value = tstate->exc_state.exc_value; tmp_tb = tstate->exc_state.exc_traceback; tstate->exc_state.exc_type = *type; tstate->exc_state.exc_value = *value; tstate->exc_state.exc_traceback = *tb; #else tmp_type = tstate->exc_type; tmp_value = tstate->exc_value; tmp_tb = tstate->exc_traceback; tstate->exc_type = *type; tstate->exc_value = *value; tstate->exc_traceback = *tb; #endif *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #else static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { PyObject *tmp_type, *tmp_value, *tmp_tb; PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); PyErr_SetExcInfo(*type, *value, *tb); *type = tmp_type; *value = tmp_value; *tb = tmp_tb; } #endif /* Import */ static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { PyObject *empty_list = 0; PyObject *module = 0; PyObject *global_dict = 0; PyObject *empty_dict = 0; PyObject *list; #if PY_MAJOR_VERSION < 3 PyObject *py_import; py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); if (!py_import) goto bad; #endif if (from_list) list = from_list; else { empty_list = PyList_New(0); if (!empty_list) goto bad; list = empty_list; } global_dict = PyModule_GetDict(__pyx_m); if (!global_dict) goto bad; empty_dict = PyDict_New(); if (!empty_dict) goto bad; { #if PY_MAJOR_VERSION >= 3 if (level == -1) { if (strchr(__Pyx_MODULE_NAME, '.')) { module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, 1); if (!module) { if (!PyErr_ExceptionMatches(PyExc_ImportError)) goto bad; PyErr_Clear(); } } level = 0; } #endif if (!module) { #if PY_MAJOR_VERSION < 3 PyObject *py_level = PyInt_FromLong(level); if (!py_level) goto bad; module = PyObject_CallFunctionObjArgs(py_import, name, global_dict, empty_dict, list, py_level, NULL); Py_DECREF(py_level); #else module = PyImport_ImportModuleLevelObject( name, global_dict, empty_dict, list, level); #endif } } bad: #if PY_MAJOR_VERSION < 3 Py_XDECREF(py_import); #endif Py_XDECREF(empty_list); Py_XDECREF(empty_dict); return module; } /* GetItemInt */ static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { PyObject *r; if (!j) return NULL; r = PyObject_GetItem(o, j); Py_DECREF(j); return r; } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyList_GET_SIZE(o); } if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyList_GET_SIZE(o)))) { PyObject *r = PyList_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS Py_ssize_t wrapped_i = i; if (wraparound & unlikely(i < 0)) { wrapped_i += PyTuple_GET_SIZE(o); } if ((!boundscheck) || likely((0 <= wrapped_i) & (wrapped_i < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); Py_INCREF(r); return r; } 
return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); #else return PySequence_GetItem(o, i); #endif } static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS if (is_list || PyList_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); if ((!boundscheck) || (likely((n >= 0) & (n < PyList_GET_SIZE(o))))) { PyObject *r = PyList_GET_ITEM(o, n); Py_INCREF(r); return r; } } else if (PyTuple_CheckExact(o)) { Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); if ((!boundscheck) || likely((n >= 0) & (n < PyTuple_GET_SIZE(o)))) { PyObject *r = PyTuple_GET_ITEM(o, n); Py_INCREF(r); return r; } } else { PySequenceMethods *m = Py_TYPE(o)->tp_as_sequence; if (likely(m && m->sq_item)) { if (wraparound && unlikely(i < 0) && likely(m->sq_length)) { Py_ssize_t l = m->sq_length(o); if (likely(l >= 0)) { i += l; } else { if (!PyErr_ExceptionMatches(PyExc_OverflowError)) return NULL; PyErr_Clear(); } } return m->sq_item(o, i); } } #else if (is_list || PySequence_Check(o)) { return PySequence_GetItem(o, i); } #endif return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); } /* PyIntBinop */ #if !CYTHON_COMPILING_IN_PYPY static PyObject* __Pyx_PyInt_AddObjC(PyObject *op1, PyObject *op2, CYTHON_UNUSED long intval, CYTHON_UNUSED int inplace) { #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(op1))) { const long b = intval; long x; long a = PyInt_AS_LONG(op1); x = (long)((unsigned long)a + b); if (likely((x^a) >= 0 || (x^b) >= 0)) return PyInt_FromLong(x); return PyLong_Type.tp_as_number->nb_add(op1, op2); } #endif #if CYTHON_USE_PYLONG_INTERNALS if (likely(PyLong_CheckExact(op1))) { const long b = intval; long a, x; #ifdef HAVE_LONG_LONG const PY_LONG_LONG llb = intval; PY_LONG_LONG lla, llx; #endif const digit* digits = ((PyLongObject*)op1)->ob_digit; const Py_ssize_t size = Py_SIZE(op1); if (likely(__Pyx_sst_abs(size) <= 1)) { a = likely(size) ? 
digits[0] : 0; if (size == -1) a = -a; } else { switch (size) { case -2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 2: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { a = (long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 3: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { a = (long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case -4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = -(PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } case 4: if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { a = (long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); break; #ifdef HAVE_LONG_LONG } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { lla = (PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); goto long_long; #endif } default: return PyLong_Type.tp_as_number->nb_add(op1, op2); } } x = a + b; return PyLong_FromLong(x); #ifdef HAVE_LONG_LONG long_long: llx = lla + llb; return PyLong_FromLongLong(llx); #endif } #endif if (PyFloat_CheckExact(op1)) { const long b = intval; double a = PyFloat_AS_DOUBLE(op1); double result; PyFPE_START_PROTECT("add", return NULL) result = ((double)a) + (double)b; PyFPE_END_PROTECT(result) return PyFloat_FromDouble(result); } return (inplace ? 
PyNumber_InPlaceAdd : PyNumber_Add)(op1, op2); } #endif /* None */ static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); } /* None */ static CYTHON_INLINE long __Pyx_div_long(long a, long b) { long q = a / b; long r = a - q*b; q -= ((r != 0) & ((r ^ b) < 0)); return q; } /* WriteUnraisableException */ static void __Pyx_WriteUnraisable(const char *name, CYTHON_UNUSED int clineno, CYTHON_UNUSED int lineno, CYTHON_UNUSED const char *filename, int full_traceback, CYTHON_UNUSED int nogil) { PyObject *old_exc, *old_val, *old_tb; PyObject *ctx; __Pyx_PyThreadState_declare #ifdef WITH_THREAD PyGILState_STATE state; if (nogil) state = PyGILState_Ensure(); #ifdef _MSC_VER else state = (PyGILState_STATE)-1; #endif #endif __Pyx_PyThreadState_assign __Pyx_ErrFetch(&old_exc, &old_val, &old_tb); if (full_traceback) { Py_XINCREF(old_exc); Py_XINCREF(old_val); Py_XINCREF(old_tb); __Pyx_ErrRestore(old_exc, old_val, old_tb); PyErr_PrintEx(1); } #if PY_MAJOR_VERSION < 3 ctx = PyString_FromString(name); #else ctx = PyUnicode_FromString(name); #endif __Pyx_ErrRestore(old_exc, old_val, old_tb); if (!ctx) { PyErr_WriteUnraisable(Py_None); } else { PyErr_WriteUnraisable(ctx); Py_DECREF(ctx); } #ifdef WITH_THREAD if (nogil) PyGILState_Release(state); #endif } /* ImportFrom */ static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { PyErr_Format(PyExc_ImportError, #if PY_MAJOR_VERSION < 3 "cannot import name %.230s", PyString_AS_STRING(name)); #else "cannot import name %S", name); #endif } return value; } /* HasAttr */ static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { PyObject *r; if (unlikely(!__Pyx_PyBaseString_Check(n))) { PyErr_SetString(PyExc_TypeError, "hasattr(): attribute name must be string"); return -1; } r = __Pyx_GetAttr(o, n); if (unlikely(!r)) { PyErr_Clear(); return 0; } else { Py_DECREF(r); return 1; } } /* SetVTable */ static int __Pyx_SetVtable(PyObject *dict, void *vtable) { #if PY_VERSION_HEX >= 0x02070000 PyObject *ob = PyCapsule_New(vtable, 0, 0); #else PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); #endif if (!ob) goto bad; if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) goto bad; Py_DECREF(ob); return 0; bad: Py_XDECREF(ob); return -1; } /* SetupReduce */ static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { int ret; PyObject *name_attr; name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name_2); if (likely(name_attr)) { ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); } else { ret = -1; } if (unlikely(ret < 0)) { PyErr_Clear(); ret = 0; } Py_XDECREF(name_attr); return ret; } static int __Pyx_setup_reduce(PyObject* type_obj) { int ret = 0; PyObject *object_reduce = NULL; PyObject *object_reduce_ex = NULL; PyObject *reduce = NULL; PyObject *reduce_ex = NULL; PyObject *reduce_cython = NULL; PyObject *setstate = NULL; PyObject *setstate_cython = NULL; #if CYTHON_USE_PYTYPE_LOOKUP if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD; #else if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD; #endif #if CYTHON_USE_PYTYPE_LOOKUP object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; #else object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if 
(!object_reduce_ex) goto BAD; #endif reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD; if (reduce_ex == object_reduce_ex) { #if CYTHON_USE_PYTYPE_LOOKUP object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #else object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; #endif reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD; if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD; setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); if (!setstate) PyErr_Clear(); if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD; ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD; ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD; } PyType_Modified((PyTypeObject*)type_obj); } } goto GOOD; BAD: if (!PyErr_Occurred()) PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); ret = -1; GOOD: #if !CYTHON_USE_PYTYPE_LOOKUP Py_XDECREF(object_reduce); Py_XDECREF(object_reduce_ex); #endif Py_XDECREF(reduce); Py_XDECREF(reduce_ex); Py_XDECREF(reduce_cython); Py_XDECREF(setstate); Py_XDECREF(setstate_cython); return ret; } /* CLineInTraceback */ #ifndef CYTHON_CLINE_IN_TRACEBACK static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { PyObject *use_cline; PyObject *ptype, *pvalue, *ptraceback; #if CYTHON_COMPILING_IN_CPYTHON PyObject **cython_runtime_dict; #endif __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); #if CYTHON_COMPILING_IN_CPYTHON cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); if (likely(cython_runtime_dict)) { use_cline = PyDict_GetItem(*cython_runtime_dict, __pyx_n_s_cline_in_traceback); } else #endif { PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); if (use_cline_obj) { use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; Py_DECREF(use_cline_obj); } else { PyErr_Clear(); use_cline = NULL; } } if (!use_cline) { c_line = 0; PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); } else if (PyObject_Not(use_cline) != 0) { c_line = 0; } __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); return c_line; } #endif /* CodeObjectCache */ static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { int start = 0, mid = 0, end = count - 1; if (end >= 0 && code_line > entries[end].code_line) { return count; } while (start < end) { mid = start + (end - start) / 2; if (code_line < entries[mid].code_line) { end = mid; } else if (code_line > entries[mid].code_line) { start = mid + 1; } else { return mid; } } if (code_line <= entries[mid].code_line) { return mid; } else { return mid + 1; } } static PyCodeObject *__pyx_find_code_object(int code_line) { PyCodeObject* code_object; int pos; if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { return NULL; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { return NULL; } code_object = __pyx_code_cache.entries[pos].code_object; Py_INCREF(code_object); return code_object; } static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { int pos, i; __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; if (unlikely(!code_line)) { return; } if (unlikely(!entries)) { entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); if (likely(entries)) { __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = 64; __pyx_code_cache.count = 1; entries[0].code_line = code_line; entries[0].code_object = code_object; Py_INCREF(code_object); } return; } pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { PyCodeObject* tmp = entries[pos].code_object; entries[pos].code_object = code_object; Py_DECREF(tmp); return; } if (__pyx_code_cache.count == __pyx_code_cache.max_count) { int new_max = __pyx_code_cache.max_count + 64; entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); if (unlikely(!entries)) { return; } __pyx_code_cache.entries = entries; __pyx_code_cache.max_count = new_max; } for (i=__pyx_code_cache.count; i>pos; i--) { entries[i] = entries[i-1]; } entries[pos].code_line = code_line; entries[pos].code_object = code_object; __pyx_code_cache.count++; Py_INCREF(code_object); } /* AddTraceback */ #include "compile.h" #include "frameobject.h" #include "traceback.h" static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyObject *py_srcfile = 0; PyObject *py_funcname = 0; #if PY_MAJOR_VERSION < 3 py_srcfile = PyString_FromString(filename); #else py_srcfile = PyUnicode_FromString(filename); #endif if (!py_srcfile) goto bad; if (c_line) { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #else py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); #endif } else { #if PY_MAJOR_VERSION < 3 py_funcname = PyString_FromString(funcname); #else py_funcname = 
PyUnicode_FromString(funcname); #endif } if (!py_funcname) goto bad; py_code = __Pyx_PyCode_New( 0, 0, 0, 0, 0, __pyx_empty_bytes, /*PyObject *code,*/ __pyx_empty_tuple, /*PyObject *consts,*/ __pyx_empty_tuple, /*PyObject *names,*/ __pyx_empty_tuple, /*PyObject *varnames,*/ __pyx_empty_tuple, /*PyObject *freevars,*/ __pyx_empty_tuple, /*PyObject *cellvars,*/ py_srcfile, /*PyObject *filename,*/ py_funcname, /*PyObject *name,*/ py_line, __pyx_empty_bytes /*PyObject *lnotab*/ ); Py_DECREF(py_srcfile); Py_DECREF(py_funcname); return py_code; bad: Py_XDECREF(py_srcfile); Py_XDECREF(py_funcname); return NULL; } static void __Pyx_AddTraceback(const char *funcname, int c_line, int py_line, const char *filename) { PyCodeObject *py_code = 0; PyFrameObject *py_frame = 0; PyThreadState *tstate = __Pyx_PyThreadState_Current; if (c_line) { c_line = __Pyx_CLineForTraceback(tstate, c_line); } py_code = __pyx_find_code_object(c_line ? -c_line : py_line); if (!py_code) { py_code = __Pyx_CreateCodeObjectForTraceback( funcname, c_line, py_line, filename); if (!py_code) goto bad; __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); } py_frame = PyFrame_New( tstate, /*PyThreadState *tstate,*/ py_code, /*PyCodeObject *code,*/ __pyx_d, /*PyObject *globals,*/ 0 /*PyObject *locals*/ ); if (!py_frame) goto bad; __Pyx_PyFrame_SetLineNumber(py_frame, py_line); PyTraceBack_Here(py_frame); bad: Py_XDECREF(py_code); Py_XDECREF(py_frame); } #if PY_MAJOR_VERSION < 3 static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); PyErr_Format(PyExc_TypeError, "'%.200s' does not have the buffer interface", Py_TYPE(obj)->tp_name); return -1; } static void __Pyx_ReleaseBuffer(Py_buffer *view) { PyObject *obj = view->obj; if (!obj) return; if (PyObject_CheckBuffer(obj)) { PyBuffer_Release(view); return; } if ((0)) {} view->obj = NULL; Py_DECREF(obj); } #endif /* MemviewSliceIsContig */ static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) { int i, index, step, start; Py_ssize_t itemsize = mvs.memview->view.itemsize; if (order == 'F') { step = 1; start = 0; } else { step = -1; start = ndim - 1; } for (i = 0; i < ndim; i++) { index = start + step * i; if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) return 0; itemsize *= mvs.shape[index]; } return 1; } /* OverlappingSlices */ static void __pyx_get_array_memory_extents(__Pyx_memviewslice *slice, void **out_start, void **out_end, int ndim, size_t itemsize) { char *start, *end; int i; start = end = slice->data; for (i = 0; i < ndim; i++) { Py_ssize_t stride = slice->strides[i]; Py_ssize_t extent = slice->shape[i]; if (extent == 0) { *out_start = *out_end = start; return; } else { if (stride > 0) end += stride * (extent - 1); else start += stride * (extent - 1); } } *out_start = start; *out_end = end + itemsize; } static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, __Pyx_memviewslice *slice2, int ndim, size_t itemsize) { void *start1, *end1, *start2, *end2; __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); return (start1 < end2) && (start2 < end1); } /* Capsule */ static CYTHON_INLINE PyObject * __pyx_capsule_create(void *p, CYTHON_UNUSED const 
char *sig) { PyObject *cobj; #if PY_VERSION_HEX >= 0x02070000 cobj = PyCapsule_New(p, sig, NULL); #else cobj = PyCObject_FromVoidPtr(p, NULL); #endif return cobj; } /* IsLittleEndian */ static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) { union { uint32_t u32; uint8_t u8[4]; } S; S.u32 = 0x01020304; return S.u8[0] == 4; } /* BufferFormatCheck */ static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, __Pyx_BufFmt_StackElem* stack, __Pyx_TypeInfo* type) { stack[0].field = &ctx->root; stack[0].parent_offset = 0; ctx->root.type = type; ctx->root.name = "buffer dtype"; ctx->root.offset = 0; ctx->head = stack; ctx->head->field = &ctx->root; ctx->fmt_offset = 0; ctx->head->parent_offset = 0; ctx->new_packmode = '@'; ctx->enc_packmode = '@'; ctx->new_count = 1; ctx->enc_count = 0; ctx->enc_type = 0; ctx->is_complex = 0; ctx->is_valid_array = 0; ctx->struct_alignment = 0; while (type->typegroup == 'S') { ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = 0; type = type->fields->type; } } static int __Pyx_BufFmt_ParseNumber(const char** ts) { int count; const char* t = *ts; if (*t < '0' || *t > '9') { return -1; } else { count = *t++ - '0'; while (*t >= '0' && *t < '9') { count *= 10; count += *t++ - '0'; } } *ts = t; return count; } static int __Pyx_BufFmt_ExpectNumber(const char **ts) { int number = __Pyx_BufFmt_ParseNumber(ts); if (number == -1) PyErr_Format(PyExc_ValueError,\ "Does not understand character buffer dtype format string ('%c')", **ts); return number; } static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { PyErr_Format(PyExc_ValueError, "Unexpected format string character: '%c'", ch); } static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { switch (ch) { case 'c': return "'char'"; case 'b': return "'signed char'"; case 'B': return "'unsigned char'"; case 'h': return "'short'"; case 'H': return "'unsigned short'"; case 'i': return "'int'"; case 'I': return "'unsigned int'"; case 'l': return "'long'"; case 'L': return "'unsigned long'"; case 'q': return "'long long'"; case 'Q': return "'unsigned long long'"; case 'f': return (is_complex ? "'complex float'" : "'float'"); case 'd': return (is_complex ? "'complex double'" : "'double'"); case 'g': return (is_complex ? "'complex long double'" : "'long double'"); case 'T': return "a struct"; case 'O': return "Python object"; case 'P': return "a pointer"; case 's': case 'p': return "a string"; case 0: return "end"; default: return "unparseable format string"; } } static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return 2; case 'i': case 'I': case 'l': case 'L': return 4; case 'q': case 'Q': return 8; case 'f': return (is_complex ? 8 : 4); case 'd': return (is_complex ? 16 : 8); case 'g': { PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); return 0; } case 'O': case 'P': return sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { switch (ch) { case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(short); case 'i': case 'I': return sizeof(int); case 'l': case 'L': return sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(float) * (is_complex ? 2 : 1); case 'd': return sizeof(double) * (is_complex ? 
2 : 1); case 'g': return sizeof(long double) * (is_complex ? 2 : 1); case 'O': case 'P': return sizeof(void*); default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } typedef struct { char c; short x; } __Pyx_st_short; typedef struct { char c; int x; } __Pyx_st_int; typedef struct { char c; long x; } __Pyx_st_long; typedef struct { char c; float x; } __Pyx_st_float; typedef struct { char c; double x; } __Pyx_st_double; typedef struct { char c; long double x; } __Pyx_st_longdouble; typedef struct { char c; void *x; } __Pyx_st_void_p; #ifdef HAVE_LONG_LONG typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_st_float) - sizeof(float); case 'd': return sizeof(__Pyx_st_double) - sizeof(double); case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } /* These are for computing the padding at the end of the struct to align on the first member of the struct. This will probably the same as above, but we don't have any guarantees. */ typedef struct { short x; char c; } __Pyx_pad_short; typedef struct { int x; char c; } __Pyx_pad_int; typedef struct { long x; char c; } __Pyx_pad_long; typedef struct { float x; char c; } __Pyx_pad_float; typedef struct { double x; char c; } __Pyx_pad_double; typedef struct { long double x; char c; } __Pyx_pad_longdouble; typedef struct { void *x; char c; } __Pyx_pad_void_p; #ifdef HAVE_LONG_LONG typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; #endif static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, CYTHON_UNUSED int is_complex) { switch (ch) { case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); #ifdef HAVE_LONG_LONG case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); #endif case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); default: __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { switch (ch) { case 'c': return 'H'; case 'b': case 'h': case 'i': case 'l': case 'q': case 's': case 'p': return 'I'; case 'B': case 'H': case 'I': case 'L': case 'Q': return 'U'; case 'f': case 'd': case 'g': return (is_complex ? 
'C' : 'R'); case 'O': return 'O'; case 'P': return 'P'; default: { __Pyx_BufFmt_RaiseUnexpectedChar(ch); return 0; } } } static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { if (ctx->head == NULL || ctx->head->field == &ctx->root) { const char* expected; const char* quote; if (ctx->head == NULL) { expected = "end"; quote = ""; } else { expected = ctx->head->field->type->name; quote = "'"; } PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected %s%s%s but got %s", quote, expected, quote, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); } else { __Pyx_StructField* field = ctx->head->field; __Pyx_StructField* parent = (ctx->head - 1)->field; PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), parent->type->name, field->name); } } static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { char group; size_t size, offset, arraysize = 1; if (ctx->enc_type == 0) return 0; if (ctx->head->field->type->arraysize[0]) { int i, ndim = 0; if (ctx->enc_type == 's' || ctx->enc_type == 'p') { ctx->is_valid_array = ctx->head->field->type->ndim == 1; ndim = 1; if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %zu", ctx->head->field->type->arraysize[0], ctx->enc_count); return -1; } } if (!ctx->is_valid_array) { PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", ctx->head->field->type->ndim, ndim); return -1; } for (i = 0; i < ctx->head->field->type->ndim; i++) { arraysize *= ctx->head->field->type->arraysize[i]; } ctx->is_valid_array = 0; ctx->enc_count = 1; } group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); do { __Pyx_StructField* field = ctx->head->field; __Pyx_TypeInfo* type = field->type; if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); } else { size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); } if (ctx->enc_packmode == '@') { size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); size_t align_mod_offset; if (align_at == 0) return -1; align_mod_offset = ctx->fmt_offset % align_at; if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; if (ctx->struct_alignment == 0) ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, ctx->is_complex); } if (type->size != size || type->typegroup != group) { if (type->typegroup == 'C' && type->fields != NULL) { size_t parent_offset = ctx->head->parent_offset + field->offset; ++ctx->head; ctx->head->field = type->fields; ctx->head->parent_offset = parent_offset; continue; } if ((type->typegroup == 'H' || group == 'H') && type->size == size) { } else { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } } offset = ctx->head->parent_offset + field->offset; if (ctx->fmt_offset != offset) { PyErr_Format(PyExc_ValueError, "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); return -1; } ctx->fmt_offset += size; if (arraysize) ctx->fmt_offset += (arraysize - 1) * size; --ctx->enc_count; while (1) { if (field == &ctx->root) { ctx->head = NULL; if (ctx->enc_count != 0) { __Pyx_BufFmt_RaiseExpected(ctx); return -1; } break; } ctx->head->field = ++field; if (field->type == NULL) { --ctx->head; field = 
ctx->head->field; continue; } else if (field->type->typegroup == 'S') { size_t parent_offset = ctx->head->parent_offset + field->offset; if (field->type->fields->type == NULL) continue; field = field->type->fields; ++ctx->head; ctx->head->field = field; ctx->head->parent_offset = parent_offset; break; } else { break; } } } while (ctx->enc_count); ctx->enc_type = 0; ctx->is_complex = 0; return 0; } static PyObject * __pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) { const char *ts = *tsp; int i = 0, number; int ndim = ctx->head->field->type->ndim; ; ++ts; if (ctx->new_count != 1) { PyErr_SetString(PyExc_ValueError, "Cannot handle repeated arrays in format string"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; while (*ts && *ts != ')') { switch (*ts) { case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; default: break; } number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) return PyErr_Format(PyExc_ValueError, "Expected a dimension of size %zu, got %d", ctx->head->field->type->arraysize[i], number); if (*ts != ',' && *ts != ')') return PyErr_Format(PyExc_ValueError, "Expected a comma in format string, got '%c'", *ts); if (*ts == ',') ts++; i++; } if (i != ndim) return PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", ctx->head->field->type->ndim, i); if (!*ts) { PyErr_SetString(PyExc_ValueError, "Unexpected end of format string, expected ')'"); return NULL; } ctx->is_valid_array = 1; ctx->new_count = 1; *tsp = ++ts; return Py_None; } static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { int got_Z = 0; while (1) { switch(*ts) { case 0: if (ctx->enc_type != 0 && ctx->head == NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; if (ctx->head != NULL) { __Pyx_BufFmt_RaiseExpected(ctx); return NULL; } return ts; case ' ': case '\r': case '\n': ++ts; break; case '<': if (!__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '>': case '!': if (__Pyx_Is_Little_Endian()) { PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); return NULL; } ctx->new_packmode = '='; ++ts; break; case '=': case '@': case '^': ctx->new_packmode = *ts++; break; case 'T': { const char* ts_after_sub; size_t i, struct_count = ctx->new_count; size_t struct_alignment = ctx->struct_alignment; ctx->new_count = 1; ++ts; if (*ts != '{') { PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); return NULL; } if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; ctx->enc_count = 0; ctx->struct_alignment = 0; ++ts; ts_after_sub = ts; for (i = 0; i != struct_count; ++i) { ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); if (!ts_after_sub) return NULL; } ts = ts_after_sub; if (struct_alignment) ctx->struct_alignment = struct_alignment; } break; case '}': { size_t alignment = ctx->struct_alignment; ++ts; if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_type = 0; if (alignment && ctx->fmt_offset % alignment) { ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); } } return ts; case 'x': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->fmt_offset += ctx->new_count; ctx->new_count = 1; ctx->enc_count = 0; 
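/* 'x' denotes pad bytes in the struct format string: the pending element chunk was flushed above and fmt_offset advanced by the pad count; the assignments below clear the pending-type state before the next format code is read. */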
ctx->enc_type = 0; ctx->enc_packmode = ctx->new_packmode; ++ts; break; case 'Z': got_Z = 1; ++ts; if (*ts != 'f' && *ts != 'd' && *ts != 'g') { __Pyx_BufFmt_RaiseUnexpectedChar('Z'); return NULL; } case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': case 'l': case 'L': case 'q': case 'Q': case 'f': case 'd': case 'g': case 'O': case 'p': if (ctx->enc_type == *ts && got_Z == ctx->is_complex && ctx->enc_packmode == ctx->new_packmode) { ctx->enc_count += ctx->new_count; ctx->new_count = 1; got_Z = 0; ++ts; break; } case 's': if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; ctx->enc_count = ctx->new_count; ctx->enc_packmode = ctx->new_packmode; ctx->enc_type = *ts; ctx->is_complex = got_Z; ++ts; ctx->new_count = 1; got_Z = 0; break; case ':': ++ts; while(*ts != ':') ++ts; ++ts; break; case '(': if (!__pyx_buffmt_parse_array(ctx, &ts)) return NULL; break; default: { int number = __Pyx_BufFmt_ExpectNumber(&ts); if (number == -1) return NULL; ctx->new_count = (size_t)number; } } } } /* TypeInfoCompare */ static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) { int i; if (!a || !b) return 0; if (a == b) return 1; if (a->size != b->size || a->typegroup != b->typegroup || a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { if (a->typegroup == 'H' || b->typegroup == 'H') { return a->size == b->size; } else { return 0; } } if (a->ndim) { for (i = 0; i < a->ndim; i++) if (a->arraysize[i] != b->arraysize[i]) return 0; } if (a->typegroup == 'S') { if (a->flags != b->flags) return 0; if (a->fields || b->fields) { if (!(a->fields && b->fields)) return 0; for (i = 0; a->fields[i].type && b->fields[i].type; i++) { __Pyx_StructField *field_a = a->fields + i; __Pyx_StructField *field_b = b->fields + i; if (field_a->offset != field_b->offset || !__pyx_typeinfo_cmp(field_a->type, field_b->type)) return 0; } return !a->fields[i].type && !b->fields[i].type; } } return 1; } /* MemviewSliceValidateAndInit */ static int __pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) { if (buf->shape[dim] <= 1) return 1; if (buf->strides) { if (spec & __Pyx_MEMVIEW_CONTIG) { if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { if (buf->strides[dim] != sizeof(void *)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly contiguous " "in dimension %d.", dim); goto fail; } } else if (buf->strides[dim] != buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } if (spec & __Pyx_MEMVIEW_FOLLOW) { Py_ssize_t stride = buf->strides[dim]; if (stride < 0) stride = -stride; if (stride < buf->itemsize) { PyErr_SetString(PyExc_ValueError, "Buffer and memoryview are not contiguous " "in the same dimension."); goto fail; } } } else { if (spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not contiguous in " "dimension %d", dim); goto fail; } else if (spec & (__Pyx_MEMVIEW_PTR)) { PyErr_Format(PyExc_ValueError, "C-contiguous buffer is not indirect in " "dimension %d", dim); goto fail; } else if (buf->suboffsets) { PyErr_SetString(PyExc_ValueError, "Buffer exposes suboffsets but no strides"); goto fail; } } return 1; fail: return 0; } static int __pyx_check_suboffsets(Py_buffer *buf, int dim, CYTHON_UNUSED int ndim, int spec) { if (spec & __Pyx_MEMVIEW_DIRECT) { if (buf->suboffsets && buf->suboffsets[dim] >= 0) { PyErr_Format(PyExc_ValueError, "Buffer not compatible with direct access " "in dimension %d.", dim); goto fail; } } if (spec & 
__Pyx_MEMVIEW_PTR) { if (!buf->suboffsets || (buf->suboffsets && buf->suboffsets[dim] < 0)) { PyErr_Format(PyExc_ValueError, "Buffer is not indirectly accessible " "in dimension %d.", dim); goto fail; } } return 1; fail: return 0; } static int __pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) { int i; if (c_or_f_flag & __Pyx_IS_F_CONTIG) { Py_ssize_t stride = 1; for (i = 0; i < ndim; i++) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not fortran contiguous."); goto fail; } stride = stride * buf->shape[i]; } } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { Py_ssize_t stride = 1; for (i = ndim - 1; i >- 1; i--) { if (stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1) { PyErr_SetString(PyExc_ValueError, "Buffer not C contiguous."); goto fail; } stride = stride * buf->shape[i]; } } return 1; fail: return 0; } static int __Pyx_ValidateAndInit_memviewslice( int *axes_specs, int c_or_f_flag, int buf_flags, int ndim, __Pyx_TypeInfo *dtype, __Pyx_BufFmt_StackElem stack[], __Pyx_memviewslice *memviewslice, PyObject *original_obj) { struct __pyx_memoryview_obj *memview, *new_memview; __Pyx_RefNannyDeclarations Py_buffer *buf; int i, spec = 0, retval = -1; __Pyx_BufFmt_Context ctx; int from_memoryview = __pyx_memoryview_check(original_obj); __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) original_obj)->typeinfo)) { memview = (struct __pyx_memoryview_obj *) original_obj; new_memview = NULL; } else { memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( original_obj, buf_flags, 0, dtype); new_memview = memview; if (unlikely(!memview)) goto fail; } buf = &memview->view; if (buf->ndim != ndim) { PyErr_Format(PyExc_ValueError, "Buffer has wrong number of dimensions (expected %d, got %d)", ndim, buf->ndim); goto fail; } if (new_memview) { __Pyx_BufFmt_Init(&ctx, stack, dtype); if (!__Pyx_BufFmt_CheckString(&ctx, buf->format)) goto fail; } if ((unsigned) buf->itemsize != dtype->size) { PyErr_Format(PyExc_ValueError, "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", buf->itemsize, (buf->itemsize > 1) ? "s" : "", dtype->name, dtype->size, (dtype->size > 1) ? 
"s" : ""); goto fail; } for (i = 0; i < ndim; i++) { spec = axes_specs[i]; if (!__pyx_check_strides(buf, i, ndim, spec)) goto fail; if (!__pyx_check_suboffsets(buf, i, ndim, spec)) goto fail; } if (buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, new_memview != NULL) == -1)) { goto fail; } retval = 0; goto no_fail; fail: Py_XDECREF(new_memview); retval = -1; no_fail: __Pyx_RefNannyFinishContext(); return retval; } /* ObjectToMemviewSlice */ static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_double(PyObject *obj) { __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; __Pyx_BufFmt_StackElem stack[1]; int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; int retcode; if (obj == Py_None) { result.memview = (struct __pyx_memoryview_obj *) Py_None; return result; } retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, PyBUF_RECORDS, 1, &__Pyx_TypeInfo_double, stack, &result, obj); if (unlikely(retcode == -1)) goto __pyx_fail; return result; __pyx_fail: result.memview = NULL; result.data = NULL; return result; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(int) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(int) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(int) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(int), little, !is_unsigned); } } /* MemviewDtypeToObject */ static CYTHON_INLINE PyObject *__pyx_memview_get_double(const char *itemp) { return (PyObject *) PyFloat_FromDouble(*(double *) itemp); } static CYTHON_INLINE int __pyx_memview_set_double(const char *itemp, PyObject *obj) { double value = __pyx_PyFloat_AsDouble(obj); if ((value == (double)-1) && PyErr_Occurred()) return 0; *(double *) itemp = value; return 1; } /* MemviewSliceCopyTemplate */ static __Pyx_memviewslice __pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, const char *mode, int ndim, size_t sizeof_dtype, int contig_flag, int dtype_is_object) { __Pyx_RefNannyDeclarations int i; __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; struct __pyx_memoryview_obj *from_memview = from_mvs->memview; Py_buffer *buf = &from_memview->view; PyObject *shape_tuple = NULL; PyObject *temp_int = NULL; struct __pyx_array_obj *array_obj = NULL; struct __pyx_memoryview_obj *memview_obj = NULL; __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); for (i = 0; i < ndim; i++) { if (from_mvs->suboffsets[i] >= 0) { PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " "indirect dimensions (axis %d)", i); goto fail; } } shape_tuple = PyTuple_New(ndim); if (unlikely(!shape_tuple)) { goto fail; } __Pyx_GOTREF(shape_tuple); for(i = 0; i < ndim; i++) { temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); if(unlikely(!temp_int)) { goto fail; } else { PyTuple_SET_ITEM(shape_tuple, i, 
temp_int); temp_int = NULL; } } array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); if (unlikely(!array_obj)) { goto fail; } __Pyx_GOTREF(array_obj); memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( (PyObject *) array_obj, contig_flag, dtype_is_object, from_mvs->memview->typeinfo); if (unlikely(!memview_obj)) goto fail; if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) goto fail; if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, dtype_is_object) < 0)) goto fail; goto no_fail; fail: __Pyx_XDECREF(new_mvs.memview); new_mvs.memview = NULL; new_mvs.data = NULL; no_fail: __Pyx_XDECREF(shape_tuple); __Pyx_XDECREF(temp_int); __Pyx_XDECREF(array_obj); __Pyx_RefNannyFinishContext(); return new_mvs; } /* CIntFromPyVerify */ #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) #define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) #define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ {\ func_type value = func_value;\ if (sizeof(target_type) < sizeof(func_type)) {\ if (unlikely(value != (func_type) (target_type) value)) {\ func_type zero = 0;\ if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ return (target_type) -1;\ if (is_unsigned && unlikely(value < zero))\ goto raise_neg_overflow;\ else\ goto raise_overflow;\ }\ }\ return (target_type) value;\ } /* CIntFromPy */ static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { const int neg_one = (int) -1, const_zero = (int) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(int) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (int) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | 
(int)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (int) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(int) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (int) 0; case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) case -2: if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 2: if (8 * sizeof(int) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -3: if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 3: if (8 * sizeof(int) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case -4: if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); } } break; case 4: if (8 * sizeof(int) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); 
} } break; } #endif if (sizeof(int) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else int val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (int) -1; } } else { int val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (int) -1; val = __Pyx_PyInt_As_int(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to int"); return (int) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to int"); return (int) -1; } /* CIntFromPy */ static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(long) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (long) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (long) -1; if 
(unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(long) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (long) 0; case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) case -2: if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 2: if (8 * sizeof(long) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -3: if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 3: if (8 * sizeof(long) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case -4: if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; case 4: if (8 * sizeof(long) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); } } break; } #endif if (sizeof(long) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { 
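/* long is narrower than long long on this platform: route the conversion through PyLong_AsLongLong instead */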
__PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else long val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (long) -1; } } else { long val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (long) -1; val = __Pyx_PyInt_As_long(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to long"); return (long) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to long"); return (long) -1; } /* CIntToPy */ static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { const long neg_one = (long) -1, const_zero = (long) 0; const int is_unsigned = neg_one > const_zero; if (is_unsigned) { if (sizeof(long) < sizeof(long)) { return PyInt_FromLong((long) value); } else if (sizeof(long) <= sizeof(unsigned long)) { return PyLong_FromUnsignedLong((unsigned long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); #endif } } else { if (sizeof(long) <= sizeof(long)) { return PyInt_FromLong((long) value); #ifdef HAVE_LONG_LONG } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { return PyLong_FromLongLong((PY_LONG_LONG) value); #endif } } { int one = 1; int little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&value; return _PyLong_FromByteArray(bytes, sizeof(long), little, !is_unsigned); } } /* CIntFromPy */ static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { const char neg_one = (char) -1, const_zero = (char) 0; const int is_unsigned = neg_one > const_zero; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x))) { if (sizeof(char) < sizeof(long)) { __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) } else { long val = PyInt_AS_LONG(x); if (is_unsigned && unlikely(val < 0)) { goto raise_neg_overflow; } return (char) val; } } else #endif if (likely(PyLong_Check(x))) { if (is_unsigned) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } 
break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); } } break; } #endif #if CYTHON_COMPILING_IN_CPYTHON if (unlikely(Py_SIZE(x) < 0)) { goto raise_neg_overflow; } #else { int result = PyObject_RichCompareBool(x, Py_False, Py_LT); if (unlikely(result < 0)) return (char) -1; if (unlikely(result == 1)) goto raise_neg_overflow; } #endif if (sizeof(char) <= sizeof(unsigned long)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) #endif } } else { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)x)->ob_digit; switch (Py_SIZE(x)) { case 0: return (char) 0; case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) case -2: if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 2: if (8 * sizeof(char) > 1 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -3: if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case 3: if (8 * sizeof(char) > 2 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; case -4: if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << 
PyLong_SHIFT) | (char)digits[0]))); } } break; case 4: if (8 * sizeof(char) > 3 * PyLong_SHIFT) { if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); } } break; } #endif if (sizeof(char) <= sizeof(long)) { __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) #ifdef HAVE_LONG_LONG } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) #endif } } { #if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) PyErr_SetString(PyExc_RuntimeError, "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); #else char val; PyObject *v = __Pyx_PyNumber_IntOrLong(x); #if PY_MAJOR_VERSION < 3 if (likely(v) && !PyLong_Check(v)) { PyObject *tmp = v; v = PyNumber_Long(tmp); Py_DECREF(tmp); } #endif if (likely(v)) { int one = 1; int is_little = (int)*(unsigned char *)&one; unsigned char *bytes = (unsigned char *)&val; int ret = _PyLong_AsByteArray((PyLongObject *)v, bytes, sizeof(val), is_little, !is_unsigned); Py_DECREF(v); if (likely(!ret)) return val; } #endif return (char) -1; } } else { char val; PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); if (!tmp) return (char) -1; val = __Pyx_PyInt_As_char(tmp); Py_DECREF(tmp); return val; } raise_overflow: PyErr_SetString(PyExc_OverflowError, "value too large to convert to char"); return (char) -1; raise_neg_overflow: PyErr_SetString(PyExc_OverflowError, "can't convert negative value to char"); return (char) -1; } /* FastTypeChecks */ #if CYTHON_COMPILING_IN_CPYTHON static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { while (a) { a = a->tp_base; if (a == b) return 1; } return b == &PyBaseObject_Type; } static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { PyObject *mro; if (a == b) return 1; mro = a->tp_mro; if (likely(mro)) { Py_ssize_t i, n; n = PyTuple_GET_SIZE(mro); for (i = 0; i < n; i++) { if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) return 1; } return 0; } return __Pyx_InBases(a, b); } #if PY_MAJOR_VERSION == 2 static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { PyObject *exception, *value, *tb; int res; __Pyx_PyThreadState_declare __Pyx_PyThreadState_assign __Pyx_ErrFetch(&exception, &value, &tb); res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } if (!res) { res = PyObject_IsSubclass(err, exc_type2); if (unlikely(res == -1)) { PyErr_WriteUnraisable(err); res = 0; } } __Pyx_ErrRestore(exception, value, tb); return res; } #else static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0; if (!res) { res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); } return res; } #endif static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { if (likely(err == exc_type)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); } return PyErr_GivenExceptionMatches(err, exc_type); } static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { if (likely(err == exc_type1 || err == exc_type2)) return 1; if (likely(PyExceptionClass_Check(err))) { return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); } return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); } #endif /* CheckBinaryVersion */ static int __Pyx_check_binary_version(void) { char ctversion[4], rtversion[4]; PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion()); if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) { char message[200]; PyOS_snprintf(message, sizeof(message), "compiletime version %s of module '%.100s' " "does not match runtime version %s", ctversion, __Pyx_MODULE_NAME, rtversion); return PyErr_WarnEx(NULL, message, 1); } return 0; } /* InitStrings */ static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { while (t->p) { #if PY_MAJOR_VERSION < 3 if (t->is_unicode) { *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); } else if (t->intern) { *t->p = PyString_InternFromString(t->s); } else { *t->p = PyString_FromStringAndSize(t->s, t->n - 1); } #else if (t->is_unicode | t->is_str) { if (t->intern) { *t->p = PyUnicode_InternFromString(t->s); } else if (t->encoding) { *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL); } else { *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1); } } else { *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1); } #endif if (!*t->p) return -1; if (PyObject_Hash(*t->p) == -1) PyErr_Clear(); ++t; } return 0; } static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); } static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { Py_ssize_t ignore; return __Pyx_PyObject_AsStringAndSize(o, &ignore); } #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT #if !CYTHON_PEP393_ENABLED static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { char* defenc_c; PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); if (!defenc) return NULL; defenc_c = PyBytes_AS_STRING(defenc); #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII { char* end = defenc_c + PyBytes_GET_SIZE(defenc); char* c; for (c = defenc_c; c < end; c++) { if ((unsigned char) (*c) >= 128) { PyUnicode_AsASCIIString(o); return NULL; } } } #endif *length = PyBytes_GET_SIZE(defenc); return defenc_c; } #else static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII if (likely(PyUnicode_IS_ASCII(o))) { *length = PyUnicode_GET_LENGTH(o); return PyUnicode_AsUTF8(o); } else { PyUnicode_AsASCIIString(o); return NULL; } #else return PyUnicode_AsUTF8AndSize(o, length); #endif } #endif #endif static CYTHON_INLINE const char* 
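/* __Pyx_PyObject_AsStringAndSize: return a C char* view (and its length) of a bytes or bytearray object, or of a unicode object when the module's default string encoding permits it */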
__Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { #if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT if ( #if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII __Pyx_sys_getdefaultencoding_not_ascii && #endif PyUnicode_Check(o)) { return __Pyx_PyUnicode_AsStringAndSize(o, length); } else #endif #if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) if (PyByteArray_Check(o)) { *length = PyByteArray_GET_SIZE(o); return PyByteArray_AS_STRING(o); } else #endif { char* result; int r = PyBytes_AsStringAndSize(o, &result, length); if (unlikely(r < 0)) { return NULL; } else { return result; } } } static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { int is_true = x == Py_True; if (is_true | (x == Py_False) | (x == Py_None)) return is_true; else return PyObject_IsTrue(x); } static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { #if PY_MAJOR_VERSION >= 3 if (PyLong_Check(result)) { if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, "__int__ returned non-int (type %.200s). " "The ability to return an instance of a strict subclass of int " "is deprecated, and may be removed in a future version of Python.", Py_TYPE(result)->tp_name)) { Py_DECREF(result); return NULL; } return result; } #endif PyErr_Format(PyExc_TypeError, "__%.4s__ returned non-%.4s (type %.200s)", type_name, type_name, Py_TYPE(result)->tp_name); Py_DECREF(result); return NULL; } static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { #if CYTHON_USE_TYPE_SLOTS PyNumberMethods *m; #endif const char *name = NULL; PyObject *res = NULL; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_Check(x) || PyLong_Check(x))) #else if (likely(PyLong_Check(x))) #endif return __Pyx_NewRef(x); #if CYTHON_USE_TYPE_SLOTS m = Py_TYPE(x)->tp_as_number; #if PY_MAJOR_VERSION < 3 if (m && m->nb_int) { name = "int"; res = m->nb_int(x); } else if (m && m->nb_long) { name = "long"; res = m->nb_long(x); } #else if (likely(m && m->nb_int)) { name = "int"; res = m->nb_int(x); } #endif #else if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { res = PyNumber_Int(x); } #endif if (likely(res)) { #if PY_MAJOR_VERSION < 3 if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { #else if (unlikely(!PyLong_CheckExact(res))) { #endif return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); } } else if (!PyErr_Occurred()) { PyErr_SetString(PyExc_TypeError, "an integer is required"); } return res; } static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { Py_ssize_t ival; PyObject *x; #if PY_MAJOR_VERSION < 3 if (likely(PyInt_CheckExact(b))) { if (sizeof(Py_ssize_t) >= sizeof(long)) return PyInt_AS_LONG(b); else return PyInt_AsSsize_t(x); } #endif if (likely(PyLong_CheckExact(b))) { #if CYTHON_USE_PYLONG_INTERNALS const digit* digits = ((PyLongObject*)b)->ob_digit; const Py_ssize_t size = Py_SIZE(b); if (likely(__Pyx_sst_abs(size) <= 1)) { ival = likely(size) ? 
digits[0] : 0; if (size == -1) ival = -ival; return ival; } else { switch (size) { case 2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -2: if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -3: if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case 4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; case -4: if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); } break; } } #endif return PyLong_AsSsize_t(b); } x = PyNumber_Index(b); if (!x) return -1; ival = PyInt_AsSsize_t(x); Py_DECREF(x); return ival; } static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { return PyInt_FromSize_t(ival); } #endif /* Py_PYTHON_H */
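/*
 * Editor's note: a minimal, hypothetical sketch of the digit-reconstruction
 * idea used by the generated fast paths above.  CPython stores a PyLong as a
 * little-endian array of PyLong_SHIFT-bit "digits"; the unrolled cases above
 * rebuild the C value by shifting each digit in, most significant first.  The
 * 15-bit width and the demo_ names below are assumptions for illustration only.
 */
#include <stdint.h>
#include <stddef.h>

#define DEMO_SHIFT 15   /* assumed digit width; CPython uses 15 or 30 bits */

static uint64_t demo_digits_to_u64 (const uint16_t *digits, size_t ndigits)
{
    uint64_t value = 0;
    /* walk from the most significant digit down, as the switch cases do */
    for (size_t i = ndigits; i > 0; i--)
    {
        value = (value << DEMO_SHIFT) | (uint64_t) digits [i-1];
    }
    return value;
}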
GB_unop__identity_uint16_int64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_int64) // op(A') function: GB (_unop_tran__identity_uint16_int64) // C type: uint16_t // A type: int64_t // cast: uint16_t cij = (uint16_t) aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = (uint16_t) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = (uint16_t) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_int64) ( uint16_t *Cx, // Cx and Ax may be aliased const int64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int64_t aij = Ax [p] ; uint16_t z = (uint16_t) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_int64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
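/*
 * Editor's note: a hedged usage sketch, not part of the generated kernel above.
 * A user-level GrB_apply call with the built-in GrB_IDENTITY_UINT16 operator on
 * an INT64 matrix is the kind of call that dispatches to
 * GB (_unop_apply__identity_uint16_int64): the identity op plus an
 * int64_t -> uint16_t typecast.  The demo_ name is hypothetical and error
 * handling is omitted.
 */
#include "GraphBLAS.h"

static void demo_identity_uint16_int64 (GrB_Matrix A)   // A is assumed to be GrB_INT64
{
    GrB_Index nrows, ncols ;
    GrB_Matrix_nrows (&nrows, A) ;
    GrB_Matrix_ncols (&ncols, A) ;
    GrB_Matrix C = NULL ;
    GrB_Matrix_new (&C, GrB_UINT16, nrows, ncols) ;
    // C = (uint16_t) A, entry by entry
    GrB_Matrix_apply (C, NULL, NULL, GrB_IDENTITY_UINT16, A, NULL) ;
    GrB_Matrix_free (&C) ;
}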
rng.c
/** * @file rng.c * @author Michael Trotter & Matt Goodrum * @brief Uniform and Normal RNG Implemented in OpenMP */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <omp.h> #include <limits.h> #define PI acos(-1) /** @var M value for Linear Congruential Generator (LCG); use GCC's value */ long M = INT_MAX; /** @var A value for LCG */ int A = 1103515245; /** @var C value for LCG */ int C = 12345; /** * Generates a uniformly distributed random number using the provided seed and GCC's settings for the Linear Congruential Generator (LCG) * @see http://en.wikipedia.org/wiki/Linear_congruential_generator * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a uniformly distributed number [0, 1) */ double randu(int * seed, int index) { int num = A*seed[index] + C; seed[index] = num % M; return fabs(seed[index]/((double) M)); } /** * Generates a normally distributed random number using the Box-Muller transformation * @note This function is thread-safe * @param seed The seed array * @param index The specific index of the seed to be advanced * @return a double representing random number generated using the Box-Muller algorithm * @see http://en.wikipedia.org/wiki/Normal_distribution, section computing value for normal random distribution */ double randn(int * seed, int index){ /*Box-Muller algorithm*/ double u = randu(seed, index); double v = randu(seed, index); double cosine = cos(2*PI*v); double rt = -2*log(u); return sqrt(rt)*cosine; } /** * A simple main that demonstrates how to setup the seed array for use */ int main(){ //define the length of the seed array int length = 10000; //declare seed array int * seed = (int *)malloc(sizeof(int)*length); //establish original values //the current time * the index is good enough for most uses. int x; for(x = 0; x < length; x++) { seed[x] = time(0)*x; } //make kernel calls etc; device functions can now use seed array to generate normal and uniform random numbers /* Example #pragma omp parallel for shared(arrayX, arrayY, length, seed) private(x) for(x = 0; x < length; x++){ arrayX[x] += 1 + 5*randn(seed, x); arrayY[x] += -2 + 2*randn(seed, x); } */ //free allocated memory free(seed); return 0; }
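/*
 * Editor's note: a hedged usage sketch, not part of rng.c.  It fills an array
 * with normally distributed samples in parallel; thread safety relies on each
 * loop iteration advancing only its own seed[i] slot, as the comments above
 * note.  The demo_ name and the sample-mean check are illustrative assumptions.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <omp.h>

extern double randn(int * seed, int index);   /* provided by rng.c */

int demo_rng(void)
{
    int n = 100000;
    int * seed = (int *) malloc(sizeof(int) * n);
    double * samples = (double *) malloc(sizeof(double) * n);
    for (int i = 0; i < n; i++) seed[i] = (int) (time(NULL) * i);

    double sum = 0;
    #pragma omp parallel for reduction(+:sum)
    for (int i = 0; i < n; i++) {
        samples[i] = randn(seed, i);   /* each iteration touches only seed[i] */
        sum += samples[i];
    }
    printf("sample mean ~ %f (expected to be near 0)\n", sum / n);
    free(samples);
    free(seed);
    return 0;
}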
GB_binop__iseq_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__iseq_uint32 // A.*B function (eWiseMult): GB_AemultB__iseq_uint32 // A*D function (colscale): GB_AxD__iseq_uint32 // D*A function (rowscale): GB_DxB__iseq_uint32 // C+=B function (dense accum): GB_Cdense_accumB__iseq_uint32 // C+=b function (dense accum): GB_Cdense_accumb__iseq_uint32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_uint32 // C=scalar+B GB_bind1st__iseq_uint32 // C=scalar+B' GB_bind1st_tran__iseq_uint32 // C=A+scalar GB_bind2nd__iseq_uint32 // C=A'+scalar GB_bind2nd_tran__iseq_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x == y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_UINT32 || GxB_NO_ISEQ_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__iseq_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__iseq_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__iseq_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__iseq_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__iseq_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__iseq_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__iseq_uint32 ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__iseq_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__iseq_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint32_t aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB_bind1st_tran__iseq_uint32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB_bind2nd_tran__iseq_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
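/*
 * Editor's note: a hedged usage sketch, not part of the generated file.  An
 * element-wise multiply with the GxB_ISEQ_UINT32 operator ("is equal", uint32
 * in, uint32 out) is the kind of user call that reaches
 * GB_AemultB__iseq_uint32 above.  The demo_ name is hypothetical and error
 * handling is omitted; C, A, and B are assumed to be GrB_UINT32 matrices of
 * matching dimensions.
 */
#include "GraphBLAS.h"

static void demo_iseq_uint32 (GrB_Matrix C, GrB_Matrix A, GrB_Matrix B)
{
    // C = A .* B with cij = (aij == bij), stored as uint32 (0 or 1)
    GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GxB_ISEQ_UINT32, A, B, NULL) ;
}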
vptree_openmp.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdbool.h> #include <math.h> #include <omp.h> #include "../inc/vptree.h" // Threshold of points to switch to sequential execution #define POINT_THRESHOLD 10000 // Threshold of maximum live threads simultaneously for distance calculation #define THREADS_MAX 16 // Development flags to switch execution mode from serial to parallel for distance calculation and subtree creation #define PARALLELDIS true #define PARALLELSUB true // Function Prototypes vptree * buildvp(double *X, int n, int d); vptree * getInner(vptree * T); vptree * getOuter(vptree * T); double getMD(vptree * T); double * getVP(vptree * T); int getIDX(vptree * T); vptree * build_tree(double *points, int *ids, int n, int d); void euclidean(double *point, double *points, double *distances, int n, int d); void swap(double *a, double *b); int partition (double arr[], int low, int high); double quickselect_median(double arr[], int length); double quickselect(double arr[], int length, int idx); // Counter to keep track of live threads int threadCount = 0; // Thread-safe function to alter the live thread count void modThreadCount(int n){ #pragma omp atomic threadCount += n; } // Application entry point vptree * buildvp(double *X, int n, int d) { // Allocate space for the index array int *ids = calloc(n, sizeof(int)); // Build the initial ids array for (int i = 0; i < n; i++) ids[i] = i; // Call build_tree to get the pointer to the root of the tree return build_tree(X, ids, n, d); } // Function that recursively builds the binary tree and returns a pointer to its root vptree * build_tree(double *points, int *ids, int n, int d) { // Enable OpenMP Nested Parallelism omp_set_nested(true); // Create node to be returned vptree *node = calloc(1, sizeof(vptree)); // Check to end recursion: if points array is of size 0 - we are returning a leaf if (n == 1){ // Build node node->inner = NULL; node->outer = NULL; node->idx = ids[0]; node->md = 0; node->vp = calloc(d, sizeof(double)); memcpy(node->vp, points, sizeof(double) * d); // Free memory for ids and points arrays free(ids); free(points); // Return node return node; } // Choose the last point in X as the vantage point double *point = (points + (n-1)*d); int id = ids[n-1]; // Create array that holds euclidean distance of point from all other points double *distances = calloc(n-1, sizeof(double)); // Calculate distances in parallel if possible using work-construct, else do it sequentially (logic in euclidean()) euclidean(point, points, distances, n-1, d); // At this point distances[i] indicates the distance of point i in points from the vantage point // Find median by creating a copy of distances and passing it to QuickSelect double *distancesCopy = calloc(n-1, sizeof(double)); memcpy(distancesCopy, distances, sizeof(double) * (n-1)); double median = quickselect_median(distancesCopy, n-1); free(distancesCopy); // Sort points into two new arrays // Calculate array sizes for subtrees. Values up to and equal to the median go on the inner tree int innerLength = 0; for (int i = 0; i < n-1; i++) { if(distances[i] <= median) { innerLength++; } } int outerLength = n - 1 - innerLength; //TODO: Perhaps use distancesCopy to reduce the above linear scan to half // Pointers to keep track of inner and outer arrays content while sorting points int innerPointer = 0; int outerPointer = 0; // Create arrays for inner and outer points. 
Arrays contain the points and a list of ids (one for each point) double *innerPoints = calloc(innerLength * d, sizeof(double)); double *outerPoints = calloc(outerLength * d, sizeof(double)); int *innerIDs = calloc(innerLength, sizeof(int)); int *outerIDs = calloc(outerLength, sizeof(int)); // Sort points to inner and outer subtree for (int i = 0; i < n-1; i++){ if(distances[i] <= median){ memcpy(innerPoints + innerPointer * d, points + i*d, sizeof(double) * d); innerIDs[innerPointer] = ids[i]; innerPointer++; } else{ memcpy(outerPoints + outerPointer * d, points + i*d, sizeof(double) * d); outerIDs[outerPointer] = ids[i]; outerPointer++; } } // Set node fields // Copy the point into vp because we will call free(points) that will also free(point) node->vp = calloc(d, sizeof(double)); node->md = median; memcpy(node->vp, point, sizeof(double) * d); node->idx = id; // De-allocate unused memory free(points); free(distances); free(ids); // Build subtrees in parallel or sequentially if((PARALLELSUB == true) && (THREADS_MAX - threadCount >= 2)) { modThreadCount(2); #pragma omp parallel shared(node) { #pragma omp sections nowait { // Create threads #pragma omp section if(innerLength > 0) { node->inner = build_tree(innerPoints, innerIDs, innerLength, d); } #pragma omp section if(outerLength > 0) { node->outer = build_tree(outerPoints, outerIDs, outerLength, d); } } } modThreadCount(-2); } else { if(innerLength > 0) { node->inner = build_tree(innerPoints, innerIDs, innerLength, d); } if(outerLength > 0) { node->outer = build_tree(outerPoints, outerIDs, outerLength, d); } } if(innerLength < 1) { node->inner = NULL; } if(outerLength < 1) { node->outer= NULL; } return node; } // Return vantage-point subtree with points inside radius vptree * getInner(vptree * T) { return T->inner; } // Return vantage-point subtree with points outside radius vptree * getOuter(vptree * T) { return T->outer; } // Return median of distances to vantage point double getMD(vptree * T) { return T->md; } // Return the coordinates of the vantage point double * getVP(vptree * T) { return T->vp; } // Return the index of the vantage point int getIDX(vptree * T) { return T->idx; } // Calculates the distances of all points from point and writes them to an array. If possible use work-sharing to parallelize void euclidean(double *point, double *points, double *distances, int n, int d) { // Accumulator array for parallel execution double accumulator = 0; // Enable dynamic threads allocation omp_set_dynamic(1); // Decide if point calculation should happen in parallel or not if((n-1 > POINT_THRESHOLD) && (PARALLELDIS == true)) { #pragma omp parallel private(accumulator) { #pragma omp for schedule(auto) nowait for (int i = 0; i < n; i++) { accumulator = 0; for (int j = 0; j < d; j++) { accumulator += (point[j] - *(points + i * d + j)) * (point[j] - *(points + i * d + j)); } distances[i] = sqrt(accumulator); } } }else{ for (int i = 0; i < n; i++) { accumulator = 0; for (int j = 0; j < d; j++) { accumulator += (point[j] - *(points + i * d + j)) * (point[j] - *(points + i * d + j)); } distances[i] = sqrt(accumulator); } } return; } // A utility function to swap two elements void swap(double *a, double *b) { double t = *a; *a = *b; *b = t; } // QuickSort Partition function. 
low and high are the range of indexes in arr where partition should work int partition (double arr[], int low, int high) { // Select a pivot and initialize flag to position of smallest element before pivot double pivot = arr[high]; int i = (low - 1); // Go through the array examining each element for (int j = low; j <= high - 1; j++) { // If current element is smaller than the pivot, increment i and swap it out with the one currently pointed by i if (arr[j] < pivot) { i++; swap(&arr[i], &arr[j]); } } // Finally place pivot in its correct position in the array and return the position as the middle point swap(&arr[i + 1], &arr[high]); return (i + 1); } // Returns the median using the QuickSelect algorithm double quickselect_median(double arr[], int length) { if (length % 2 == 1) { return quickselect(arr, length, (length+1)/2); } else { return 0.5 * (quickselect(arr, length, length/2) + quickselect(arr, length, length/2 + 1)); } } // Returns the idx-th element of arr when arr is sorted // idx is the index (starting from 1) of the point we want to find when the array is sorted. For the median idx should be the middle one (i.e (length+1)/2 for odd lengths etc) double quickselect(double arr[], int length, int idx) { // Check to end recursion if (length == 1) { return arr[0]; } // Select last array element as pivot double pivot = arr[length - 1]; // Get index of pivot after we partition the array int pivotIndex = partition(arr, 0, length - 1); // Create the higher and lower arrays that occur after partitioning in QuickSort fashion int lowerLength = pivotIndex; pivotIndex++; int higherLength = (length - (lowerLength + 1)); // At this point pivotIndex, lowerLength and higherLength all start from 1 not 0 double *lower = calloc(lowerLength, sizeof(double)); double *higher = calloc(higherLength, sizeof(double)); memcpy(lower, arr, sizeof(double) * lowerLength); memcpy(higher, arr + pivotIndex, sizeof(double) * higherLength); // Variable to store result of following recursive calls double result = 0; // This means that the point we're looking (median in our case) is in the lower partition if (idx <= lowerLength) { result = quickselect(lower, lowerLength, idx); } // This means that the median is our pivot point else if(idx == pivotIndex) { result = pivot; } // This means that the median is in the higher partition else { result = quickselect(higher, higherLength, idx - pivotIndex); } // Free memory allocated to lower and higher free(lower); free(higher); // Return result return result; }
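/*
 * Editor's note: a hedged usage sketch, not part of vptree_openmp.c.  It builds
 * a tree over n random 2-D points and inspects the root through the accessor
 * functions defined above.  Note that build_tree() frees the points array it is
 * given, so X must be heap-allocated and is not reused afterwards.  The demo_
 * name is hypothetical, and vptree.h is assumed to declare the vptree type and
 * the prototypes listed above.
 */
#include <stdio.h>
#include <stdlib.h>
#include "../inc/vptree.h"

int demo_vptree(void)
{
    int n = 1000, d = 2;
    double *X = malloc(sizeof(double) * n * d);
    for (int i = 0; i < n * d; i++)
        X[i] = (double) rand() / RAND_MAX;

    vptree *root = buildvp(X, n, d);   /* takes ownership of X */
    printf("root vantage point idx = %d, median distance = %f\n",
           getIDX(root), getMD(root));
    printf("inner subtree: %s, outer subtree: %s\n",
           getInner(root) ? "present" : "empty",
           getOuter(root) ? "present" : "empty");
    return 0;
}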
is.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* -------------------------------------------------------------------- NAS Parallel Benchmarks 2.3 OpenMP C versions - IS This benchmark is an OpenMP C version of the NPB IS code. The OpenMP C versions are developed by RWCP and derived from the serial Fortran versions in "NPB 2.3-serial" developed by NAS. Permission to use, copy, distribute and modify this software for any purpose with or without fee is hereby granted. This software is provided "as is" without express or implied warranty. Send comments on the OpenMP C versions to [email protected] Information on OpenMP activities at RWCP is available at: http:pdplab.trc.rwcp.or.jppdperf/Omni/ Information on NAS Parallel Benchmarks 2.3 is available at: http:www.nas.nasa.gov/NAS/NPB/ -------------------------------------------------------------------- */ /* -------------------------------------------------------------------- Author: M. Yarrow OpenMP C version: S. Satoh -------------------------------------------------------------------- */ #include "npbparams.h" #include <stdlib.h> #include <stdio.h> /* #if defined(_OPENMP) */ /* #include <omp.h> */ /* #endif _OPENMP */ /* */ /* For serial IS, buckets are not really req'd to solve NPB1 IS */ /* spec, but their use on some machines improves performance, on */ /* other machines the use of buckets compromises performance, */ /* probably because it is extra computation which is not req'd. */ /* (Note: Mechanism not understood, probably cache related) */ /* Example: SP2-66MhzWN: 50% speedup with buckets */ /* Example: SGI Indy5000: 50% slowdown with buckets */ /* Example: SGI O2000: 400% slowdown with buckets (Wow!) 
*/ /* */ /* #define USE_BUCKETS */ /* buckets are not used in the OpenMP C version */ /* */ /* default values */ /* */ /* */ /* CLASS S */ /* */ /* */ /* CLASS W */ /* */ /* */ /* CLASS A */ /* */ /* */ /* CLASS B */ /* */ /* */ /* CLASS C */ /* */ /* */ /* Typedef: if necessary, change the */ /* size of int here by changing the */ /* int type to, say, long */ /* */ typedef int INT_TYPE; /* */ /* Some global info */ /* */ INT_TYPE * key_buff_ptr_global; /* used by full_verify to get */ /* copies of rank info */ int passed_verification; /* */ /* These are the three main arrays. */ /* See SIZE_OF_BUFFERS def above */ /* */ INT_TYPE key_array[(1<<23)], key_buff1[(1<<23)], key_buff2[(1<<23)], partial_verify_vals[5]; /* */ /* Partial verif info */ /* */ INT_TYPE test_index_array[5], test_rank_array[5], S_test_index_array[5] = {48427, 17148, 23627, 62548, 4431}, S_test_rank_array[5] = {0, 18, 346, 64917, 65463}, W_test_index_array[5] = {357773, 934767, 875723, 898999, 404505}, W_test_rank_array[5] = {1249, 11698, 1039987, 1043896, 1048018}, A_test_index_array[5] = {2112377, 662041, 5336171, 3642833, 4250760}, A_test_rank_array[5] = {104, 17523, 123928, 8288932, 8388264}, B_test_index_array[5] = {41869, 812306, 5102857, 18232239, 26860214}, B_test_rank_array[5] = {33422937, 10244, 59149, 33135281, 99}, C_test_index_array[5] = {44172927, 72999161, 74326391, 129606274, 21736814}, C_test_rank_array[5] = {61147, 882988, 266290, 133997595, 133525895}; /* */ /* function prototypes */ /* */ double randlc(double * X, double * A); void full_verify(void ); /* FUNCTION RANDLC (X, A) * * This routine returns a uniform pseudorandom double precision number in the * range (0, 1) by using the linear congruential generator * * x_{k+1} = a x_k (mod 2^46) * * where 0 < x_k < 2^46 and 0 < a < 2^46. This scheme generates 2^44 numbers * before repeating. The argument A is the same as 'a' in the above formula, * and X is the same as x_0. A and X must be odd double precision integers * in the range (1, 2^46). The returned value RANDLC is normalized to be * between 0 and 1, i.e. RANDLC = 2^(-46) * x_1. X is updated to contain * the new seed x_1, so that subsequent calls to RANDLC using the same * arguments will generate a continuous sequence. * * This routine should produce the same results on any computer with at least * 48 mantissa bits in double precision floating point data. On Cray systems, * double precision should be disabled. * * David H. Bailey October 26, 1990 * * IMPLICIT DOUBLE PRECISION (A-H, O-Z) * SAVE KS, R23, R46, T23, T46 * DATA KS/0/ * * If this is the first call to RANDLC, compute R23 = 2 ^ -23, R46 = 2 ^ -46, * T23 = 2 ^ 23, and T46 = 2 ^ 46. These are computed in loops, rather than * by merely using the ** operator, in order to insure that the results are * exact on all systems. This code assumes that 0.5D0 is represented exactly. 
*/ /* */ /* R A N D L C */ /* */ /* portable random number generator */ /* */ double randlc(double * X, double * A) { static int KS = 0; static double R23, R46, T23, T46; double T1, T2, T3, T4; double A1; double A2; double X1; double X2; double Z; int i, j; double _ret_val_0; if (KS==0) { R23=1.0; R46=1.0; T23=1.0; T46=1.0; #pragma loop name randlc#0 #pragma cetus reduction(*: R23, T23) #pragma cetus parallel #pragma omp parallel for reduction(*: R23, T23) for (i=1; i<=23; i ++ ) { R23=(0.5*R23); T23=(2.0*T23); } #pragma loop name randlc#1 #pragma cetus reduction(*: R46, T46) #pragma cetus parallel #pragma omp parallel for reduction(*: R46, T46) for (i=1; i<=46; i ++ ) { R46=(0.5*R46); T46=(2.0*T46); } KS=1; } /* Break A into two parts such that A = 2^23 A1 + A2 and set X = N. */ T1=(R23*( * A)); j=T1; A1=j; A2=(( * A)-(T23*A1)); /* Break X into two parts such that X = 2^23 X1 + X2, compute Z = A1 * X2 + A2 * X1 (mod 2^23), and then X = 2^23 * Z + A2 * X2 (mod 2^46). */ T1=(R23*( * X)); j=T1; X1=j; X2=(( * X)-(T23*X1)); T1=((A1*X2)+(A2*X1)); j=(R23*T1); T2=j; Z=(T1-(T23*T2)); T3=((T23*Z)+(A2*X2)); j=(R46*T3); T4=j; ( * X)=(T3-(T46*T4)); _ret_val_0=(R46*( * X)); return _ret_val_0; } /* */ /* C R E A T E _ S E Q */ /* */ void create_seq(double seed, double a) { double x; int i, j, k; k=((1<<19)/4); #pragma loop name create_seq#0 for (i=0; i<(1<<23); i ++ ) { x=randlc( & seed, & a); x+=randlc( & seed, & a); x+=randlc( & seed, & a); x+=randlc( & seed, & a); key_array[i]=(k*x); } return ; } /* */ /* F U L L _ V E R I F Y */ /* */ void full_verify() { INT_TYPE i, j; INT_TYPE k; INT_TYPE m, unique_keys; /* Now, finally, sort the keys: */ #pragma loop name full_verify#0 for (i=0; i<(1<<23); i ++ ) { key_array[ -- key_buff_ptr_global[key_buff2[i]]]=key_buff2[i]; } /* Confirm keys correctly sorted: count incorrectly sorted keys, if any */ j=0; #pragma loop name full_verify#1 #pragma cetus reduction(+: j) #pragma cetus parallel #pragma omp parallel for reduction(+: j) for (i=1; i<(1<<23); i ++ ) { if (key_array[i-1]>key_array[i]) { j ++ ; } } if (j!=0) { printf("Full_verify: number of keys out of sort: %d\n", j); } else { passed_verification ++ ; } return ; } /* */ /* R A N K */ /* */ void rank(int iteration) { INT_TYPE i, j, k; INT_TYPE l, m; INT_TYPE shift = 19-10; INT_TYPE key; INT_TYPE min_key_val, max_key_val; INT_TYPE prv_buff1[(1<<19)]; key_array[iteration]=iteration; key_array[iteration+10]=((1<<19)-iteration); /* Determine where the partial verify test keys are, load into */ /* top of array bucket_size */ #pragma loop name rank#0 for (i=0; i<5; i ++ ) { partial_verify_vals[i]=key_array[test_index_array[i]]; } /* Clear the work array */ #pragma loop name rank#1 #pragma cetus parallel #pragma omp parallel for for (i=0; i<(1<<19); i ++ ) { key_buff1[i]=0; } #pragma loop name rank#2 #pragma cetus parallel #pragma omp parallel for for (i=0; i<(1<<19); i ++ ) { prv_buff1[i]=0; } /* Copy keys into work array; keys in key_array will be reused each iter. 
*/ #pragma loop name rank#3 #pragma cetus reduction(+: prv_buff1[key_buff2[i]]) for (i=0; i<(1<<23); i ++ ) { key_buff2[i]=key_array[i]; /* Ranking of all keys occurs in this section: */ /* In this section, the keys themselves are used as their own indexes to determine how many of each there are: their individual population */ prv_buff1[key_buff2[i]] ++ ; /* Now they have individual key */ } /* population */ #pragma loop name rank#4 for (i=0; i<((1<<19)-1); i ++ ) { prv_buff1[i+1]+=prv_buff1[i]; } #pragma loop name rank#5 for (i=0; i<(1<<19); i ++ ) { key_buff1[i]+=prv_buff1[i]; } /* To obtain ranks of each key, successively add the individual key population, not forgetting to add m, the total of lesser keys, to the first key population */ /* This is the partial verify test section */ /* Observe that test_rank_array vals are */ /* shifted differently for different cases */ #pragma loop name rank#6 #pragma cetus reduction(+: passed_verification) for (i=0; i<5; i ++ ) { k=partial_verify_vals[i]; /* test vals were put here */ if ((0<=k)&&(k<=((1<<23)-1))) { switch ('A') { case 'S': if (i<=2) { if (key_buff1[k-1]!=(test_rank_array[i]+iteration)) { printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i); } else { passed_verification ++ ; } } else { if (key_buff1[k-1]!=(test_rank_array[i]-iteration)) { printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i); } else { passed_verification ++ ; } } break; case 'W': if (i<2) { if (key_buff1[k-1]!=(test_rank_array[i]+(iteration-2))) { printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i); } else { passed_verification ++ ; } } else { if (key_buff1[k-1]!=(test_rank_array[i]-iteration)) { printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i); } else { passed_verification ++ ; } } break; case 'A': if (i<=2) { if (key_buff1[k-1]!=(test_rank_array[i]+(iteration-1))) { printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i); } else { passed_verification ++ ; } } else { if (key_buff1[k-1]!=(test_rank_array[i]-(iteration-1))) { printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i); } else { passed_verification ++ ; } } break; case 'B': if (((i==1)||(i==2))||(i==4)) { if (key_buff1[k-1]!=(test_rank_array[i]+iteration)) { printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i); } else { passed_verification ++ ; } } else { if (key_buff1[k-1]!=(test_rank_array[i]-iteration)) { printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i); } else { passed_verification ++ ; } } break; case 'C': if (i<=2) { if (key_buff1[k-1]!=(test_rank_array[i]+iteration)) { printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i); } else { passed_verification ++ ; } } else { if (key_buff1[k-1]!=(test_rank_array[i]-iteration)) { printf("Failed partial verification: ""iteration %d, test key %d\n", iteration, i); } else { passed_verification ++ ; } } break; } } } /* Make copies of rank info for use by full_verify: these variables in rank are local; making them global slows down the code, probably since they cannot be made register by compiler */ if (iteration==10) { key_buff_ptr_global=key_buff1; } /* end master */ return ; } /* */ /* M A I N */ /* */ main(int argc, char * * argv) { int i, iteration, itemp; int nthreads = 1; double timecounter, maxtime; /* Initialize the verification arrays if a valid class */ int _ret_val_0; #pragma loop name 
main#0 for (i=0; i<5; i ++ ) { switch ('A') { case 'S': test_index_array[i]=S_test_index_array[i]; test_rank_array[i]=S_test_rank_array[i]; break; case 'A': test_index_array[i]=A_test_index_array[i]; test_rank_array[i]=A_test_rank_array[i]; break; case 'W': test_index_array[i]=W_test_index_array[i]; test_rank_array[i]=W_test_rank_array[i]; break; case 'B': test_index_array[i]=B_test_index_array[i]; test_rank_array[i]=B_test_rank_array[i]; break; case 'C': test_index_array[i]=C_test_index_array[i]; test_rank_array[i]=C_test_rank_array[i]; break; } } ; /* Printout initial NPB info */ printf("\n\n NAS Parallel Benchmarks 2.3 OpenMP C version"" - IS Benchmark\n\n"); printf(" Size: %d (class %c)\n", 1<<23, 'A'); printf(" Iterations: %d\n", 10); /* Initialize timer */ timer_clear(0); /* Generate random number sequence and subsequent keys on all procs */ /* Random number gen seed */ create_seq(3.14159265E8, 1.220703125E9); /* Random number gen mult */ /* Do one interation for free (i.e., untimed) to guarantee initialization of all data and code pages and respective tables */ rank(1); /* Start verification counter */ passed_verification=0; printf("\n iteration\n"); /* Start timer */ timer_start(0); /* This is the main iteration */ #pragma loop name main#1 for (iteration=1; iteration<=10; iteration ++ ) { printf(" %d\n", iteration); rank(iteration); /* #if defined(_OPENMP) */ /* nthreads = omp_get_num_threads(); */ /* #endif _OPENMP */ } /* End of timing, obtain maximum time of all processors */ timer_stop(0); timecounter=timer_read(0); /* This tests that keys are in sequence: sorting of last ranked key seq occurs here, but is an untimed operation */ full_verify(); /* The final printout */ if (passed_verification!=((5*10)+1)) { passed_verification=0; } c_print_results("IS", 'A', 1<<23, 0, 0, 10, nthreads, timecounter, (((double)(10*(1<<23)))/timecounter)/1000000.0, "keys ranked", passed_verification, "3.0 structured", "28 Nov 2019", "(none)", "(none)", "-lm", "(none)", "(none)", "(none)", "randlc"); /* */ return _ret_val_0; }
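/*
 * Editor's note: a hedged, self-contained illustration of the ranking idea used
 * by rank() and full_verify() above, not part of the benchmark.  Counting the
 * population of each key value and taking an inclusive prefix sum yields, for
 * every key, the position just past its last occurrence in sorted order; the
 * final scatter then places the keys, exactly as full_verify() does through
 * key_buff_ptr_global.  The demo_ names are hypothetical.
 */
#include <stdio.h>

#define DEMO_NKEYS   8
#define DEMO_MAXKEY  4   /* keys take values 0 .. DEMO_MAXKEY-1 */

int demo_rank(void)
{
    int keys[DEMO_NKEYS] = {3, 1, 0, 3, 2, 1, 1, 0};
    int count[DEMO_MAXKEY] = {0};
    int sorted[DEMO_NKEYS];

    for (int i = 0; i < DEMO_NKEYS; i++) count[keys[i]]++;           /* key populations */
    for (int k = 1; k < DEMO_MAXKEY; k++) count[k] += count[k - 1];  /* prefix sum = ranks */

    for (int i = DEMO_NKEYS - 1; i >= 0; i--)                        /* scatter into place */
        sorted[--count[keys[i]]] = keys[i];

    for (int i = 0; i < DEMO_NKEYS; i++) printf("%d ", sorted[i]);   /* prints 0 0 1 1 1 2 3 3 */
    printf("\n");
    return 0;
}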
GB_binop__gt_uint16.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__gt_uint16) // A.*B function (eWiseMult): GB (_AemultB_08__gt_uint16) // A.*B function (eWiseMult): GB (_AemultB_02__gt_uint16) // A.*B function (eWiseMult): GB (_AemultB_04__gt_uint16) // A.*B function (eWiseMult): GB (_AemultB_bitmap__gt_uint16) // A*D function (colscale): GB (_AxD__gt_uint16) // D*A function (rowscale): GB (_DxB__gt_uint16) // C+=B function (dense accum): GB (_Cdense_accumB__gt_uint16) // C+=b function (dense accum): GB (_Cdense_accumb__gt_uint16) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__gt_uint16) // C=scalar+B GB (_bind1st__gt_uint16) // C=scalar+B' GB (_bind1st_tran__gt_uint16) // C=A+scalar GB (_bind2nd__gt_uint16) // C=A'+scalar GB (_bind2nd_tran__gt_uint16) // C type: bool // A type: uint16_t // A pattern? 0 // B type: uint16_t // B pattern? 0 // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ uint16_t #define GB_BTYPE \ uint16_t #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint16_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint16_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x > y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_UINT16 || GxB_NO_GT_UINT16) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__gt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__gt_uint16) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__gt_uint16) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type uint16_t uint16_t bwork = (*((uint16_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__gt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__gt_uint16) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__gt_uint16) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint16_t alpha_scalar ; uint16_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint16_t *) alpha_scalar_in)) 
; beta_scalar = (*((uint16_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__gt_uint16) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__gt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__gt_uint16) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__gt_uint16) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__gt_uint16) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; uint16_t x = (*((uint16_t *) x_input)) ; uint16_t *Bx = (uint16_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint16_t bij = GBX (Bx, p, false) ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__gt_uint16) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; uint16_t *Ax = (uint16_t *) Ax_input ; uint16_t y = (*((uint16_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint16_t aij = GBX (Ax, p, false) ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB (_bind1st_tran__gt_uint16) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { 
// GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint16_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t x = (*((const uint16_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint16_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint16_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij > y) ; \ } GrB_Info GB (_bind2nd_tran__gt_uint16) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint16_t y = (*((const uint16_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
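The bind1st and bind2nd kernels above differ only in which operand of z = (x > y) is frozen to a scalar. A minimal standalone sketch of that semantics (plain C, not GraphBLAS code; the helper names are hypothetical):

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>

// Cx = (x > B): the scalar is bound to the first operand (bind1st).
static void gt_uint16_bind1st_sketch (bool *Cx, uint16_t x, const uint16_t *Bx, size_t n)
{
    for (size_t p = 0 ; p < n ; p++) Cx [p] = (x > Bx [p]) ;
}

// Cx = (A > y): the scalar is bound to the second operand (bind2nd).
static void gt_uint16_bind2nd_sketch (bool *Cx, const uint16_t *Ax, uint16_t y, size_t n)
{
    for (size_t p = 0 ; p < n ; p++) Cx [p] = (Ax [p] > y) ;
}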
primitives.h
#pragma once #include <vector> #include <cstdint> #include <omp.h> #include "local_buffer.h" #include "../timer.h" using namespace std; #define CACHE_LINE_ENTRY (16) #define LOCAL_BUDGET (8*1024*1024) template<typename T> void MemSetOMP(T *arr, int val, size_t size) { size_t tid = omp_get_thread_num(); size_t max_omp_threads = omp_get_num_threads(); size_t task_num = size; size_t avg = (task_num + max_omp_threads - 1) / max_omp_threads; auto it_beg = avg * tid; auto it_end = min(avg * (tid + 1), task_num); memset(arr + it_beg, val, sizeof(T) * (it_end - it_beg)); #pragma omp barrier } template<typename T> void MemCpyOMP(T *dst, T *src, size_t size) { size_t tid = omp_get_thread_num(); size_t max_omp_threads = omp_get_num_threads(); size_t task_num = size; size_t avg = (task_num + max_omp_threads - 1) / max_omp_threads; auto it_beg = avg * tid; auto it_end = min(avg * (tid + 1), task_num); memcpy(dst + it_beg, src + it_beg, sizeof(T) * (it_end - it_beg)); #pragma omp barrier } /* * InclusivePrefixSumOMP: General Case Inclusive Prefix Sum * histogram: is for cache-aware thread-local histogram purpose * output: should be different from the variables captured in function object f * size: is the original size for the flagged prefix sum * f: requires it as the parameter, f(it) return the histogram value of that it */ template<typename H, typename T, typename F> void InclusivePrefixSumOMP(vector<H> &histogram, T *output, size_t size, F f) { int omp_num_threads = omp_get_num_threads(); #pragma omp single { histogram = vector<H>((omp_num_threads + 1) * CACHE_LINE_ENTRY, 0); } static thread_local int tid = omp_get_thread_num(); // 1st Pass: Histogram. auto avg = size / omp_num_threads; auto it_beg = avg * tid; auto histogram_idx = (tid + 1) * CACHE_LINE_ENTRY; histogram[histogram_idx] = 0; auto it_end = tid == omp_num_threads - 1 ? size : avg * (tid + 1); size_t prev = 0u; for (auto it = it_beg; it < it_end; it++) { auto value = f(it); histogram[histogram_idx] += value; prev += value; output[it] = prev; } #pragma omp barrier // 2nd Pass: single-prefix-sum & Add previous sum. #pragma omp single { for (auto local_tid = 0; local_tid < omp_num_threads; local_tid++) { auto local_histogram_idx = (local_tid + 1) * CACHE_LINE_ENTRY; auto prev_histogram_idx = (local_tid) * CACHE_LINE_ENTRY; histogram[local_histogram_idx] += histogram[prev_histogram_idx]; } } { auto prev_sum = histogram[tid * CACHE_LINE_ENTRY]; for (auto it = it_beg; it < it_end; it++) { output[it] += prev_sum; } #pragma omp barrier } } /* * FlagPrefixSumOMP: special case of InclusivePrefixSumOMP */ template<typename H, typename T, typename F> void FlagPrefixSumOMP(vector<H> &histogram, T *output, size_t size, F f) { InclusivePrefixSumOMP(histogram, output, size, [&f](size_t it) { return f(it) ? 1 : 0; }); } /* * SelectNotFOMP: selection primitive * !f(it) returns selected */ template<typename H, typename T, typename OFF, typename F> void SelectNotFOMP(vector<H> &histogram, T *output, T *input, OFF *relative_off, size_t size, F f) { FlagPrefixSumOMP(histogram, relative_off, size, f); #pragma omp for for (size_t i = 0u; i < size; i++) { if (!(f(i))) { auto off = i - relative_off[i]; output[off] = input[i]; } } } template<typename OFF, typename F> void Histogram(size_t size, OFF *&bucket_ptrs, int32_t num_buckets, F f, Timer *timer = nullptr) { // Histogram. 
auto local_buf = (uint8_t *) calloc(num_buckets, sizeof(uint8_t)); #pragma omp for for (size_t i = 0u; i < size; i++) { auto src = f(i); local_buf[src]++; if (local_buf[src] == 0xff) { __sync_fetch_and_add(&bucket_ptrs[src], 0xff); local_buf[src] = 0; } } #pragma omp single if (timer != nullptr)log_info("[%s]: Local Comp Time: %.9lfs", __FUNCTION__, timer->elapsed()); for (size_t i = 0; i < num_buckets; i++) { if (local_buf[i] != 0) { __sync_fetch_and_add(&bucket_ptrs[i], local_buf[i]); } } #pragma omp barrier free(local_buf); } template<typename OFF, typename F> void HistogramAtomic(size_t size, OFF *&bucket_ptrs, int32_t num_buckets, F f) { // Histogram. #pragma omp for for (size_t i = 0u; i < size; i++) { auto src = f(i); __sync_fetch_and_add(&bucket_ptrs[src], 1); } } /* * Require an output array, * f: is the property for the bucket ID, given an index on the input array * Inefficient when there are lots of contentions because of atomic operations */ template<typename H, typename T, typename OFF, typename F> void BucketSort(vector<H> &histogram, T *&input, T *&output, OFF *&cur_write_off, OFF *&bucket_ptrs, size_t size, int32_t num_buckets, F f, Timer *timer = nullptr) { // Populate. #pragma omp single { bucket_ptrs = (OFF *) malloc(sizeof(OFF) * (num_buckets + 1)); cur_write_off = (OFF *) malloc(sizeof(OFF) * (num_buckets + 1)); cur_write_off[0] = 0; } MemSetOMP(bucket_ptrs, 0, num_buckets + 1); Histogram(size, bucket_ptrs, num_buckets, f, timer); // HistogramAtomic(size, bucket_ptrs, num_buckets, f); #pragma omp single if (timer != nullptr)log_info("[%s]: Histogram, Time: %.9lfs", __FUNCTION__, timer->elapsed()); InclusivePrefixSumOMP(histogram, cur_write_off + 1, num_buckets, [&bucket_ptrs](uint32_t it) { return bucket_ptrs[it]; }); MemCpyOMP(bucket_ptrs, cur_write_off, num_buckets + 1); #pragma omp single { if (timer != nullptr)log_info("[%s]: Before Scatter, Time: %.9lfs", __FUNCTION__, timer->elapsed()); } // Scatter. #pragma omp for for (size_t i = 0u; i < size; i++) { auto element = input[i]; auto bucket_id = f(i); auto old_offset = __sync_fetch_and_add(&(cur_write_off[bucket_id]), 1); output[old_offset] = element; } #pragma omp single { if (timer != nullptr)log_info("[%s]: Before Sort, Time: %.9lfs", __FUNCTION__, timer->elapsed()); } #pragma omp barrier } template<typename H, typename T, typename OFF, typename F> void BucketSortSmallBuckets(vector<H> &histogram, T *&input, T *&output, OFF *&cur_write_off, OFF *&bucket_ptrs, size_t size, int32_t num_buckets, F f, Timer *timer = nullptr) { using BufT= LocalWriteBuffer<T, uint32_t>; auto cap = max<int>(CACHE_LINE_ENTRY, LOCAL_BUDGET / num_buckets / sizeof(T)); auto bucket_write_buffers = (BufT *) malloc(num_buckets * sizeof(BufT)); auto bucket_buffers = (T *) malloc(cap * num_buckets * sizeof(T)); // Populate. 
#pragma omp single { int max_omp_threads = omp_get_num_threads(); log_info("[%s]: Mem Size Buckets: %zu, Bucket#: %d", __FUNCTION__, cap * num_buckets * sizeof(T) * max_omp_threads, num_buckets); bucket_ptrs = (uint32_t *) malloc(sizeof(OFF) * (num_buckets + 1)); cur_write_off = (uint32_t *) malloc(sizeof(OFF) * (num_buckets + 1)); cur_write_off[0] = 0; } MemSetOMP(bucket_ptrs, 0, num_buckets + 1); Histogram(size, bucket_ptrs, num_buckets, f); #pragma omp barrier InclusivePrefixSumOMP(histogram, cur_write_off + 1, num_buckets, [&bucket_ptrs](uint32_t it) { return bucket_ptrs[it]; }); MemCpyOMP(bucket_ptrs, cur_write_off, num_buckets + 1); #pragma omp single { if (timer != nullptr)log_info("[%s]: Before Scatter, Time: %.9lfs", __FUNCTION__, timer->elapsed()); } for (auto i = 0; i < num_buckets; i++) { bucket_write_buffers[i] = BufT(bucket_buffers + cap * i, cap, output, &cur_write_off[i]); } #pragma omp barrier // Scatter. #pragma omp for for (size_t i = 0u; i < size; i++) { auto element = input[i]; auto bucket_id = f(i); bucket_write_buffers[bucket_id].push(element); } for (auto i = 0; i < num_buckets; i++) { bucket_write_buffers[i].submit_if_possible(); } #pragma omp barrier #pragma omp single { if (timer != nullptr)log_info("[%s]: Before Sort, Time: %.9lfs", __FUNCTION__, timer->elapsed()); } free(bucket_buffers); free(bucket_write_buffers); #pragma omp barrier }
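A minimal usage sketch for the primitives above (not part of primitives.h): the helpers call omp_get_thread_num()/omp_get_num_threads() and use internal barriers, so every thread of an already-open parallel region must enter them together. The array names below are hypothetical.

#include <cstdint>
#include <vector>
#include <omp.h>
#include "primitives.h"

// Build an inclusive prefix sum of per-vertex degrees into offsets[],
// so offsets[i] = degrees[0] + ... + degrees[i].
void build_offsets(const std::vector<uint32_t> &degrees, std::vector<uint32_t> &offsets) {
    offsets.resize(degrees.size());
    std::vector<uint32_t> histogram;   // cache-aware per-thread histogram workspace
#pragma omp parallel
    {
        // every thread enters; the primitive partitions the index range itself
        InclusivePrefixSumOMP(histogram, offsets.data(), degrees.size(),
                              [&degrees](size_t it) { return degrees[it]; });
    }
}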
GB_AxB_dot2_nomask.c
//------------------------------------------------------------------------------ // GB_AxB_dot2_nomask: C=A'*B via dot products //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ { int ntasks = naslice * nbslice ; int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) for (taskid = 0 ; taskid < ntasks ; taskid++) { int a_taskid = taskid / nbslice ; int b_taskid = taskid % nbslice ; //---------------------------------------------------------------------- // get A //---------------------------------------------------------------------- GrB_Matrix A = Aslice [a_taskid] ; const int64_t *GB_RESTRICT Ai = A->i ; #if defined ( GB_PHASE_1_OF_2 ) int64_t *GB_RESTRICT C_count = C_counts [a_taskid] ; #else int64_t *GB_RESTRICT C_count_start = (a_taskid == 0) ? NULL : C_counts [a_taskid] ; int64_t *GB_RESTRICT C_count_end = (a_taskid == naslice-1) ? NULL : C_counts [a_taskid+1] ; const GB_ATYPE *GB_RESTRICT Ax = A_is_pattern ? NULL : A->x ; #endif //---------------------------------------------------------------------- // C=A'*B via dot products //---------------------------------------------------------------------- for (int64_t Iter_k = B_slice [b_taskid] ; Iter_k < B_slice [b_taskid+1] ; Iter_k++) { //------------------------------------------------------------------ // get B(:,j) //------------------------------------------------------------------ GBI_jth_iteration_with_iter (Iter, j, pB_start, pB_end) ; int64_t bjnz = pB_end - pB_start ; // no work to do if B(:,j) is empty if (bjnz == 0) continue ; //------------------------------------------------------------------ // phase 1 of 2: skip if B(:,j) is dense //------------------------------------------------------------------ #if defined ( GB_PHASE_1_OF_2 ) if (bjnz == bvlen) { // C(i,j) is if A(:i) not empty C_count [Iter_k] = A->nvec_nonempty ; continue ; } #endif //------------------------------------------------------------------ // phase 2 of 2: get the range of entries in C(:,j) to compute //------------------------------------------------------------------ #if defined ( GB_PHASE_2_OF_2 ) // this thread computes Ci and Cx [cnz:cnz_last] int64_t cnz = Cp [Iter_k] + ((C_count_start == NULL) ? 0 : C_count_start [Iter_k]) ; int64_t cnz_last = (C_count_end == NULL) ? (Cp [Iter_k+1] - 1) : (Cp [Iter_k] + C_count_end [Iter_k] - 1) ; if (cnz > cnz_last) continue ; #endif //------------------------------------------------------------------ // C(:,j) = A'*B(:,j) //------------------------------------------------------------------ // get the first and last index in B(:,j) int64_t ib_first = Bi [pB_start] ; int64_t ib_last = Bi [pB_end-1] ; // for each vector A(:,i): GBI_for_each_vector_with_iter (Iter_A, A) { GBI_jth_iteration_with_iter (Iter_A, i, pA, pA_end) ; // C(i,j) = A(:,i)'*B(:,j) #include "GB_AxB_dot_cij.c" } } } }
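The template above forms C = A'*B one dot product at a time, pairing a vector A(:,i) with a vector B(:,j). A standalone dense sketch of that access pattern (plain C, column-major, not GraphBLAS code; names and dimensions are hypothetical):

#include <stddef.h>

// C (n-by-k) = A' * B, where A is m-by-n and B is m-by-k, all column-major
// with leading dimension equal to the number of rows.
void atb_dot_sketch (double *C, const double *A, const double *B,
                     size_t m, size_t n, size_t k)
{
    for (size_t j = 0 ; j < k ; j++)            // for each column B(:,j)
    {
        for (size_t i = 0 ; i < n ; i++)        // for each column A(:,i)
        {
            double cij = 0 ;
            for (size_t p = 0 ; p < m ; p++)
            {
                cij += A [p + i*m] * B [p + j*m] ;   // A(:,i)' * B(:,j)
            }
            C [i + j*n] = cij ;
        }
    }
}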
feature_group.h
/*! * Copyright (c) 2017 Microsoft Corporation. All rights reserved. * Licensed under the MIT License. See LICENSE file in the project root for license information. */ #ifndef LIGHTGBM_FEATURE_GROUP_H_ #define LIGHTGBM_FEATURE_GROUP_H_ #include <LightGBM/bin.h> #include <LightGBM/meta.h> #include <LightGBM/utils/random.h> #include <cstdio> #include <memory> #include <vector> namespace LightGBM { class Dataset; class DatasetLoader; /*! \brief Using to store data and providing some operations on one feature group*/ class FeatureGroup { public: friend Dataset; friend DatasetLoader; /*! * \brief Constructor * \param num_feature number of features of this group * \param bin_mappers Bin mapper for features * \param num_data Total number of data * \param is_enable_sparse True if enable sparse feature */ FeatureGroup(int num_feature, bool is_multi_val, std::vector<std::unique_ptr<BinMapper>>* bin_mappers, data_size_t num_data) : num_feature_(num_feature), is_multi_val_(is_multi_val), is_sparse_(false) { CHECK_EQ(static_cast<int>(bin_mappers->size()), num_feature); // use bin at zero to store most_freq_bin num_total_bin_ = 1; bin_offsets_.emplace_back(num_total_bin_); auto& ref_bin_mappers = *bin_mappers; for (int i = 0; i < num_feature_; ++i) { bin_mappers_.emplace_back(ref_bin_mappers[i].release()); auto num_bin = bin_mappers_[i]->num_bin(); if (bin_mappers_[i]->GetMostFreqBin() == 0) { num_bin -= 1; } num_total_bin_ += num_bin; bin_offsets_.emplace_back(num_total_bin_); } CreateBinData(num_data, is_multi_val_, true, false); } FeatureGroup(const FeatureGroup& other, int num_data) { num_feature_ = other.num_feature_; is_multi_val_ = other.is_multi_val_; is_sparse_ = other.is_sparse_; num_total_bin_ = other.num_total_bin_; bin_offsets_ = other.bin_offsets_; bin_mappers_.reserve(other.bin_mappers_.size()); for (auto& bin_mapper : other.bin_mappers_) { bin_mappers_.emplace_back(new BinMapper(*bin_mapper)); } CreateBinData(num_data, is_multi_val_, !is_sparse_, is_sparse_); } FeatureGroup(std::vector<std::unique_ptr<BinMapper>>* bin_mappers, data_size_t num_data) : num_feature_(1), is_multi_val_(false) { CHECK_EQ(static_cast<int>(bin_mappers->size()), 1); // use bin at zero to store default_bin num_total_bin_ = 1; bin_offsets_.emplace_back(num_total_bin_); auto& ref_bin_mappers = *bin_mappers; for (int i = 0; i < num_feature_; ++i) { bin_mappers_.emplace_back(ref_bin_mappers[i].release()); auto num_bin = bin_mappers_[i]->num_bin(); if (bin_mappers_[i]->GetMostFreqBin() == 0) { num_bin -= 1; } num_total_bin_ += num_bin; bin_offsets_.emplace_back(num_total_bin_); } CreateBinData(num_data, false, false, false); } /*! 
* \brief Constructor from memory * \param memory Pointer of memory * \param num_all_data Number of global data * \param local_used_indices Local used indices, empty means using all data */ FeatureGroup(const void* memory, data_size_t num_all_data, const std::vector<data_size_t>& local_used_indices) { const char* memory_ptr = reinterpret_cast<const char*>(memory); // get is_sparse is_multi_val_ = *(reinterpret_cast<const bool*>(memory_ptr)); memory_ptr += sizeof(is_multi_val_); is_sparse_ = *(reinterpret_cast<const bool*>(memory_ptr)); memory_ptr += sizeof(is_sparse_); num_feature_ = *(reinterpret_cast<const int*>(memory_ptr)); memory_ptr += sizeof(num_feature_); // get bin mapper bin_mappers_.clear(); bin_offsets_.clear(); // start from 1, due to need to store zero bin in this slot num_total_bin_ = 1; bin_offsets_.emplace_back(num_total_bin_); for (int i = 0; i < num_feature_; ++i) { bin_mappers_.emplace_back(new BinMapper(memory_ptr)); auto num_bin = bin_mappers_[i]->num_bin(); if (bin_mappers_[i]->GetMostFreqBin() == 0) { num_bin -= 1; } num_total_bin_ += num_bin; bin_offsets_.emplace_back(num_total_bin_); memory_ptr += bin_mappers_[i]->SizesInByte(); } data_size_t num_data = num_all_data; if (!local_used_indices.empty()) { num_data = static_cast<data_size_t>(local_used_indices.size()); } if (is_multi_val_) { for (int i = 0; i < num_feature_; ++i) { int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1; if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) { multi_bin_data_.emplace_back(Bin::CreateSparseBin(num_data, bin_mappers_[i]->num_bin() + addi)); } else { multi_bin_data_.emplace_back(Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi)); } multi_bin_data_.back()->LoadFromMemory(memory_ptr, local_used_indices); memory_ptr += multi_bin_data_.back()->SizesInByte(); } } else { if (is_sparse_) { bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_)); } else { bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_)); } // get bin data bin_data_->LoadFromMemory(memory_ptr, local_used_indices); } } /*! \brief Destructor */ ~FeatureGroup() { } /*! 
* \brief Push one record, will auto convert to bin and push to bin data * \param tid Thread id * \param idx Index of record * \param value feature value of record */ inline void PushData(int tid, int sub_feature_idx, data_size_t line_idx, double value) { uint32_t bin = bin_mappers_[sub_feature_idx]->ValueToBin(value); if (bin == bin_mappers_[sub_feature_idx]->GetMostFreqBin()) { return; } if (bin_mappers_[sub_feature_idx]->GetMostFreqBin() == 0) { bin -= 1; } if (is_multi_val_) { multi_bin_data_[sub_feature_idx]->Push(tid, line_idx, bin + 1); } else { bin += bin_offsets_[sub_feature_idx]; bin_data_->Push(tid, line_idx, bin); } } void ReSize(int num_data) { if (!is_multi_val_) { bin_data_->ReSize(num_data); } else { for (int i = 0; i < num_feature_; ++i) { multi_bin_data_[i]->ReSize(num_data); } } } inline void CopySubrow(const FeatureGroup* full_feature, const data_size_t* used_indices, data_size_t num_used_indices) { if (!is_multi_val_) { bin_data_->CopySubrow(full_feature->bin_data_.get(), used_indices, num_used_indices); } else { for (int i = 0; i < num_feature_; ++i) { multi_bin_data_[i]->CopySubrow(full_feature->multi_bin_data_[i].get(), used_indices, num_used_indices); } } } inline BinIterator* SubFeatureIterator(int sub_feature) { uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin(); if (!is_multi_val_) { uint32_t min_bin = bin_offsets_[sub_feature]; uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1; return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin); } else { int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1; uint32_t min_bin = 1; uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi; return multi_bin_data_[sub_feature]->GetIterator(min_bin, max_bin, most_freq_bin); } } inline void FinishLoad() { if (is_multi_val_) { OMP_INIT_EX(); #pragma omp parallel for schedule(guided) for (int i = 0; i < num_feature_; ++i) { OMP_LOOP_EX_BEGIN(); multi_bin_data_[i]->FinishLoad(); OMP_LOOP_EX_END(); } OMP_THROW_EX(); } else { bin_data_->FinishLoad(); } } /*! * \brief Returns a BinIterator that can access the entire feature group's raw data. * The RawGet() function of the iterator should be called for best efficiency. 
* \return A pointer to the BinIterator object */ inline BinIterator* FeatureGroupIterator() { if (is_multi_val_) { return nullptr; } uint32_t min_bin = bin_offsets_[0]; uint32_t max_bin = bin_offsets_.back() - 1; uint32_t most_freq_bin = 0; return bin_data_->GetIterator(min_bin, max_bin, most_freq_bin); } inline data_size_t Split(int sub_feature, const uint32_t* threshold, int num_threshold, bool default_left, const data_size_t* data_indices, data_size_t cnt, data_size_t* lte_indices, data_size_t* gt_indices) const { uint32_t default_bin = bin_mappers_[sub_feature]->GetDefaultBin(); uint32_t most_freq_bin = bin_mappers_[sub_feature]->GetMostFreqBin(); if (!is_multi_val_) { uint32_t min_bin = bin_offsets_[sub_feature]; uint32_t max_bin = bin_offsets_[sub_feature + 1] - 1; if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) { auto missing_type = bin_mappers_[sub_feature]->missing_type(); if (num_feature_ == 1) { return bin_data_->Split(max_bin, default_bin, most_freq_bin, missing_type, default_left, *threshold, data_indices, cnt, lte_indices, gt_indices); } else { return bin_data_->Split(min_bin, max_bin, default_bin, most_freq_bin, missing_type, default_left, *threshold, data_indices, cnt, lte_indices, gt_indices); } } else { if (num_feature_ == 1) { return bin_data_->SplitCategorical(max_bin, most_freq_bin, threshold, num_threshold, data_indices, cnt, lte_indices, gt_indices); } else { return bin_data_->SplitCategorical( min_bin, max_bin, most_freq_bin, threshold, num_threshold, data_indices, cnt, lte_indices, gt_indices); } } } else { int addi = bin_mappers_[sub_feature]->GetMostFreqBin() == 0 ? 0 : 1; uint32_t max_bin = bin_mappers_[sub_feature]->num_bin() - 1 + addi; if (bin_mappers_[sub_feature]->bin_type() == BinType::NumericalBin) { auto missing_type = bin_mappers_[sub_feature]->missing_type(); return multi_bin_data_[sub_feature]->Split( max_bin, default_bin, most_freq_bin, missing_type, default_left, *threshold, data_indices, cnt, lte_indices, gt_indices); } else { return multi_bin_data_[sub_feature]->SplitCategorical( max_bin, most_freq_bin, threshold, num_threshold, data_indices, cnt, lte_indices, gt_indices); } } } /*! * \brief From bin to feature value * \param bin * \return FeatureGroup value of this bin */ inline double BinToValue(int sub_feature_idx, uint32_t bin) const { return bin_mappers_[sub_feature_idx]->BinToValue(bin); } /*! * \brief Save binary data to file * \param file File want to write */ void SaveBinaryToFile(const VirtualFileWriter* writer) const { writer->Write(&is_multi_val_, sizeof(is_multi_val_)); writer->Write(&is_sparse_, sizeof(is_sparse_)); writer->Write(&num_feature_, sizeof(num_feature_)); for (int i = 0; i < num_feature_; ++i) { bin_mappers_[i]->SaveBinaryToFile(writer); } if (is_multi_val_) { for (int i = 0; i < num_feature_; ++i) { multi_bin_data_[i]->SaveBinaryToFile(writer); } } else { bin_data_->SaveBinaryToFile(writer); } } /*! * \brief Get sizes in byte of this object */ size_t SizesInByte() const { size_t ret = sizeof(is_multi_val_) + sizeof(is_sparse_) + sizeof(num_feature_); for (int i = 0; i < num_feature_; ++i) { ret += bin_mappers_[i]->SizesInByte(); } if (!is_multi_val_) { ret += bin_data_->SizesInByte(); } else { for (int i = 0; i < num_feature_; ++i) { ret += multi_bin_data_[i]->SizesInByte(); } } return ret; } /*! \brief Disable copy */ FeatureGroup& operator=(const FeatureGroup&) = delete; /*! 
\brief Deep copy */ FeatureGroup(const FeatureGroup& other) { num_feature_ = other.num_feature_; is_multi_val_ = other.is_multi_val_; is_sparse_ = other.is_sparse_; num_total_bin_ = other.num_total_bin_; bin_offsets_ = other.bin_offsets_; bin_mappers_.reserve(other.bin_mappers_.size()); for (auto& bin_mapper : other.bin_mappers_) { bin_mappers_.emplace_back(new BinMapper(*bin_mapper)); } if (!is_multi_val_) { bin_data_.reset(other.bin_data_->Clone()); } else { multi_bin_data_.clear(); for (int i = 0; i < num_feature_; ++i) { multi_bin_data_.emplace_back(other.multi_bin_data_[i]->Clone()); } } } private: void CreateBinData(int num_data, bool is_multi_val, bool force_dense, bool force_sparse) { if (is_multi_val) { multi_bin_data_.clear(); for (int i = 0; i < num_feature_; ++i) { int addi = bin_mappers_[i]->GetMostFreqBin() == 0 ? 0 : 1; if (bin_mappers_[i]->sparse_rate() >= kSparseThreshold) { multi_bin_data_.emplace_back(Bin::CreateSparseBin( num_data, bin_mappers_[i]->num_bin() + addi)); } else { multi_bin_data_.emplace_back( Bin::CreateDenseBin(num_data, bin_mappers_[i]->num_bin() + addi)); } } is_multi_val_ = true; } else { if (force_sparse || (!force_dense && num_feature_ == 1 && bin_mappers_[0]->sparse_rate() >= kSparseThreshold)) { is_sparse_ = true; bin_data_.reset(Bin::CreateSparseBin(num_data, num_total_bin_)); } else { is_sparse_ = false; bin_data_.reset(Bin::CreateDenseBin(num_data, num_total_bin_)); } is_multi_val_ = false; } } /*! \brief Number of features */ int num_feature_; /*! \brief Bin mapper for sub features */ std::vector<std::unique_ptr<BinMapper>> bin_mappers_; /*! \brief Bin offsets for sub features */ std::vector<uint32_t> bin_offsets_; /*! \brief Bin data of this feature */ std::unique_ptr<Bin> bin_data_; std::vector<std::unique_ptr<Bin>> multi_bin_data_; /*! \brief True if this feature is sparse */ bool is_multi_val_; bool is_sparse_; int num_total_bin_; }; } // namespace LightGBM #endif // LIGHTGBM_FEATURE_GROUP_H_
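A minimal usage sketch of the class above (assumptions: the bin mappers were already built by the dataset loader and the vector holds exactly one mapper per sub-feature; raw is a hypothetical buffer of raw feature values, one row per data point). The flow mirrors how Dataset uses a group: construct, PushData for every value, then FinishLoad.

#include <memory>
#include <vector>
#include "feature_group.h"

void ingest_group(std::vector<std::unique_ptr<LightGBM::BinMapper>>* bin_mappers,
                  const std::vector<std::vector<double>>& raw,   // raw[row][sub_feature]
                  int num_features, LightGBM::data_size_t num_data) {
  // single-value group; bin_mappers->size() must equal num_features
  LightGBM::FeatureGroup group(num_features, /*is_multi_val=*/false, bin_mappers, num_data);
  for (LightGBM::data_size_t row = 0; row < num_data; ++row) {
    for (int f = 0; f < num_features; ++f) {
      // converts the raw value to a bin and pushes it into the group's bin data
      group.PushData(/*tid=*/0, f, row, raw[row][f]);
    }
  }
  group.FinishLoad();   // finalizes the underlying bin data
}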
GB_unop__minv_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__minv_fc32_fc32) // op(A') function: GB (_unop_tran__minv_fc32_fc32) // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = GB_FC32_minv (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_FC32_minv (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = GB_FC32_minv (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__minv_fc32_fc32) ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_FC32_minv (z) ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_FC32_minv (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__minv_fc32_fc32) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
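MINV is the multiplicative inverse, so for single-precision complex inputs the kernel above computes z = 1/x via GB_FC32_minv. A standalone sketch in plain C99 complex arithmetic (ignoring any overflow-aware scaling a production complex divide may use):

#include <complex.h>

// z = 1/x for single-precision complex x: the essence of the MINV unary op above
static float complex minv_fc32_sketch (float complex x)
{
    return 1.0f / x ;
}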
pyfr_gemm_rm.c
/****************************************************************************** * Copyright (c) Intel Corporation - All rights reserved. * * This file is part of the LIBXSMM library. * * * * For information on the license, see the LICENSE file. * * Further information: https://github.com/hfp/libxsmm/ * * SPDX-License-Identifier: BSD-3-Clause * ******************************************************************************/ /* Alexander Heinecke (Intel Corp.) ******************************************************************************/ #include <libxsmm.h> #include <stdlib.h> #include <assert.h> #include <stdio.h> #if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL) # include <mkl.h> #else /* prototypes for GEMM */ void my_dgemm( const int* M, const int* N, const int* K, const double* alpha, const double* a, const int* LDA, const double* b, const int* LDB, const double* beta, double* c, const int* LDC ) { const int my_M = *M; const int my_N = *N; const int my_K = *K; const int my_LDA = *LDA; const int my_LDB = *LDB; const int my_LDC = *LDC; const float my_alpha = *alpha; const float my_beta = *beta; int m = 0, n = 0, k = 0; for ( n = 0; n < my_N; ++n ) { for ( m = 0; m < my_M; ++m ) { c[(n * my_LDC) + m] = my_beta * c[(n * my_LDC) + m]; for ( k = 0; k < my_K; ++k ) { c[(n * my_LDC) + m] += my_alpha * a[(k * my_LDA) + m] * b[(n * my_LDB) + k]; } } } } #endif int main(int argc, char *argv[]) { int n,m,k; int lda,ldb,ldc; double* a; double* b; double* c1; double* c2; libxsmm_timer_tickint l_start, l_end; double l_total = 0.0; int reps, i, j; const int nblock = 16; double alpha = 1.0, beta = 1.0; #if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL) char transa = 'N', transb = 'N'; #endif int l_prefetch_op = LIBXSMM_PREFETCH_NONE; libxsmm_dmmfunction kernel = NULL; if (argc != 5) { assert(0 < argc); fprintf(stderr, "Invalid: try %s M N K reps\n", argv[0]); exit(-1); } m = atoi(argv[1]); n = atoi(argv[2]); k = atoi(argv[3]); reps = atoi(argv[4]); /* this is col-major what you want to use for the sizes in question */ lda = k; ldb = n; ldc = n; if (n % nblock != 0) { fprintf(stderr, "N needs to be divisible by %i\n", nblock); exit(-1); } a = (double*)libxsmm_aligned_malloc(sizeof(double)*lda*m, 64); b = (double*)libxsmm_aligned_malloc(sizeof(double)*ldb*k, 64); c1 = (double*)libxsmm_aligned_malloc(sizeof(double)*ldc*m, 64); c2 = (double*)libxsmm_aligned_malloc(sizeof(double)*ldc*m, 64); #pragma omp parallel for for (i = 0; i < lda*m; i++) { a[i] = libxsmm_rng_f64(); } #pragma omp parallel for for (i = 0; i < ldb*k; i++) { b[i] = libxsmm_rng_f64(); } #pragma omp parallel for for (i = 0; i < ldc*m; i++) { c1[i] = 0; c2[i] = 0; } /* JIT Kernel */ kernel = libxsmm_dmmdispatch(nblock, m, k, &ldb, &lda, &ldc, NULL, NULL, NULL, &l_prefetch_op ); if (kernel == 0) { printf("JIT failed, exiting\n"); exit(-1); } /* init MKL */ #if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL) dgemm(&transb, &transa, &n, &m, &k, &alpha, b, &ldb, a, &lda, &beta, c1, &ldc); #else my_dgemm(&n, &m, &k, &alpha, b, &ldb, a, &lda, &beta, c1, &ldc); #endif #pragma omp parallel for for (i = 0; i < ldc*m; i++) { c1[i] = 0; c2[i] = 0; } l_start = libxsmm_timer_tick(); for ( j = 0; j < reps; j++ ) { #if defined(__MKL) || defined(MKL_DIRECT_CALL_SEQ) || defined(MKL_DIRECT_CALL) dgemm(&transb, &transa, &n, &m, &k, &alpha, b, &ldb, a, &lda, &beta, c1, &ldc); #else my_dgemm(&n, &m, &k, &alpha, b, &ldb, a, &lda, &beta, c1, &ldc); #endif } l_end = 
libxsmm_timer_tick(); l_total = libxsmm_timer_duration(l_start, l_end); fprintf(stdout, "time[s] MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps ); fprintf(stdout, "GFLOPS MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s MKL (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total ); l_start = libxsmm_timer_tick(); for ( j = 0; j < reps; j++ ) { #pragma omp parallel for private(i) for ( i = 0; i < n; i+=nblock) { kernel( b+i, a, c2+i, NULL, NULL, NULL ); } l_end = libxsmm_timer_tick(); } l_total = libxsmm_timer_duration(l_start, l_end); fprintf(stdout, "time[s] libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, l_total/(double)reps ); fprintf(stdout, "GFLOPS libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, (2.0 * (double)m * (double)n * (double)k * (double)reps * 1.0e-9) / l_total ); fprintf(stdout, "GB/s libxsmm (RM, M=%i, N=%i, K=%i): %f\n", m, n, k, ((double)sizeof(double) * (((double)m * (double)n) + ((double)k * (double)n)) * (double)reps * 1.0e-9) / l_total ); /* test result */ double max_error = 0.0; for ( i = 0; i < ldc*m; i++) { if (max_error < fabs(c1[i] - c2[i])) { max_error = fabs(c1[i] - c2[i]); } } printf("max error: %f\n\n", max_error); return EXIT_SUCCESS; }
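The benchmark keeps its buffers row-major but calls a column-major GEMM with the operands swapped, because col-major C^T = B^T * A^T reproduces row-major C = A*B; that is why dgemm/my_dgemm above receive (n, m, k, b, ldb, a, lda, c, ldc). A standalone reference sketch of the intended result (hypothetical helper; row-major A is MxK, B is KxN, C is MxN, alpha = beta = 1 and C zero-initialized as in the benchmark):

void rowmajor_gemm_reference (int M, int N, int K,
                              const double *A, const double *B, double *C)
{
    for (int i = 0 ; i < M ; i++) {
        for (int j = 0 ; j < N ; j++) {
            double acc = 0.0 ;
            for (int p = 0 ; p < K ; p++) {
                acc += A [i*K + p] * B [p*N + j] ;
            }
            C [i*N + j] = acc ;    // same values the swapped column-major call produces
        }
    }
}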
mk_id_list.c
/* gcc -fopenmp -lm -lgsl -lgslcblas -lgad -L ./ mk_id_list.c -o ~/bin/mk_id_list OctTree.o */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <time.h> #include <string.h> #include "libgad.h" #include "OctTree.h" #define USE 63 // #ifdef LONGIDS // typedef unsigned long long IDtype; // #else // typedef unsigned int IDtype; // #endif int cmp_IDtype (const void *first, const void *second) { IDtype *a = (IDtype *)first; IDtype *b = (IDtype *)second; if (*a > *b) return 1; else if (*a < *b) return -1; else return 0; } const int MAX_HALO_ID = 100000; const float EXTEND = 500; const float TRACE_FACTOR = 2.; const float SEARCHDIST = 25; const float MAXDIST = 3000.; const float SOFTENING = 1.0; void usage() { fprintf(stderr," Make ID list - reads a list of IDs, finds center of corresponding particles in snapshot, adds particles to list that are inside trace_factor * R200 and overwrites ID list\n"); fprintf(stderr,"\t-o \t<ID list base file name>\n"); fprintf(stderr,"\t-i \t<snaphsot file name>\n"); fprintf(stderr,"\t-tf \t<trace_factor>\n"); fprintf(stderr,"\t-max\t<max Halo ID>\n"); fprintf(stderr,"\t-sd \t<search distance>\n"); fprintf(stderr,"\t-md \t<max distance from cm>\n"); fprintf(stderr,"\t-cd \t<convert distances (factor needed to get to kpc, for calculating the critical density>\n"); fprintf(stderr,"\t-c \t<write basic properties to catalogue file>\n"); fprintf(stderr,"\t-pos\t<write position file (to use with MUSIC)>\n"); fprintf(stderr,"\t-use\t<bitcode particle types to use (default 2¹)>\n\n"); exit(1); } int main (int argc, char *argv[]) { FILE *fp; char infile[256]; char outbase[256]; char catname[256]; char **output; int i,j,k, usepart; struct gadpart *part, *wpart; struct header head; int max_halo_id = MAX_HALO_ID; float extend = EXTEND; float trace_factor = TRACE_FACTOR; int verbose = 0; float searchdist = SEARCHDIST; float def_maxdist = MAXDIST; double conv_dist = 1.; int start_id = 0; int num_halos = 0; int write_catalogue = 0; int write_gad_file = 0; int outpos = 0; double soft = SOFTENING; strcpy(outbase,"idlist"); i=1; usepart=USE; if (1==argc) usage(); while (i<argc) { if (!strcmp(argv[i],"-i")) { i++; strcpy(infile,argv[i]); i++; } else if (*argv[i]!='-') { strcpy(infile,argv[i]); i++; } else if (!strcmp(argv[i],"-o")) { i++; strcpy(outbase,argv[i]); i++; } else if (!strcmp(argv[i],"-c")) { i++; strcpy(catname,argv[i]); write_catalogue = 1; i++; } else if (!strcmp(argv[i],"-gad")) { i++; write_gad_file = 1; } else if (!strcmp(argv[i],"-pos")) { i++; outpos = 1; } else if (!strcmp(argv[i],"-v")) { i++; verbose = 1; } else if (!strcmp(argv[i],"-s")) { i++; start_id = atoi(argv[i]); i++; } else if (!strcmp(argv[i],"-max")) { i++; max_halo_id = atoi(argv[i]); i++; } else if (!strcmp(argv[i],"-e")) { i++; extend = atof(argv[i]); i++; } else if (!strcmp(argv[i],"-sfl")) { i++; soft = atof(argv[i]); i++; } else if (!strcmp(argv[i],"-md")) { i++; def_maxdist = atof(argv[i]); i++; } else if (!strcmp(argv[i],"-cd")) { i++; conv_dist = atof(argv[i]); i++; } else if (!strcmp(argv[i],"-tf")) { i++; trace_factor = atof(argv[i]); i++; } else if (!strcmp(argv[i],"-sd")) { i++; searchdist = atof(argv[i]); i++; } else if (!strcmp(argv[i],"-use")) { i++; if (!strcmp(argv[i],"all")) usepart=63; else usepart=atoi(argv[i]); i++; } else { usage(); } } output = (char**) malloc (sizeof(char*) * (max_halo_id+1)); for (i = 0; i < (max_halo_id+1); i++ ) output[i] = (char*) malloc(sizeof(char) * 256); if (verbose) { printf("reading snapshot\n"); fflush(stdout); } unsigned int 
numpart_all; if (!(numpart_all=readgadget_part(infile, &head, &part))) { extern int libgaderr; printf("error reading file %s\nError Code %d\n",infile, libgaderr); exit(1); } extern float BOXSIZE; BOXSIZE = head.boxsize; if (verbose) { printf("building OctTree\n"); fflush(stdout); } OctNode *onode; set_periodic_boundaries(head); extern double crit_dens; crit_dens *= pow(conv_dist,3); buildTreeBox(&onode, part, head, 200, 15); if (verbose) printf("numpart %d \nchecktree %d\n", numpart_all, checkOctTree(onode)); /********************************************************************* Program code goes here *********************************************************************/ if (verbose) { printf("main loop...\n"); fflush(stdout); } int haloid; #pragma omp parallel for private (i,j,k) reduction (+ : num_halos) for ( haloid = start_id; haloid <= max_halo_id; haloid++ ) { char idlistname[128]; sprintf(idlistname, "%s_%d", outbase, haloid); char posfilename[128]; sprintf(posfilename, "%s_positions_%d", outbase, haloid); FILE *fp = fopen(idlistname, "rb"); if (fp == NULL) { sprintf(output[haloid], ""); continue; } num_halos++; int numids=0; fltarr center; IDtype *idlist; fltarr *pos = NULL; float maxdist = 0; fread(&numids, sizeof(int), 1, fp); fread(center, sizeof(float), 3, fp); if (verbose) { printf("haloid %d | numids %d | center %g %g %g\n", haloid, numids, center[0], center[1], center[2]); fflush(stdout); } if (numids) { fread(&maxdist, sizeof(float), 1, fp); idlist = calloc(numids, sizeof(IDtype)); fread(&idlist[0], sizeof(IDtype), numids, fp); qsort(idlist, numids, sizeof(IDtype), cmp_IDtype); if (outpos) pos = (fltarr*) calloc(numids, sizeof(fltarr)); } if (maxdist == 0) maxdist = def_maxdist; fclose(fp); gadpart **part_pnt; unsigned int npart=0; unsigned int bsize=0; findParticles( onode, center, (maxdist + extend), &part_pnt, &npart, &bsize); if (verbose) { printf("haloid %d | center %g %g %g | npart %d | maxdist + extend %g\n", haloid, center[0], center[1], center[2], npart, maxdist+extend); // float mindist = distance(part_pnt[0]->pos, center); // for ( i = 0; i < npart; i++ ) // { // float dum = distance(part_pnt[i]->pos, center); // if ( dum < mindist ) mindist = dum; // } // printf("mindist %g\n", mindist); fflush(stdout); } // if (verbose) // { // for ( i = 0; i < 200000; i+=10000 ) // printf("%lu\n", part_pnt[i]->id); // } if (numids) { gadpart_dist *wpart = (gadpart_dist *) malloc (sizeof(gadpart_dist) * numids); if (wpart==NULL) { fprintf(stderr, "unable to allocate memory\n"); exit(1); } int numfnd = 0; for ( i = 0; i < npart; i++ ) { IDtype *fnd; IDtype id_key = part_pnt[i] -> id; fnd = bsearch( &id_key, idlist, numids, sizeof(IDtype), cmp_IDtype); if (fnd != NULL) { if (outpos) { for ( j = 0; j < 3; j++) pos[numfnd][j] = part_pnt[i]->pos[j] / head.boxsize; } wpart[numfnd++].part = *part_pnt[i]; } if (numfnd >= numids) break; } if (verbose) { printf("haloid %d | numfnd %d\n", haloid, numfnd); fflush(stdout); } pcenter(wpart, numfnd, maxdist, center, usepart); free(wpart); } gadpart_dist *wpart = (gadpart_dist *) malloc (sizeof(gadpart_dist) * npart); gadpart *outpart; if (write_gad_file) outpart = (gadpart *) malloc (sizeof(gadpart) * npart); for ( i = 0; i < npart; i++ ) { wpart[i].part = *part_pnt[i]; if (write_gad_file) outpart[i] = *part_pnt[i]; } free(part_pnt); pcenter(wpart, npart, searchdist, center, usepart); if (verbose) { printf("haloid %d | center %g %g %g\n", haloid, center[0], center[1], center[2]); if (write_gad_file) { struct header outhead = head; 
outhead.npart[1]=npart; outhead.nall[1]=npart; char gadfilename[128]; sprintf(gadfilename,"halo_%d.dat", haloid); writegadget_part(gadfilename, outhead, outpart); } } if (write_gad_file) free(outpart); int vcnt = 0; double mvir = 0; qsort(wpart, npart, sizeof(gadpart_dist), cmp_dist); double rvir = r200(wpart, npart, 200, head, &vcnt, &mvir); if (verbose) { printf("haloid %d | vcnt %d | rvir %g\n", haloid, vcnt, rvir); printf("haloid %d | center %g %g %g\n", haloid, center[0], center[1], center[2]); } if ((write_catalogue) && (rvir > 0)) { double nfw_c; double par[2]; par[0]=0.005; par[1]= 20. / conv_dist; double rcs; nfw_c = nfwfit(par, wpart, npart, rvir, soft, &rcs); double xoff = xoffset(wpart, npart, rvir, center); sprintf(output[haloid], "%8d\t%8g\t%8g\t%8g\t%8g\t%8g\t%8g\t%8g\t%8g\n", haloid, mvir, rvir, nfw_c, rcs, xoff, center[0], center[1], center[2]); } else { rvir=0; sprintf(output[haloid], "%8d\t%8g\t%8g\t%8g\t%8g\t%8g\t%8g\t%8g\t%8g\n", haloid, rvir, rvir, rvir, rvir, rvir, center[0], center[1], center[2]); } double dist = 0; i = 0; int num_new_ids = 0; fltarr *newpos = NULL; if (outpos) newpos = (fltarr*) calloc (npart, sizeof(IDtype)); IDtype *newids = (IDtype*) calloc (npart, sizeof(IDtype)); while (dist < (trace_factor * rvir)) { dist = wpart[i].dist; IDtype *fnd; IDtype id_key = wpart[i].part.id; fnd = bsearch( &id_key, idlist, numids, sizeof(IDtype), cmp_IDtype); if (fnd == NULL) { if (outpos) { for (j = 0; j < 3; j++) newpos[num_new_ids][j] = wpart[i].part.pos[j] / head.boxsize; } newids[num_new_ids++] = id_key; } i++; if (i == npart) break; } free(wpart); maxdist = dist; if (verbose) printf("haloid %d |#%d particles added | maxdist: %g \n", haloid, num_new_ids, maxdist); fp = fopen(idlistname, "w"); int totnumids = numids + num_new_ids; fwrite(&totnumids, sizeof(int), 1, fp); fwrite(center, sizeof(float), 3, fp); fwrite(&maxdist, sizeof(float), 1, fp); if (numids) fwrite(&idlist[0], sizeof(IDtype), numids, fp); if (num_new_ids) fwrite(&newids[0], sizeof(IDtype), num_new_ids, fp); fclose(fp); if (outpos) { fp = fopen(posfilename, "w"); fwrite(&totnumids, sizeof(int), 1, fp); if (numids) fwrite(&pos[0], sizeof(fltarr), numids, fp); if (num_new_ids) fwrite(&newpos[0], sizeof(fltarr), num_new_ids, fp); fclose(fp); } if (numids) { free(idlist); if (outpos) free(pos); } if (npart) { free(newids); if (outpos) free(newpos); } } if (write_catalogue) { fp = fopen(catname, "w"); for (haloid = start_id; haloid <= max_halo_id; haloid++ ) { fprintf(fp, "%s", output[haloid]); } fclose(fp); } for (i = 0; i < (max_halo_id+1); i++ ) free(output[i]); free(output); return 0; }
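The main loop above relies on a sort-once, search-many membership test: each halo's ID list is sorted with qsort and candidate particle IDs are then probed with bsearch using the same comparator. A minimal sketch of that pattern (hypothetical helper; assumes idlist was sorted with cmp_IDtype as above):

#include <stdlib.h>

/* returns nonzero if key is present in the qsort-ed idlist of numids entries */
static int id_in_list (IDtype key, const IDtype *idlist, int numids)
{
    return bsearch (&key, idlist, numids, sizeof (IDtype), cmp_IDtype) != NULL ;
}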
fill.c
/* Fill the nonzero term of the A matrix */ #include <petscmat.h> #include <petscvec.h> #include <petscsnes.h> #include <../src/mat/impls/baij/seq/baij.h> #include <omp.h> #include <ktime.h> #include <geometry.h> #ifdef __USE_HW_COUNTER #include <perf.h> #include <kperf.h> #endif #include <phy.h> #include <kernel.h> int fill_mat(struct fill *restrict fill) { #ifdef __USE_HW_COUNTER const struct fd fd = fill->perf_counters->fd; struct counters start; perf_read(fd, &start); const uint64_t icycle = __rdtsc(); #endif struct ktime ktime; setktime(&ktime); const double *restrict q = fill->q; const struct geometry *restrict g = fill->g; const struct ivals *restrict iv = fill->iv; const struct ts *restrict ts = fill->ts; Mat A = fill->A; int ierr; uint32_t i; Mat_SeqBAIJ *restrict a = (Mat_SeqBAIJ *) A->data; size_t sz = (a->bs2 * a->i[a->mbs]); __assume_aligned(a->a, 64); memset(a->a, 0, sz * sizeof(double)); size_t nnodes = g->n->sz; size_t bsz = g->c->bsz; double cfl = ts->cfl; double *restrict area = g->n->area; double *restrict cdt = ts->cdt; /* Loop over the nodes to compute the local indices of each row and column to insert the values using PETSc routine */ #pragma omp parallel for for(i = 0; i < nnodes; i++) { double tmp = area[i] / (cfl * cdt[i]); uint32_t j; for(j = 0; j < bsz; j++) { uint32_t idx = j + bsz * i; /* Inserts or adds values into certain locations of a matrix, using a local ordering of the nodes */ MatSetValues(A, 1, (const int *) &idx, 1, (const int *) &idx, (const double *) &tmp, ADD_VALUES); } } struct edge *restrict eptr = g->e->eptr; struct xyzn *restrict xyzn = g->e->xyzn; uint32_t *restrict ie = g->s->ie; uint32_t *restrict part = g->s->part; #pragma omp parallel { uint32_t t = omp_get_thread_num(); uint32_t ie0 = ie[t]; uint32_t ie1 = ie[t+1]; uint32_t i; for(i = ie0; i < ie1; i++) { uint32_t n0 = eptr->n0[i]; uint32_t n1 = eptr->n1[i]; double xn = xyzn->x0[i]; double yn = xyzn->x1[i]; double zn = xyzn->x2[i]; double ln = xyzn->x3[i]; /* Now lets get our other 2 vectors For first vector, use {1,0,0} and subtract off the component in the direction of the face normal. 
If the inner product of {1,0,0} is close to unity, use {0,1,0} */ double dot = xn; double X1, Y1, Z1; if(fabs(dot) < 0.95f) { X1 = 1.f - dot * xn; Y1 = - dot * yn; Z1 = - dot * zn; } else { dot = yn; X1 = - dot * xn; Y1 = 1.f - dot * yn; Z1 = - dot * zn; } /* Normalize the first vector */ double size = X1 * X1; size += Y1 * Y1; size += Z1 * Z1; size = sqrt(size); X1 /= size; Y1 /= size; Z1 /= size; /* Take cross-product of normal and V1 to get V2 */ double X2 = yn * Z1; X2 -= zn * Y1; double Y2 = zn * X1; Y2 -= xn * Z1; double Z2 = xn * Y1; Z2 -= yn * X1; /* Variables on left */ // Velocity u double uL = q[bsz * n0 + 1]; // Velocity v double vL = q[bsz * n0 + 2]; // Velocity w double wL = q[bsz * n0 + 3]; double ubarL = xn * uL; ubarL += yn * vL; ubarL += zn * wL; /* Variables on right */ // Velocity u double uR = q[bsz * n1 + 1]; // Velocity v double vR = q[bsz * n1 + 2]; // Velocity w double wR = q[bsz * n1 + 3]; double ubarR = xn * uR; ubarR += yn * vR; ubarR += zn * wR; /* Now compute eigenvalues and |A| from averaged variables Avergage variables */ double u = 0.5f * (uL + uR); double v = 0.5f * (vL + vR); double w = 0.5f * (wL + wR); double ubar = xn * u; ubar += yn * v; ubar += zn * w; double c2 = ubar * ubar + BETA; double c = sqrt(c2); /* Put in the eigenvalue smoothing stuff */ double eig1 = ln * fabs(ubar); double eig2 = ln * fabs(ubar); double eig3 = ln * fabs(ubar + c); double eig4 = ln * fabs(ubar - c); double phi1 = xn * BETA; phi1 += u * ubar; double phi2 = yn * BETA; phi2 += v * ubar; double phi3 = zn * BETA; phi3 += w * ubar; double phi4 = Y2 * phi3; phi4 -= Z2 * phi2; double phi5 = Z2 * phi1; phi5 -= X2 * phi3; double phi6 = X2 * phi2; phi6 -= Y2 * phi1; double phi7 = Z1 * phi2; phi7 -= Y1 * phi3; double phi8 = X1 * phi3; phi8 -= Z1 * phi1; double phi9 = Y1 * phi1; phi9 -= X1 * phi2; /* Components of T(inverse) (call this y) */ double c2inv = 1.f / c2; double y11 = u * phi4; y11 += v * phi5; y11 += w * phi6; y11 = -c2inv * y11 / BETA; double y21 = u * phi7; y21 += v * phi8; y21 += w * phi9; y21 = -c2inv * y21 / BETA; double y31 = c2inv * (c - ubar); y31 = 0.5f * y31 / BETA; double y41 = c2inv * (c + ubar); y41 = -0.5f * y41 / BETA; double y12 = c2inv * phi4; double y22 = c2inv * phi7; double y32 = c2inv * 0.5f * xn; double y42 = c2inv * 0.5f * xn; double y13 = c2inv * phi5; double y23 = c2inv * phi8; double y33 = c2inv * 0.5f * yn; double y43 = c2inv * 0.5f * yn; double y14 = c2inv * phi6; double y24 = c2inv * phi9; double y34 = c2inv * 0.5f * zn; double y44 = c2inv * 0.5f * zn; /* Now get elements of T */ double t13 = c * BETA; double t23 = u * (ubar + c); t23 += xn * BETA; double t33 = v * (ubar + c); t33 += yn * BETA; double t43 = w * (ubar + c); t43 += zn * BETA; double t14 = -c * BETA; double t24 = u * (ubar - c); t24 += xn * BETA; double t34 = v * (ubar - c); t34 += yn * BETA; double t44 = w * (ubar - c); t44 += zn * BETA; /* Compute T * |lambda| * T(inv) */ double a11 = eig3 * t13 * y31; a11 += eig4 * t14 * y41; double a12 = eig3 * t13 * y32; a12 += eig4 * t14 * y42; double a13 = eig3 * t13 * y33; a13 += eig4 * t14 * y43; double a14 = eig3 * t13 * y34; a14 += eig4 * t14 * y44; double a21 = eig1 * X1 * y11; a21 += eig2 * X2 * y21; a21 += eig3 * t23 * y31; a21 += eig4 * t24 * y41; double a22 = eig1 * X1 * y12; a22 += eig2 * X2 * y22; a22 += eig3 * t23 * y32; a22 += eig4 * t24 * y42; double a23 = eig1 * X1 * y13; a23 += eig2 * X2 * y23; a23 += eig3 * t23 * y33; a23 += eig4 * t24 * y43; double a24 = eig1 * X1 * y14; a24 += eig2 * X2 * y24; a24 += eig3 * t23 * y34; a24 
+= eig4 * t24 * y44; double a31 = eig1 * Y1 * y11; a31 += eig2 * Y2 * y21; a31 += eig3 * t33 * y31; a31 += eig4 * t34 * y41; double a32 = eig1 * Y1 * y12; a32 += eig2 * Y2 * y22; a32 += eig3 * t33 * y32; a32 += eig4 * t34 * y42; double a33 = eig1 * Y1 * y13; a33 += eig2 * Y2 * y23; a33 += eig3 * t33 * y33; a33 += eig4 * t34 * y43; double a34 = eig1 * Y1* y14; a34 += eig2 * Y2 * y24; a34 += eig3 * t33 * y34; a34 += eig4 * t34 * y44; double a41 = eig1 * Z1 * y11; a41 += eig2 * Z2 * y21; a41 += eig3 * t43 * y31; a41 += eig4 * t44 * y41; double a42 = eig1 * Z1 * y12; a42 += eig2 * Z2 * y22; a42 += eig3 * t43 * y32; a42 += eig4 * t44 * y42; double a43 = eig1 * Z1 * y13; a43 += eig2 * Z2 * y23; a43 += eig3 * t43 * y33; a43 += eig4 * t44 * y43; double a44 = eig1 * Z1 * y14; a44 += eig2 * Z2 * y24; a44 += eig3 * t43 * y34; a44 += eig4 * t44 * y44; /* Regular Jacobians on left: Form 0.5 * (A + |A|) */ double lb = ln * BETA; double lx = ln * xn; double ly = ln * yn; double lz = ln * zn; double val[4][8]; val[0][0] = 0.5f * a11; val[0][1] = 0.5f * ((lb * xn) + a12); val[0][2] = 0.5f * ((lb * yn) + a13); val[0][3] = 0.5f * ((lb * zn) + a14); val[1][0] = 0.5f * (lx + a21); val[1][1] = 0.5f * ((ln * (ubarL + xn * uL)) + a22); val[1][2] = 0.5f * ((ly * uL) + a23); val[1][3] = 0.5f * ((lz * uL) + a24); val[2][0] = 0.5f * (ly + a31); val[2][1] = 0.5f * ((lx * vL) + a32); val[2][2] = 0.5f * ((ln * (ubarL + yn * vL)) + a33); val[2][3] = 0.5f * ((lz * vL) + a34); val[3][0] = 0.5f * (lz + a41); val[3][1] = 0.5f * ((lx * wL) + a42); val[3][2] = 0.5f * ((ly * wL) + a43); val[3][3] = 0.5f * ((ln * (ubarL + zn * wL)) + a44); /* Regular Jaobians on right */ val[0][4] = 0.5f * -a11; val[0][5] = 0.5f * ((lb * xn) - a12); val[0][6] = 0.5f * ((lb * yn) - a13); val[0][7] = 0.5f * ((lb * zn) - a14); val[1][4] = 0.5f * (lx - a21); val[1][5] = 0.5f * ((ln * (ubarR + xn * uR)) - a22); val[1][6] = 0.5f * ((ly * uR) - a23); val[1][7] = 0.5f * ((lz * uR) - a24); val[2][4] = 0.5f * (ly - a31); val[2][5] = 0.5f * ((lx * vR) - a32); val[2][6] = 0.5f * ((ln * (ubarR + yn * vR)) - a33); val[2][7] = 0.5f * ((lz * vR) - a34); val[3][4] = 0.5f * (lz - a41); val[3][5] = 0.5f * ((lx * wR) - a42); val[3][6] = 0.5f * ((ly * wR) - a43); val[3][7] = 0.5f * ((ln * (ubarR + zn * wR)) - a44); uint32_t idxn[2]; idxn[0] = n0; idxn[1] = n1; if(part[n0] == t) { MatSetValuesBlocked(A, 1, (const int *) &n0, 2, (const int *) idxn, (const double *) val, ADD_VALUES); } if(part[n1] == t) { /* Exchange elements in place */ uint32_t j; for(j = 0; j < bsz; j++) { uint32_t k; for(k = 0; k < 8; k++) val[j][k] = -val[j][k]; } MatSetValuesBlocked(A, 1, (const int *) &n1, 2, (const int *) idxn, (const double *) val, ADD_VALUES); } } } size_t nsnodes = g->b->s->n->sz; uint32_t *restrict nsptr = g->b->s->n->nptr; struct xyz *restrict s_xyz = g->b->s->n->xyz; /* Solid boundary points */ #pragma omp parallel for for(i = 0; i < nsnodes; i++) { uint32_t n = nsptr[i]; double v[3]; v[0] = s_xyz->x0[i]; v[1] = s_xyz->x1[i]; v[2] = s_xyz->x2[i]; uint32_t idxm[3]; idxm[0] = bsz * n + 1; idxm[1] = bsz * n + 2; idxm[2] = bsz * n + 3; uint32_t idxn = bsz * n; MatSetValues(A, 3, (const int *) idxm, 1, (const int *) &idxn, (const double *) v, ADD_VALUES); } size_t nfnodes = g->b->f->n->sz; uint32_t *restrict nfptr = g->b->f->n->nptr; struct xyz *restrict f_xyz = g->b->f->n->xyz; /* Free boundary points */ #pragma omp parallel for for(i = 0; i < nfnodes; i++) { uint32_t n = nfptr[i]; double xn = f_xyz->x0[i]; double yn = f_xyz->x1[i]; double zn = f_xyz->x2[i]; double ln = 
sqrt(xn * xn + yn * yn + zn * zn); xn /= ln; yn /= ln; zn /= ln; /* 9 FLOPS */ /* Now lets get our other 2 vectors For first vector, use {1,0,0} and subtract off the component in the direction of the face normal. If the inner product of {1,0,0} is close to unity, use {0,1,0} */ double dot = xn; double X1, Y1, Z1; if(fabs(dot) < 0.95f) { X1 = 1.f - dot * xn; Y1 = - dot * yn; Z1 = - dot * zn; } else { dot = yn; X1 = - dot * xn; Y1 = 1.f - dot * yn; Z1 = - dot * zn; } /* 6 FLOPS */ /* Normalize the first vector (V1) */ double size = sqrt(X1 * X1 + Y1 * Y1 + Z1 * Z1); X1 /= size; Y1 /= size; Z1 /= size; /* 9 FLOPS */ /* Take cross-product of normal with V1 to get V2 */ double X2 = yn * Z1 - zn * Y1; double Y2 = zn * X1 - xn * Z1; double Z2 = xn * Y1 - yn * X1; /* 9 FLOPS */ /* Calculate elements of T and T(inverse) evaluated at freestream */ double ubar0 = xn * iv->u; ubar0 += yn * iv->v; ubar0 += zn * iv->w; double c20 = ubar0 * ubar0 + BETA; double c0 = sqrt(c20); double phi1 = xn * BETA; phi1 += iv->u * ubar0; double phi2 = yn * BETA; phi2 += iv->v * ubar0; double phi3 = zn * BETA; phi3 += iv->w * ubar0; double phi4 = Y2 * phi3; phi4 -= Z2 * phi2; double phi5 = Z2 * phi1; phi5 -= X2 * phi3; double phi6 = X2 * phi2; phi6 -= Y2 * phi1; double phi7 = Z1 * phi2; phi7 -= Y1 * phi3; double phi8 = X1 * phi3; phi8 -= Z1 * phi1; double phi9 = Y1 * phi1; phi9 -= X1 * phi2; /* 9 * 3 + 8 FLOPS */ double t13 = c0 * BETA; double t23 = iv->u * (ubar0 + c0); t23 += xn * BETA; double t33 = iv->v * (ubar0 + c0); t33 += yn * BETA; double t43 = iv->w * (ubar0 + c0); t43 += zn * BETA; double t14 = -c0 * BETA; double t24 = iv->u * (ubar0 - c0); t24 += xn * BETA; double t34 = iv->v * (ubar0 - c0); t34 += yn * BETA; double t44 = iv->w * (ubar0 - c0); t44 += zn * BETA; double ti11 = iv->u * phi4; ti11 += iv->v * phi5; ti11 += iv->w * phi6; ti11 = -ti11 / BETA / c20; double ti21 = iv->u * phi7; ti21 += iv->v * phi8; ti21 += iv->w * phi9; ti21 = -ti21 / BETA / c20; double ti31 = (c0 - ubar0) / (2.f * BETA * c20); double ti41 = -(c0 + ubar0) / (2.f * BETA * c20); double ti12 = phi4 / c20; double ti22 = phi7 / c20; double ti32 = 0.5f * xn / c20; double ti42 = 0.5f * xn / c20; double ti13 = phi5 / c20; double ti23 = phi8 / c20; double ti33 = 0.5f * yn / c20; double ti43 = 0.5f * yn / c20; double ti14 = phi6 / c20; double ti24 = phi9 / c20; double ti34 = 0.5f * zn / c20; double ti44 = 0.5f * zn / c20; /* 27 + 16 + 9 + 6 + 6 + 6 FLOPS */ /* Now, get the variables on the "inside" */ double pi = q[bsz * n + 0]; double ui = q[bsz * n + 1]; double vi = q[bsz * n + 2]; double wi = q[bsz * n + 3]; double un = xn * ui; un += yn * vi; un += zn * wi; /* 5 FLOPS */ /* If ubar is negative, take the reference condition from outside */ double pr, prp, ur, uru, vr, vrv, wr, wrw; if(un > 0.f) { pr = pi; prp = 1.f; ur = ui; uru = 1.f; vr = vi; vrv = 1.f; wr = wi; wrw = 1.f; } else { pr = iv->p; prp = 0.f; ur = iv->u; uru = 0.f; vr = iv->v; vrv = 0.f; wr = iv->w; wrw = 0.f; } /* Set rhs */ double rhs1 = ti11 * pr; rhs1 += ti12 * ur; rhs1 += ti13 * vr; rhs1 += ti14 * wr; double rhs1p = ti11 * prp; double rhs1u = ti12 * uru; double rhs1v = ti13 * vrv; double rhs1w = ti14 * wrw; double rhs2 = ti21 * pr; rhs2 += ti22 * ur; rhs2 += ti23 * vr; rhs2 += ti24 * wr; double rhs2p = ti21 * prp; double rhs2u = ti22 * uru; double rhs2v = ti23 * vrv; double rhs2w = ti24 * wrw; double rhs3 = ti31 * pi; rhs3 += ti32 * ui; rhs3 += ti33 * vi; rhs3 += ti34 * wi; double rhs4 = ti41 * iv->p; rhs4 += ti42 * iv->u; rhs4 += ti43 * iv->v; rhs4 += ti44 * iv->w; /* 
12 + 24 FLOPS */ /* Now do matrix multiplication to get values on boundary */ double pb = t13 * rhs3; pb += t14 * rhs4; double pbp = t13 * ti31; double pbu = t13 * ti32; double pbv = t13 * ti33; double pbw = t13 * ti34; double ub = X1 * rhs1; ub += X2 * rhs2; ub += t23 * rhs3; ub += t24 * rhs4; double ubp = X1 * rhs1p; ubp += X2 * rhs2p; ubp += t23 * ti31; double ubu = X1 * rhs1u; ubu += X2 * rhs2u; ubu += t23 * ti32; double ubv = X1 * rhs1v; ubv += X2 * rhs2v; ubv += t23 * ti33; double ubw = X1 * rhs1w; ubw += X2 * rhs2w; ubw += t23 * ti34; double vb = Y1 * rhs1; vb += Y2 * rhs2; vb += t33 * rhs3; vb += t34 * rhs4; double vbp = Y1 * rhs1p; vbp += Y2 * rhs2p; vbp += t33 * ti31; double vbu = Y1 * rhs1u; vbu += Y2 * rhs2u; vbu += t33 * ti32; double vbv = Y1 * rhs1v; vbv += Y2 * rhs2v; vbv += t33 * ti33; double vbw = Y1 * rhs1w; vbw += Y2 * rhs2w; vbw += t33 * ti34; double wb = Z1 * rhs1; wb += Z2 * rhs2; wb += t43 * rhs3; wb += t44 * rhs4; double wbp = Z1 * rhs1p; wbp += Z2 * rhs2p; wbp += t43 * ti31; double wbu = Z1 * rhs1u; wbu += Z2 * rhs2u; wbu += t43 * ti32; double wbv = Z1 * rhs1v; wbv += Z2 * rhs2v; wbv += t43 * ti33; double wbw = Z1 * rhs1w; wbw += Z2 * rhs2w; wbw += t43 * ti34; /* 5 * 15 + 6 + 5 + 2 FLOPS */ double unb = xn * ub; unb += yn * vb; unb += zn * wb; double unbp = xn * ubp; unbp += yn * vbp; unbp += zn * wbp; double unbu = xn * ubu; unbu += yn * vbu; unbu += zn * wbu; double unbv = xn * ubv; unbv += yn * vbv; unbv += zn * wbv; double unbw = xn * ubw; unbw += yn * vbw; unbw += zn * wbw; /* 5 * 5 FLOPS */ /* Now add contribution to lhs */ double v[16]; v[0] = ln * BETA * unbp; v[1] = ln * BETA * unbu; v[2] = ln * BETA * unbv; v[3] = ln * BETA * unbw; v[4] = ln * (ub * unbp + unb * ubp + xn * pbp); v[5] = ln * (ub * unbu + unb * ubu + xn * pbu); v[6] = ln * (ub * unbv + unb * ubv + xn * pbv); v[7] = ln * (ub * unbw + unb * ubw + xn * pbw); v[8] = ln * (vb * unbp + unb * vbp + yn * pbp); v[9] = ln * (vb * unbu + unb * vbu + yn * pbu); v[10] = ln * (vb * unbv + unb * vbv + yn * pbv); v[11] = ln * (vb * unbw + unb * vbw + yn * pbw); v[12] = ln * (wb * unbp + unb * wbp + zn * pbp); v[13] = ln * (wb * unbu + unb * wbu + zn * pbu); v[14] = ln * (wb * unbv + unb * wbv + zn * pbv); v[15] = ln * (wb * unbw + unb * wbw + zn * pbw); /* 6 * 12 + 8 FLOPS */ MatSetValuesBlocked(A, 1, (const int *) &n, 1, (const int *) &n, (const double *) v, ADD_VALUES); } ierr = MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr); ierr = MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY); CHKERRQ(ierr); compute_time(&ktime, &fill->t->fill); #ifdef __USE_HW_COUNTER const uint64_t cycle = __rdtsc() - icycle; struct counters end; perf_read(fd, &end); struct tot tot; perf_calc(start, end, &tot); fill->perf_counters->ctrs->jacobian.cycles += cycle; fill->perf_counters->ctrs->jacobian.tot.imcR += tot.imcR; fill->perf_counters->ctrs->jacobian.tot.imcW += tot.imcW; fill->perf_counters->ctrs->jacobian.tot.edcR += tot.edcR; fill->perf_counters->ctrs->jacobian.tot.edcW += tot.edcW; #endif return 0; }
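The interior-edge loop above partitions work statically through the ie[] offsets: thread t walks edges ie[t] .. ie[t+1]-1 and inserts a node's block row only when part[node] == t, so each row is updated by exactly one thread. A minimal standalone sketch of that partitioning pattern (hypothetical names, not part of fill.c):

#include <stdint.h>
#include <omp.h>

/* ie[] holds nthreads+1 edge offsets; n0[]/n1[] are the edge endpoints and
 * part[] records which thread owns each node's rows. */
void for_each_owned_edge (const uint32_t *ie, const uint32_t *part,
                          const uint32_t *n0, const uint32_t *n1,
                          void (*update_row)(uint32_t row, uint32_t edge))
{
    #pragma omp parallel
    {
        uint32_t t = (uint32_t) omp_get_thread_num () ;
        for (uint32_t e = ie [t] ; e < ie [t + 1] ; e++)
        {
            if (part [n0 [e]] == t) update_row (n0 [e], e) ;   // row n0 owned by this thread
            if (part [n1 [e]] == t) update_row (n1 [e], e) ;   // row n1 owned by this thread
        }
    }
}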
softmax-inl.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * \file softmax-inl.h * \brief */ #ifndef MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #define MXNET_OPERATOR_NN_SOFTMAX_INL_H_ #include <algorithm> #include <string> #include <utility> #include <vector> #include <type_traits> #include "../mxnet_op.h" #include "../operator_common.h" #include "../tensor/broadcast_reduce_op.h" using mshadow::red::limits::MinValue; namespace mxnet { namespace op { namespace mxnet_op { struct softmax_fwd { template <typename AType> MSHADOW_XINLINE static AType Map(float a, AType b) { return AType(expf(a) / b); } template <typename AType> MSHADOW_XINLINE static AType Map(double a, AType b) { return AType(exp(a) / b); } }; struct log_softmax_fwd { template <typename DType> MSHADOW_XINLINE static float Map(DType a, float b) { return a - logf(b); } template <typename DType> MSHADOW_XINLINE static double Map(DType a, double b) { return a - log(b); } }; template <typename OP, bool negate, typename AType, typename DType, typename OType, typename IType, int ndim> inline void Softmax(Stream<cpu>* s, DType* in, OType* out, IType* length, Shape<ndim> shape, int axis, const DType temperature) { index_t M = shape[axis]; if (M == 0) return; index_t N = shape.Size() / M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; index_t sa = stride[axis]; if (length == nullptr) { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t base = unravel_dot(i, sshape, stride); DType mmax = negate ? -in[base] : in[base]; DType val; for (index_t j = 1; j < M; ++j) { val = negate ? -in[base + j * sa] : in[base + j * sa]; if (mmax < val) mmax = val; } AType sum = AType(0); DType in_val; // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; sum += std::exp(in_val - mmax); } for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; out[base + j * sa] = OP::Map(in_val - mmax, sum); } } else { for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; sum += std::exp((in_val - mmax) / temperature); } for (index_t j = 0; j < M; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; out[base + j * sa] = OP::Map((in_val - mmax) / temperature, sum); } } } } else { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t len = static_cast<index_t>(length[i]); index_t base = unravel_dot(i, sshape, stride); DType mmax = negate ? -in[base] : in[base]; DType val; for (index_t j = 1; j < len; ++j) { val = negate ? 
-in[base + j * sa] : in[base + j * sa]; if (mmax < val) mmax = val; } for (index_t j = len; j < M; ++j) { out[base + j * sa] = OType(0.0f); } AType sum = AType(0); DType in_val; // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime if (temperature == 1.0) { for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; sum += std::exp(in_val - mmax); } for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; out[base + j * sa] = OP::Map(in_val - mmax, sum); } } else { for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; sum += std::exp((in_val - mmax) / temperature); } for (index_t j = 0; j < len; ++j) { in_val = negate ? -in[base + j * sa] : in[base + j * sa]; out[base + j * sa] = OP::Map((in_val - mmax) / temperature, sum); } } } } } struct masked_softmax_where { template <typename DType, int ndim> MSHADOW_XINLINE static void Map(index_t id, DType* out, const bool* cond, const DType* x, const double y, Shape<ndim> data_shape, Shape<ndim> mask_shape) { index_t mask_pos = 0; index_t stride = 1; for (index_t i = ndim - 1, j = id; i >= 0; --i) { auto tmp = j / data_shape[i]; if (mask_shape[i] != 1) { mask_pos += (j - tmp * mask_shape[i]) * stride; } stride *= mask_shape[i]; j = tmp; } KERNEL_ASSIGN(out[id], kWriteTo, (cond[mask_pos] ? x[id] : static_cast<DType>(y))); } }; template <typename OP, bool masked_neg_inf, bool negate, typename AType, typename DType, int ndim> inline void MaskedSoftmax(Stream<cpu>* s, DType* in, DType* out, bool* mask, Shape<ndim> data_shape, Shape<ndim> mask_shape, int axis, const double temperature, bool normalize, const OpContext& ctx) { Tensor<cpu, 1, DType> workspace = ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(data_shape.Size()), s); DType* masked_input = TBlob(workspace).dptr<DType>(); double neg = MinValue<DType>(); Kernel<masked_softmax_where, cpu>::Launch( s, data_shape.Size(), masked_input, mask, in, neg, data_shape, mask_shape); int* max_lenghts = nullptr; double masked_value = 0.0; if (masked_neg_inf) masked_value = -INFINITY; Softmax<OP, negate, AType, DType>( s, masked_input, out, max_lenghts, data_shape, axis, temperature); Kernel<masked_softmax_where, cpu>::Launch( s, data_shape.Size(), out, mask, out, masked_value, data_shape, mask_shape); } struct softmax_bwd { template <typename DType, typename AType> MSHADOW_XINLINE static AType Map(DType ograd, DType out, AType sum) { return AType(out * (ograd - sum)); } }; struct log_softmax_bwd { template <typename AType> MSHADOW_XINLINE static AType Map(float ograd, float out, AType sum) { return AType(ograd - expf(out) * sum); } template <typename AType> MSHADOW_XINLINE static AType Map(double ograd, double out, AType sum) { return AType(ograd - exp(out) * sum); } }; template <typename OP1, typename OP2, int Req, bool negate, typename AType, typename DType, typename OType, typename IType, int ndim> inline void SoftmaxGrad(Stream<cpu>* s, OType* out, OType* ograd, DType* igrad, IType* length, Shape<ndim> shape, int axis, const DType temperature) { index_t M = shape[axis]; if (M == 0) return; index_t N = shape.Size() / M; Shape<ndim> stride = calc_stride(shape); Shape<ndim> sshape = shape; sshape[axis] = 1; index_t sa = stride[axis]; if (length != nullptr) { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t base = unravel_dot(i, sshape, stride); index_t len = static_cast<index_t>(length[i]); AType sum = AType(0); for 
(index_t j = 0; j < len; ++j) { sum += OP1::Map(ograd[base + j * sa], out[base + j * sa]); } // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime DType final_result; if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) : OP2::Map(ograd[base + j * sa], out[base + j * sa], sum); final_result = (j < len) ? final_result : DType(0.0f); KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result); } } else { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature : OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature; final_result = (j < len) ? final_result : DType(0.0f); KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result); } } } } else { #pragma omp parallel for for (index_t i = 0; i < N; ++i) { index_t base = unravel_dot(i, sshape, stride); AType sum = AType(0); for (index_t j = 0; j < M; ++j) { sum += OP1::Map(ograd[base + j * sa], out[base + j * sa]); } // By default temperature is 1.0. // Adding a branch here to save the CPU 'divide-by-1' computation at runtime DType final_result; if (temperature == 1.0) { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) : OP2::Map(ograd[base + j * sa], out[base + j * sa], sum); KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result); } } else { for (index_t j = 0; j < M; ++j) { final_result = negate ? -OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature : OP2::Map(ograd[base + j * sa], out[base + j * sa], sum) / temperature; KERNEL_ASSIGN(igrad[base + j * sa], Req, final_result); } } } } } template <typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType> inline void MaskedSoftmaxGrad(Stream<cpu>* s, DType* out, DType* ograd, DType* igrad, bool* mask, Shape<ndim> data_shape, Shape<ndim> mask_shape, int axis, const double temperature, const OpContext& ctx) { Tensor<cpu, 1, DType> workspace = ctx.requested[0].get_space_typed<cpu, 1, DType>(Shape1(data_shape.Size()), s); DType* masked_ograd = TBlob(workspace).dptr<DType>(); Kernel<masked_softmax_where, cpu>::Launch( s, data_shape.Size(), masked_ograd, mask, ograd, 0.0, data_shape, mask_shape); int* max_lenghts = nullptr; SoftmaxGrad<OP1, OP2, Req, negate, AType, DType, DType, int, ndim>( s, out, masked_ograd, igrad, max_lenghts, data_shape, axis, temperature); Kernel<masked_softmax_where, cpu>::Launch( s, data_shape.Size(), igrad, mask, igrad, 0.0, data_shape, mask_shape); } #ifdef __CUDACC__ const int softmax_threads_per_block = 512; template <int ndim> MSHADOW_XINLINE index_t get_mask_position(const index_t idx, const Shape<ndim>& data_shape, const Shape<ndim>& mask_shape, int axis, index_t* stride_axis) { index_t ret = 0; index_t stride = 1; *stride_axis = 1; #pragma unroll for (index_t i = ndim - 1, j = idx; i >= 0; --i) { auto tmp = j / data_shape[i]; if (i != axis && mask_shape[i] != 1) { ret += (j - tmp * mask_shape[i]) * stride; if (i > axis) *stride_axis *= mask_shape[i]; } stride *= mask_shape[i]; j = tmp; } return ret; } template <bool normalize, int x_bits, typename OP, bool masked_neg_inf, bool negate, typename AType, int ndim, typename DType> __global__ void masked_softmax_kernel(DType* in, DType* out, bool* in_mask, index_t M, int axis, Shape<ndim> sshape, Shape<ndim> stride, Shape<ndim> mask_shape, const double temperature) { extern __shared__ 
double shared[]; AType* smem = reinterpret_cast<AType*>(shared); // x_size const unsigned x_size = 1 << x_bits; index_t sa = stride[axis]; index_t base = unravel_dot(blockIdx.x, sshape, stride); index_t sa_mask = 0; index_t base_mask = get_mask_position(blockIdx.x, sshape, mask_shape, axis, &sa_mask); bool bcst_mask_axis = (mask_shape[axis] == 1); index_t x = threadIdx.x; DType smax = 0.0; if (normalize) { red::maximum::SetInitValue(smem[x]); for (index_t i = x; i < M; i += x_size) { bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask]; if (mask_value) smem[x] = ::max(smem[x], negate ? -in[base + i * sa] : in[base + i * sa]); } __syncthreads(); cuda::Reduce1D<red::maximum, x_bits>(smem); __syncthreads(); smax = smem[0]; __syncthreads(); } red::sum::SetInitValue(smem[x]); DType val; for (index_t i = x; i < M; i += x_size) { bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask]; if (mask_value) { val = (negate ? -in[base + i * sa] : in[base + i * sa]); smem[x] += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature))); } } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); AType ssum = smem[0]; __syncthreads(); double masked_value = 0.0; if (masked_neg_inf) masked_value = -INFINITY; for (index_t i = x; i < M; i += x_size) { val = (negate ? -in[base + i * sa] : in[base + i * sa]); bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask]; out[base + i * sa] = mask_value ? DType(OP::Map((val - smax) / static_cast<DType>(temperature), ssum)) : DType(masked_value); } } template <bool normalize, typename OP, bool masked_neg_inf, bool negate, typename AType, typename LType, typename LTypeMask, typename DType, int ndim> __global__ void masked_softmax_stride1_kernel(const DType* in, DType* out, bool* in_mask, const index_t M, int axis, Shape<ndim> sshape, Shape<ndim> mask_shape, const double temperature, const int rows_per_block, const index_t total_rows, const size_t size_input_shared, const size_t size_mask_shared) { const int entries_per_load = sizeof(LType) / sizeof(DType); const int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool); const int row_length = entries_per_load > 0 ? M / entries_per_load : 0; const int row_length_mask = entries_per_load > 0 ? 
M / entries_per_load_mask : 0; extern __shared__ double shared[]; LType* persistent_storage = reinterpret_cast<LType*>(shared); // rows_per_block * M (DType), aligned to double LTypeMask* mask_shared = reinterpret_cast<LTypeMask*>(&shared[size_input_shared]); // rows_per_block * M (bool), aligned to double AType* scratch = reinterpret_cast<AType*>(&shared[size_input_shared + size_mask_shared]); // softmax_threads_per_block const int warp_size = 32; const int threads_per_row = softmax_threads_per_block / rows_per_block; const int my_local_row = threadIdx.x / threads_per_row; const int my_row = blockIdx.x * rows_per_block + my_local_row; if (my_row >= total_rows) return; const int my_id = threadIdx.x % threads_per_row; size_t base = my_row * row_length; index_t pos_mask = 0; index_t stride = mask_shape[axis]; #pragma unroll for (index_t i = axis - 1, j = my_row; i >= 0; --i) { auto tmp = j / sshape[i]; if (mask_shape[i] != 1) { pos_mask += (j - tmp * mask_shape[i]) * stride; stride *= mask_shape[i]; } j = tmp; } const LType* in_aligned = reinterpret_cast<const LType*>(in); for (index_t i = my_id; i < row_length; i += threads_per_row) { persistent_storage[my_local_row * row_length + i] = in_aligned[base + i]; } const LTypeMask* in_mask_aligned = reinterpret_cast<const LTypeMask*>(&in_mask[pos_mask]); for (index_t i = my_id; i < row_length_mask; i += threads_per_row) { mask_shared[my_local_row * row_length_mask + i] = (mask_shape[axis] > 1) ? in_mask_aligned[i] : in_mask_aligned[0]; } DType* row = reinterpret_cast<DType*>(persistent_storage + my_local_row * row_length); bool* row_mask = reinterpret_cast<bool*>(mask_shared + my_local_row * row_length_mask); __syncthreads(); DType smax = 0.0; if (normalize) { DType my_max_value; red::maximum::SetInitValue(my_max_value); for (index_t i = my_id; i < M; i += threads_per_row) { if (row_mask[i]) my_max_value = ::max(my_max_value, negate ? -row[i] : row[i]); } scratch[threadIdx.x] = my_max_value; __syncthreads(); for (int size = threads_per_row / 2; size >= warp_size; size /= 2) { if (my_id < size) { scratch[threadIdx.x] = ::max(scratch[threadIdx.x], scratch[threadIdx.x + size]); } __syncthreads(); } if (my_id < warp_size) { AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return ::max(x, y); }); scratch[threadIdx.x] = my_value; } __syncthreads(); smax = scratch[threadIdx.x - threadIdx.x % threads_per_row]; __syncthreads(); } AType my_sum; red::sum::SetInitValue(my_sum); for (index_t i = my_id; i < M; i += threads_per_row) { if (row_mask[i]) { const DType val = (negate ? -row[i] : row[i]); my_sum += static_cast<AType>(expf((val - smax) / static_cast<AType>(temperature))); } } scratch[threadIdx.x] = my_sum; __syncthreads(); for (int size = threads_per_row / 2; size >= warp_size; size /= 2) { if (my_id < size) { scratch[threadIdx.x] += scratch[threadIdx.x + size]; } __syncthreads(); } if (my_id < warp_size) { AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return x + y; }); scratch[threadIdx.x] = my_value; } __syncthreads(); AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row]; __syncthreads(); double masked_value = 0.0; if (masked_neg_inf) masked_value = -INFINITY; for (index_t i = my_id; i < M; i += threads_per_row) { const DType val = (negate ? -row[i] : row[i]); row[i] = row_mask[i] ? 
DType(OP::Map((val - smax) / static_cast<DType>(temperature), ssum)) : DType(masked_value); } __syncthreads(); LType* out_aligned = reinterpret_cast<LType*>(out); for (index_t i = my_id; i < row_length; i += threads_per_row) { out_aligned[base + i] = persistent_storage[my_local_row * row_length + i]; } } template <typename OP, bool masked_neg_inf, bool negate, typename AType, typename DType, typename OType, int ndim> inline void MaskedSoftmax(Stream<gpu>* s, DType* in, OType* out, bool* mask, Shape<ndim> data_shape, Shape<ndim> mask_shape, int axis, const double temperature, bool normalize, const OpContext& ctx) { const int x_bits = 7; const int x_size = 1 << x_bits; index_t M = data_shape[axis]; if (M == 0 || data_shape.Size() == 0) return; index_t N = data_shape.Size() / M; Shape<ndim> stride = calc_stride(data_shape); Shape<ndim> sshape = data_shape; sshape[axis] = 1; const size_t DSize = sizeof(DType); // Using max of 20 kB of shared memory for InputData in the optimized case const size_t max_opt_M = 20 * 1024 / DSize; if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M && std::is_same<DType, OType>::value) { int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType)); int ltype_mask = mxnet::common::cuda::get_load_type(mask_shape[axis] * sizeof(bool)); MXNET_LOAD_TYPE_SWITCH(ltype, LType, { CHECK_LE(sizeof(DType), sizeof(LType)); MXNET_LOAD_TYPE_SWITCH(ltype_mask, LTypeMask, { CHECK_LE(sizeof(bool), sizeof(LTypeMask)); int rows_per_block = mxnet::common::cuda::get_rows_per_block( M * sizeof(DType) / sizeof(LType), softmax_threads_per_block); // calculate amount shared memory (slots aligned to double) int entries_per_load = entries_per_load = sizeof(LType) / sizeof(DType); int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool); size_t size_input_shared = entries_per_load > 0 ? rows_per_block * M / entries_per_load : 0; size_t size_mask_shared = entries_per_load_mask > 0 ? 
rows_per_block * M / entries_per_load_mask : 0; size_input_shared = ((size_input_shared * sizeof(LType) + sizeof(double) - 1) / sizeof(double)); size_mask_shared = ((size_mask_shared * sizeof(LTypeMask) + sizeof(double) - 1) / sizeof(double)); size_t amount_shared = size_input_shared * sizeof(double) + size_mask_shared * sizeof(double) + softmax_threads_per_block * sizeof(AType); int nblocks = (N + rows_per_block - 1) / rows_per_block; if (normalize) { masked_softmax_stride1_kernel<true, OP, masked_neg_inf, negate, AType, LType, LTypeMask> <<<nblocks, softmax_threads_per_block, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(in, out, mask, M, axis, sshape, mask_shape, temperature, rows_per_block, N, size_input_shared, size_mask_shared); } else { masked_softmax_stride1_kernel<false, OP, masked_neg_inf, negate, AType, LType, LTypeMask> <<<nblocks, softmax_threads_per_block, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(in, out, mask, M, axis, sshape, mask_shape, temperature, rows_per_block, N, size_input_shared, size_mask_shared); } }); }); MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_stride1_kernel); } else { size_t amount_shared = x_size * sizeof(AType); if (normalize) { masked_softmax_kernel<true, x_bits, OP, masked_neg_inf, negate, AType, ndim> <<<N, x_size, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>( in, out, mask, M, axis, sshape, stride, mask_shape, temperature); } else { masked_softmax_kernel<false, x_bits, OP, masked_neg_inf, negate, AType, ndim> <<<N, x_size, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>( in, out, mask, M, axis, sshape, stride, mask_shape, temperature); } MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_kernel); } } template <typename OP1, typename OP2, int Req, bool negate, typename AType, typename LType, typename LTypeMask, typename DType, typename OType, int ndim> __global__ void masked_softmax_stride1_grad_kernel(const OType* out, const OType* ograd, DType* igrad, const bool* in_mask, const index_t M, int axis, Shape<ndim> sshape, Shape<ndim> mask_shape, const double temperature, const int rows_per_block, const index_t total_rows, const size_t size_input_shared, const size_t size_mask_shared) { const int entries_per_load = sizeof(LType) / sizeof(DType); const int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool); const int row_length = entries_per_load > 0 ? M / entries_per_load : 0; const int row_length_mask = entries_per_load > 0 ? 
M / entries_per_load_mask : 0; extern __shared__ double shared[]; LType* persistent_storage = reinterpret_cast<LType*>(shared); // 2 * rows_per_block * M (DType), aligned to double LTypeMask* mask_shared = reinterpret_cast<LTypeMask*>(&shared[size_input_shared]); // rows_per_block * M (bool), aligned to double AType* scratch = reinterpret_cast<AType*>(&shared[size_input_shared + size_mask_shared]); // softmax_threads_per_block const int warp_size = 32; const int threads_per_row = softmax_threads_per_block / rows_per_block; const int my_local_row = threadIdx.x / threads_per_row; const int my_row = blockIdx.x * rows_per_block + my_local_row; if (my_row >= total_rows) return; const int my_id = threadIdx.x % threads_per_row; size_t base = my_row * row_length; index_t pos_mask = 0; index_t stride = mask_shape[axis]; #pragma unroll for (index_t i = axis - 1, j = my_row; i >= 0; --i) { auto tmp = j / sshape[i]; if (mask_shape[i] != 1) { pos_mask += (j - tmp * mask_shape[i]) * stride; stride *= mask_shape[i]; } j = tmp; } const LType* out_aligned = reinterpret_cast<const LType*>(out); const LType* ograd_aligned = reinterpret_cast<const LType*>(ograd); for (index_t i = my_id; i < row_length; i += threads_per_row) { persistent_storage[my_local_row * row_length * 2 + i] = out_aligned[base + i]; persistent_storage[my_local_row * row_length * 2 + row_length + i] = ograd_aligned[base + i]; } const LTypeMask* in_mask_aligned = reinterpret_cast<const LTypeMask*>(&in_mask[pos_mask]); for (index_t i = my_id; i < row_length_mask; i += threads_per_row) { mask_shared[my_local_row * row_length_mask + i] = (mask_shape[axis] > 1) ? in_mask_aligned[i] : in_mask_aligned[0]; } DType* row = reinterpret_cast<DType*>(persistent_storage + my_local_row * row_length * 2); bool* row_mask = reinterpret_cast<bool*>(mask_shared + my_local_row * row_length_mask); __syncthreads(); AType my_sum_value; red::sum::SetInitValue(my_sum_value); for (index_t i = my_id; i < M; i += threads_per_row) { if (row_mask[i]) my_sum_value += OP1::Map(row[i + M], row[i]); } scratch[threadIdx.x] = my_sum_value; __syncthreads(); for (int size = threads_per_row / 2; size >= warp_size; size /= 2) { if (my_id < size) { scratch[threadIdx.x] = scratch[threadIdx.x] + scratch[threadIdx.x + size]; } __syncthreads(); } if (my_id < warp_size) { AType my_value = common::cuda::warp_reduce(scratch[threadIdx.x], [](AType x, AType y) { return x + y; }); scratch[threadIdx.x] = my_value; } __syncthreads(); AType ssum = scratch[threadIdx.x - threadIdx.x % threads_per_row]; __syncthreads(); for (index_t i = my_id; i < M; i += threads_per_row) { const DType val = negate ? -OP2::Map(row[i + M], row[i], ssum) : OP2::Map(row[i + M], row[i], ssum); row[i] = row_mask[i] ? 
DType(val / static_cast<DType>(temperature)) : DType(0.0f); if (Req == kAddTo) { row[i] += igrad[my_row * M + i]; } } __syncthreads(); LType* igrad_aligned = reinterpret_cast<LType*>(igrad); for (index_t i = my_id; i < row_length; i += threads_per_row) { igrad_aligned[base + i] = persistent_storage[my_local_row * row_length * 2 + i]; } } template <int x_bits, typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType, typename OType> __global__ void masked_softmax_grad_kernel(OType* out, OType* ograd, DType* igrad, const bool* in_mask, index_t M, int axis, Shape<ndim> sshape, Shape<ndim> stride, Shape<ndim> mask_shape, const double temperature) { const unsigned x_size = 1 << x_bits; __shared__ AType smem[x_size]; index_t sa = stride[axis]; index_t base = unravel_dot(blockIdx.x, sshape, stride); index_t sa_mask = 0; index_t base_mask = get_mask_position(blockIdx.x, sshape, mask_shape, axis, &sa_mask); bool bcst_mask_axis = (mask_shape[axis] == 1); index_t x = threadIdx.x; red::sum::SetInitValue(smem[x]); for (index_t i = x; i < M; i += x_size) { bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask]; if (mask_value) smem[x] += OP1::Map(ograd[base + i * sa], out[base + i * sa]); } __syncthreads(); cuda::Reduce1D<red::sum, x_bits>(smem); __syncthreads(); AType ssum = smem[0]; __syncthreads(); DType final_result; for (index_t i = x; i < M; i += x_size) { bool mask_value = bcst_mask_axis ? in_mask[base_mask] : in_mask[base_mask + i * sa_mask]; final_result = negate ? -OP2::Map(ograd[base + i * sa], out[base + i * sa], ssum) : OP2::Map(ograd[base + i * sa], out[base + i * sa], ssum); final_result = mask_value ? final_result / static_cast<DType>(temperature) : DType(0.0f); KERNEL_ASSIGN(igrad[base + i * sa], Req, final_result); } } template <typename OP1, typename OP2, int Req, bool negate, typename AType, int ndim, typename DType, typename OType> inline void MaskedSoftmaxGrad(Stream<gpu>* s, OType* out, OType* ograd, DType* igrad, bool* mask, Shape<ndim> data_shape, Shape<ndim> mask_shape, int axis, const double temperature, const OpContext& ctx) { const int x_bits = 7; const int x_size = 1 << x_bits; index_t M = data_shape[axis]; if (M == 0 || data_shape.Size() == 0) return; index_t N = data_shape.Size() / M; Shape<ndim> stride = calc_stride(data_shape); Shape<ndim> sshape = data_shape; sshape[axis] = 1; const size_t DSize = sizeof(DType); // Using max of 20 kB of shared memory for InputData in the optimized case const size_t max_opt_M = 20 * 1024 / DSize; if (stride[axis] == 1 && static_cast<size_t>(M) <= max_opt_M && std::is_same<DType, OType>::value) { int ltype = mxnet::common::cuda::get_load_type(M * sizeof(DType)); int ltype_mask = mxnet::common::cuda::get_load_type(mask_shape[axis] * sizeof(bool)); MXNET_LOAD_TYPE_SWITCH(ltype, LType, { CHECK_LE(sizeof(DType), sizeof(LType)); MXNET_LOAD_TYPE_SWITCH(ltype_mask, LTypeMask, { CHECK_LE(sizeof(bool), sizeof(LTypeMask)); int rows_per_block = mxnet::common::cuda::get_rows_per_block( M * sizeof(DType) / sizeof(LType), softmax_threads_per_block); // calculate amount shared memory (slots aligned to double) int entries_per_load = entries_per_load = sizeof(LType) / sizeof(DType); int entries_per_load_mask = sizeof(LTypeMask) / sizeof(bool); size_t size_input_shared = entries_per_load > 0 ? rows_per_block * M / entries_per_load : 0; size_t size_mask_shared = entries_per_load_mask > 0 ? 
rows_per_block * M / entries_per_load_mask : 0; size_input_shared = ((2 * size_input_shared * sizeof(LType) + sizeof(double) - 1) / sizeof(double)); size_mask_shared = ((size_mask_shared * sizeof(LTypeMask) + sizeof(double) - 1) / sizeof(double)); size_t amount_shared = size_input_shared * sizeof(double) + size_mask_shared * sizeof(double) + softmax_threads_per_block * sizeof(AType); int nblocks = (N + rows_per_block - 1) / rows_per_block; masked_softmax_stride1_grad_kernel<OP1, OP2, Req, negate, AType, LType, LTypeMask> <<<nblocks, softmax_threads_per_block, amount_shared, mshadow::Stream<gpu>::GetStream(s)>>>(out, ograd, igrad, mask, M, axis, sshape, mask_shape, temperature, rows_per_block, N, size_input_shared, size_mask_shared); }); }); MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_stride1_grad_kernel); } else { masked_softmax_grad_kernel<x_bits, OP1, OP2, Req, negate, AType, ndim> <<<N, x_size, 0, mshadow::Stream<gpu>::GetStream(s)>>>( out, ograd, igrad, mask, M, axis, sshape, stride, mask_shape, temperature); MSHADOW_CUDA_POST_KERNEL_CHECK(masked_softmax_grad_kernel); } } #endif } // namespace mxnet_op struct SoftmaxParam : public dmlc::Parameter<SoftmaxParam> { int axis; dmlc::optional<double> temperature; dmlc::optional<int> dtype; dmlc::optional<bool> use_length; DMLC_DECLARE_PARAMETER(SoftmaxParam) { DMLC_DECLARE_FIELD(axis).set_default(-1).describe("The axis along which to compute softmax."); DMLC_DECLARE_FIELD(temperature) .set_default(dmlc::optional<double>()) .describe("Temperature parameter in softmax"); DMLC_DECLARE_FIELD(dtype) .add_enum("float16", mshadow::kFloat16) .add_enum("float32", mshadow::kFloat32) .add_enum("float64", mshadow::kFloat64) .set_default(dmlc::optional<int>()) .describe( "DType of the output in case this can't be inferred. 
" "Defaults to the same as input's dtype if not defined (dtype=None)."); DMLC_DECLARE_FIELD(use_length) .set_default(dmlc::optional<bool>(false)) .describe("Whether to use the length input as a mask over the data input."); } bool operator==(const SoftmaxParam& other) const { return this->axis == other.axis && this->temperature == other.temperature && this->dtype == other.dtype && this->use_length == other.use_length; } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, temperature_s, dtype_s, use_length_s; axis_s << axis; temperature_s << temperature; dtype_s << dtype; use_length_s << use_length; (*dict)["axis"] = axis_s.str(); (*dict)["temperature"] = temperature_s.str(); if (dtype.has_value()) { (*dict)["dtype"] = MXNetTypeWithBool2String(dtype.value()); } else { (*dict)["dtype"] = dtype_s.str(); } (*dict)["use_length"] = use_length_s.str(); } }; struct MaskedSoftmaxParam : public dmlc::Parameter<MaskedSoftmaxParam> { int axis; dmlc::optional<double> temperature; dmlc::optional<bool> normalize; DMLC_DECLARE_PARAMETER(MaskedSoftmaxParam) { DMLC_DECLARE_FIELD(axis).set_default(-1).describe("The axis along which to compute softmax."); DMLC_DECLARE_FIELD(temperature) .set_default(dmlc::optional<double>()) .describe("Temperature parameter in softmax"); DMLC_DECLARE_FIELD(normalize) .set_default(dmlc::optional<bool>(true)) .describe("Whether to normalize input data x: x = x - max(x)"); } void SetAttrDict(std::unordered_map<std::string, std::string>* dict) { std::ostringstream axis_s, temperature_s, normalize_s; axis_s << axis; temperature_s << temperature; normalize_s << normalize; (*dict)["axis"] = axis_s.str(); (*dict)["temperature"] = temperature_s.str(); (*dict)["normalize"] = normalize_s.str(); } bool operator==(const MaskedSoftmaxParam& other) const { return this->axis == other.axis && this->temperature == other.temperature && this->normalize == other.normalize; } }; static inline bool softmax_has_dtype_override(const nnvm::NodeAttrs& attrs) { const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); return param.dtype.has_value() && param.dtype.value() != -1; } static inline bool softmax_use_length(const nnvm::NodeAttrs& attrs) { const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); return param.use_length.value(); } static inline bool SoftmaxOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), 1); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 2U : 1U); if (softmax_has_dtype_override(attrs)) { TYPE_ASSIGN_CHECK(*out_attrs, 0, param.dtype.value()); type_assign(&(*in_attrs)[0], (*out_attrs)[0]); return true; } else { std::vector<int> tmp = {in_attrs->at(0)}; return ElemwiseType<1, 1>(attrs, &tmp, out_attrs); } } static inline bool SoftmaxOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { CHECK_EQ(out_attrs->size(), 1U); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); CHECK_EQ(in_attrs->size(), param.use_length.value() ? 2U : 1U); if (param.use_length.value()) { mxnet::TShape& dshape = in_attrs->at(0); mxnet::TShape tmp_shape((dshape.ndim() == 1) ? 1U : dshape.ndim() - 1, 1); int j = 0; int axis = param.axis != -1 ? 
param.axis : dshape.ndim() - 1; for (int i = 0; i < dshape.ndim(); ++i) { if (i != axis) { tmp_shape[j++] = dshape[i]; } } SHAPE_ASSIGN_CHECK(*in_attrs, 1, tmp_shape); } mxnet::ShapeVector tmp = {in_attrs->at(0)}; return ElemwiseShape<1, 1>(attrs, &tmp, out_attrs); } static inline bool SoftmaxGradOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_attrs, mxnet::ShapeVector* out_attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { if (softmax_use_length(attrs)) { mxnet::ShapeVector ins = {in_attrs->at(0), in_attrs->at(1), in_attrs->at(3)}; mxnet::ShapeVector dgrad = {out_attrs->at(0)}; bool res = ElemwiseShape<3, 1>(attrs, &ins, &dgrad); SHAPE_ASSIGN_CHECK(*in_attrs, 0, ins[0]); SHAPE_ASSIGN_CHECK(*in_attrs, 1, ins[1]); SHAPE_ASSIGN_CHECK(*in_attrs, 3, ins[2]); SHAPE_ASSIGN_CHECK(*out_attrs, 0, dgrad[0]); mxnet::ShapeVector length = {in_attrs->at(2)}; mxnet::ShapeVector lgrad = {out_attrs->at(1)}; res = (res && ElemwiseShape<1, 1>(attrs, &length, &lgrad)); SHAPE_ASSIGN_CHECK(*in_attrs, 2, length[0]); SHAPE_ASSIGN_CHECK(*out_attrs, 1, lgrad[0]); return res; } else { return ElemwiseShape<3, 1>(attrs, in_attrs, out_attrs); } } else { return ElemwiseShape<2, 1>(attrs, in_attrs, out_attrs); } } static inline bool SoftmaxGradOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), softmax_use_length(attrs) ? 2U : 1U); if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { CHECK_EQ(in_attrs->size(), softmax_use_length(attrs) ? 4U : 3U); int in_dtype = (*in_attrs)[1]; int out_dtype = (*in_attrs)[softmax_use_length(attrs) ? 3 : 2]; TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype); TYPE_ASSIGN_CHECK(*out_attrs, 0, in_dtype); if (softmax_use_length(attrs)) { TYPE_ASSIGN_CHECK(*out_attrs, 1, in_attrs->at(2)); } return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1 && (!softmax_use_length(attrs) || ((*out_attrs)[1] != -1 && (*in_attrs)[1] != -1)); } else { CHECK_EQ(in_attrs->size(), 2U); int out_dtype = (*in_attrs)[1]; TYPE_ASSIGN_CHECK(*out_attrs, 0, out_dtype); TYPE_ASSIGN_CHECK(*in_attrs, 0, out_dtype); return (*out_attrs)[0] != -1 && (*in_attrs)[0] != -1; } } static inline std::vector<std::pair<int, int>> SoftmaxGradOpInplaceOption( const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { if (softmax_use_length(attrs)) { return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}, {2, 1}, {3, 0}}; } else { return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}, {2, 0}}; } } else { return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}}; } } static inline uint32_t SoftmaxGradOpNumInputs(const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { return softmax_use_length(attrs) ? 
4 : 3; } return 2; } static inline std::vector<std::string> SoftmaxGradOpInputNames(const nnvm::NodeAttrs& attrs) { if (softmax_has_dtype_override(attrs) || softmax_use_length(attrs)) { if (softmax_use_length(attrs)) { return std::vector<std::string>{"ograd", "data", "length", "output"}; } else { return std::vector<std::string>{"ograd", "data", "output"}; } } else { return std::vector<std::string>{"ograd", "output"}; } } struct SoftmaxFGradient { const char* op_name; std::vector<nnvm::NodeEntry> operator()(const nnvm::ObjectPtr& n, const std::vector<nnvm::NodeEntry>& ograds) const { if (softmax_has_dtype_override(n->attrs) || softmax_use_length(n->attrs)) { return ElemwiseGradUseInOut{op_name}(n, ograds); // NOLINT } else { return ElemwiseGradUseOut{op_name}(n, ograds); // NOLINT } } }; static inline bool MaskedSoftmaxOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), 1); CHECK_EQ(in_attrs->size(), 2U); std::vector<int> tmp = {in_attrs->at(0)}; return ElemwiseType<1, 1>(attrs, &tmp, out_attrs); } static inline bool MaskedSoftmaxOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_shape, mxnet::ShapeVector* out_shape) { CHECK_EQ(out_shape->size(), 1U); CHECK_EQ(in_shape->size(), 2U); mxnet::TShape& data_shape = (*in_shape)[0]; mxnet::TShape& mask_shape = (*in_shape)[1]; if (!mxnet::ndim_is_known(data_shape) || !mxnet::ndim_is_known(mask_shape)) { return false; } CHECK(data_shape.ndim() == mask_shape.ndim()) << "Number of dimensions in data and mask does not match"; CHECK(data_shape.ndim() > 0) << "Empty tuple is not allowed"; for (int i = 0; i < data_shape.ndim(); ++i) { CHECK(data_shape[i] == mask_shape[i] || mask_shape[i] == 1) << "Mask cannot be broadcasted from " << mask_shape << " to " << data_shape; } SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(0)); SHAPE_ASSIGN_CHECK(*in_shape, 0, out_shape->at(0)); return true; } static inline bool MaskedSoftmaxGradOpShape(const nnvm::NodeAttrs& attrs, mxnet::ShapeVector* in_shape, mxnet::ShapeVector* out_shape) { CHECK_EQ(out_shape->size(), 1U); CHECK_EQ(in_shape->size(), 3U); mxnet::TShape& ograd_shape = (*in_shape)[0]; mxnet::TShape& mask_shape = (*in_shape)[1]; if (!mxnet::ndim_is_known(ograd_shape) || !mxnet::ndim_is_known(mask_shape)) { return false; } CHECK(ograd_shape.ndim() == mask_shape.ndim()) << "Number of dimensions in data and mask does not match"; CHECK(ograd_shape.ndim() > 0) << "Empty tuple is not allowed"; for (int i = 0; i < ograd_shape.ndim(); ++i) { CHECK(ograd_shape[i] == mask_shape[i] || mask_shape[i] == 1) << "Mask cannot be broadcasted from " << mask_shape << " to " << ograd_shape; } SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(0)); SHAPE_ASSIGN_CHECK(*out_shape, 0, in_shape->at(2)); SHAPE_ASSIGN_CHECK(*in_shape, 0, out_shape->at(0)); SHAPE_ASSIGN_CHECK(*in_shape, 2, out_shape->at(0)); return true; } static inline bool MaskedSoftmaxGradOpType(const nnvm::NodeAttrs& attrs, std::vector<int>* in_attrs, std::vector<int>* out_attrs) { CHECK_EQ(out_attrs->size(), 1U); CHECK_EQ(in_attrs->size(), 3U); int data_dtype = (*in_attrs)[0]; TYPE_ASSIGN_CHECK(*in_attrs, 2, data_dtype); TYPE_ASSIGN_CHECK(*out_attrs, 0, data_dtype); data_dtype = (*out_attrs)[0]; TYPE_ASSIGN_CHECK(*in_attrs, 0, data_dtype); return true; } static inline std::vector<std::pair<int, int>> MaskedSoftmaxGradOpInplaceOption( const nnvm::NodeAttrs& attrs) { return std::vector<std::pair<int, int>>{{0, 0}, {1, 0}, {2, 1}, {3, 0}}; } template <typename xpu, typename OP, bool negate = 
false> void SoftmaxCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (req[0] == kNullOp || inputs[0].Size() == 0U) return; CHECK_NE(req[0], kAddTo); const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0; mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true); if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) { common::LogOnce( "MXNET_SAFE_ACCUMULATION=1 is recommended for softmax with float16 inputs. " "See https://mxnet.apache.org/api/faq/env_var " "for more details."); } MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, { MSHADOW_REAL_TYPE_SWITCH( outputs[0].type_flag_, OType, { int type = kInt32; if (param.use_length.value()) { CHECK(inputs.size() > 1) << "Mask needs to be provided when using softmax with use_length=True."; type = inputs[1].type_flag_; } MXNET_INT32_INT64_TYPE_SWITCH(type, IType, { IType* mask_ptr = nullptr; if (param.use_length.value()) { mask_ptr = inputs[1].dptr<IType>(); } if (safe_acc) { if (shape.ndim() == 2) { Softmax<OP, negate, AType>(ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { Softmax<OP, negate, AType>(ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } else { if (shape.ndim() == 2) { Softmax<OP, negate, DType>(ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { Softmax<OP, negate, DType>(ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<OType>(), mask_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } }); }); }); } template <typename xpu, typename OP, bool masked_neg_inf, bool negate = false> void MaskedSoftmaxCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (req[0] == kNullOp || inputs[0].Size() == 0U) return; CHECK_NE(req[0], kAddTo); const MaskedSoftmaxParam& param = nnvm::get<MaskedSoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0; bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true); if (!safe_acc && inputs[0].type_flag_ == mshadow::kFloat16) { common::LogOnce( "MXNET_SAFE_ACCUMULATION=1 is recommended for masked_softmax with " "float16 inputs. 
" "See https://mxnet.apache.org/api/faq/env_var " "for more details."); } MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, { MXNET_NDIM_SWITCH(inputs[0].ndim(), ndim, { bool* mask_ptr = inputs[1].dptr<bool>(); if (safe_acc) { MaskedSoftmax<OP, masked_neg_inf, negate, AType>(ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), mask_ptr, inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(), axis, temperature, param.normalize.value(), ctx); } else { MaskedSoftmax<OP, masked_neg_inf, negate, DType>(ctx.get_stream<xpu>(), inputs[0].dptr<DType>(), outputs[0].dptr<DType>(), mask_ptr, inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(), axis, temperature, param.normalize.value(), ctx); } }); }); } #if MXNET_USE_CUDA struct SoftmaxRTCCompute { std::string OP; bool negate = false; void operator()(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs); }; struct SoftmaxRTCGradCompute { std::string OP1; std::string OP2; bool negate = false; void operator()(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs); }; #endif template <typename xpu, typename OP1, typename OP2, bool negate = false> void SoftmaxGradCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (softmax_use_length(attrs)) { MXNET_INT32_INT64_TYPE_SWITCH(inputs[2].type_flag_, IType, { if (req[1] != kNullOp) { mxnet_op::Kernel<mxnet_op::set_zero, xpu>::Launch( ctx.get_stream<xpu>(), outputs[1].Size(), outputs[1].dptr<IType>()); } }); } if (req[0] == kNullOp) return; const int itype = softmax_use_length(attrs) ? inputs[2].type_flag_ : kInt32; const SoftmaxParam& param = nnvm::get<SoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0; mxnet::TShape shape = AxisShapeCompact(inputs[0].shape_, &axis, true); int out_idx = softmax_has_dtype_override(attrs) ? 2 : 1; out_idx = softmax_use_length(attrs) ? 
3 : out_idx; bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true); MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, OType, AType, { MSHADOW_REAL_TYPE_SWITCH(outputs[0].type_flag_, DType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MXNET_INT32_INT64_TYPE_SWITCH(itype, IType, { IType* length_ptr = nullptr; if (softmax_use_length(attrs)) { length_ptr = inputs[2].dptr<IType>(); } if (safe_acc) { if (shape.ndim() == 2) { SoftmaxGrad<OP1, OP2, Req, negate, AType>(ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { SoftmaxGrad<OP1, OP2, Req, negate, AType>(ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } else { if (shape.ndim() == 2) { SoftmaxGrad<OP1, OP2, Req, negate, DType>(ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<2>(), axis, static_cast<DType>(temperature)); } else { SoftmaxGrad<OP1, OP2, Req, negate, DType>(ctx.get_stream<xpu>(), inputs[out_idx].dptr<OType>(), inputs[0].dptr<OType>(), outputs[0].dptr<DType>(), length_ptr, shape.get<3>(), axis, static_cast<DType>(temperature)); } } }); }); }); }); } template <typename xpu, typename OP1, typename OP2, bool negate = false> void MaskedSoftmaxGradCompute(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<TBlob>& inputs, const std::vector<OpReqType>& req, const std::vector<TBlob>& outputs) { using namespace mxnet_op; if (req[0] == kNullOp) return; const MaskedSoftmaxParam& param = nnvm::get<MaskedSoftmaxParam>(attrs.parsed); int axis = CheckAxis(param.axis, inputs[0].ndim()); const double temperature = param.temperature.has_value() ? param.temperature.value() : 1.0; bool safe_acc = dmlc::GetEnv("MXNET_SAFE_ACCUMULATION", true); MXNET_REAL_ACC_TYPE_SWITCH(inputs[0].type_flag_, DType, AType, { MXNET_ASSIGN_REQ_SWITCH(req[0], Req, { MXNET_NDIM_SWITCH(inputs[0].ndim(), ndim, { DType* ograd_ptr = inputs[0].dptr<DType>(); DType* out_ptr = inputs[2].dptr<DType>(); bool* mask_ptr = inputs[1].dptr<bool>(); DType* grad_data = outputs[0].dptr<DType>(); if (safe_acc) { MaskedSoftmaxGrad<OP1, OP2, Req, negate, AType>(ctx.get_stream<xpu>(), out_ptr, ograd_ptr, grad_data, mask_ptr, inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(), axis, static_cast<DType>(temperature), ctx); } else { MaskedSoftmaxGrad<OP1, OP2, Req, negate, DType>(ctx.get_stream<xpu>(), out_ptr, ograd_ptr, grad_data, mask_ptr, inputs[0].shape_.get<ndim>(), inputs[1].shape_.get<ndim>(), axis, static_cast<DType>(temperature), ctx); } }); }); }); } } // namespace op } // namespace mxnet namespace std { template <> struct hash<mxnet::op::SoftmaxParam> { size_t operator()(const mxnet::op::SoftmaxParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.axis); ret = dmlc::HashCombine(ret, val.temperature); ret = dmlc::HashCombine(ret, val.dtype); ret = dmlc::HashCombine(ret, val.use_length); return ret; } }; template <> struct hash<mxnet::op::MaskedSoftmaxParam> { size_t operator()(const mxnet::op::MaskedSoftmaxParam& val) { size_t ret = 0; ret = dmlc::HashCombine(ret, val.axis); ret = dmlc::HashCombine(ret, val.temperature); ret = dmlc::HashCombine(ret, val.normalize); return ret; } }; } // namespace std #endif // MXNET_OPERATOR_NN_SOFTMAX_INL_H_
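/*
 * Editor's note: a minimal standalone sketch (not tied to the operator header above) of the
 * numerically stable, temperature-scaled softmax that the CPU kernel applies per slice:
 * shift by the slice maximum before exponentiating so exp() cannot overflow, then normalize.
 * The function name softmax_1d is an illustrative assumption.
 */
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

static std::vector<double> softmax_1d(const std::vector<double>& x, double temperature = 1.0) {
  // shift by the maximum element for numerical stability
  const double mmax = *std::max_element(x.begin(), x.end());
  std::vector<double> out(x.size());
  double sum = 0.0;
  for (size_t i = 0; i < x.size(); ++i) {
    out[i] = std::exp((x[i] - mmax) / temperature);
    sum += out[i];
  }
  for (double& v : out) v /= sum;  // normalize so the outputs sum to 1
  return out;
}

int main() {
  const std::vector<double> logits = {1.0, 2.0, 3.0};
  for (double p : softmax_1d(logits))       std::printf("%f ", p);  // temperature 1: sharper
  std::printf("\n");
  for (double p : softmax_1d(logits, 10.0)) std::printf("%f ", p);  // high temperature: flatter
  std::printf("\n");
  return 0;
}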
dlansy.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zlansy.c, normal z -> d, Fri Sep 28 17:38:08 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" /***************************************************************************//** * * @ingroup plasma_lansy * * Returns the norm of a symmetric matrix as * * dlansy = ( max(abs(A(i,j))), NORM = PlasmaMaxNorm * ( * ( norm1(A), NORM = PlasmaOneNorm * ( * ( normI(A), NORM = PlasmaInfNorm * ( * ( normF(A), NORM = PlasmaFrobeniusNorm * * where norm1 denotes the one norm of a matrix (maximum column sum), * normI denotes the infinity norm of a matrix (maximum row sum) and * normF denotes the Frobenius norm of a matrix (square root of sum * of squares). Note that max(abs(A(i,j))) is not a consistent matrix * norm. * ******************************************************************************* * * @param[in] norm * - PlasmaMaxNorm: Max norm * - PlasmaOneNorm: One norm * - PlasmaInfNorm: Infinity norm * - PlasmaFrobeniusNorm: Frobenius norm * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] n * The order of the matrix A. n >= 0. * * @param[in,out] A * On entry, the symmetric matrix A. * If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A * contains the upper triangular part of the matrix A, and the strictly * lower triangular part of A is not referenced. * If uplo = PlasmaLower, the leading N-by-N lower triangular part of A * contains the lower triangular part of the matrix A, and the strictly * upper triangular part of A is not referenced. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,m). * ******************************************************************************* * * @retval double * The specified norm of the symmetric matrix A. * ******************************************************************************* * * @sa plasma_omp_dlansy * @sa plasma_clansy * @sa plasma_dlansy * @sa plasma_slansy * ******************************************************************************/ double plasma_dlansy(plasma_enum_t norm, plasma_enum_t uplo, int n, double *pA, int lda) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) && (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm) ) { plasma_error("illegal value of norm"); return -1; } if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); return -2; } if (n < 0) { plasma_error("illegal value of n"); return -3; } if (lda < imax(1, n)) { plasma_error("illegal value of lda"); return -5; } // quick return if (n == 0) return 0.0; // Tune parameters if (plasma->tuning) plasma_tune_lansy(plasma, PlasmaRealDouble, n); // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. plasma_desc_t A; int retval; retval = plasma_desc_general_create(PlasmaRealDouble, nb, nb, n, n, 0, 0, n, n, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } // Allocate workspace. 
double *work = NULL; switch (norm) { case PlasmaMaxNorm: work = (double*)malloc((size_t)A.mt*A.nt*sizeof(double)); break; case PlasmaOneNorm: case PlasmaInfNorm: work = (double*)malloc(((size_t)A.mt*A.n+A.n)*sizeof(double)); break; case PlasmaFrobeniusNorm: work = (double*)malloc((size_t)2*A.mt*A.nt*sizeof(double)); break; } if (work == NULL) { plasma_error("malloc() failed"); return PlasmaErrorOutOfMemory; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); double value; // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_dge2desc(pA, lda, A, &sequence, &request); // Call tile async function. plasma_omp_dlansy(norm, uplo, A, work, &value, &sequence, &request); } // implicit synchronization free(work); // Free matrix in tile layout. plasma_desc_destroy(&A); // Return the norm. return value; } /***************************************************************************//** * * @ingroup plasma_lansy * * Calculates the max, one, infinity or Frobenius norm of a symmetric matrix. * Non-blocking equivalent of plasma_dlansy(). May return before the * computation is finished. Operates on matrices stored by tiles. All matrices * are passed through descriptors. All dimensions are taken from the * descriptors. Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] norm * - PlasmaMaxNorm: Max norm * - PlasmaOneNorm: One norm * - PlasmaInfNorm: Infinity norm * - PlasmaFrobeniusNorm: Frobenius norm * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] A * The descriptor of matrix A. * * @param[out] work * Workspace of size: * - PlasmaMaxNorm: A.mt*A.nt * - PlasmaOneNorm: A.mt*A.n + A.n * - PlasmaInfNorm: A.mt*A.n + A.n * - PlasmaFrobeniusNorm: 2*A.mt*A.nt * * @param[out] value * The calculated value of the norm requested. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_dlansy * @sa plasma_omp_clansy * @sa plasma_omp_dlansy * @sa plasma_omp_slansy * ******************************************************************************/ void plasma_omp_dlansy(plasma_enum_t norm, plasma_enum_t uplo, plasma_desc_t A, double *work, double *value, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // Check input arguments. 
if ((norm != PlasmaMaxNorm) && (norm != PlasmaOneNorm) && (norm != PlasmaInfNorm) && (norm != PlasmaFrobeniusNorm)) { plasma_error("illegal value of norm"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((uplo != PlasmaUpper) && (uplo != PlasmaLower)) { plasma_error("illegal value of uplo"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid descriptor A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return if (A.m == 0) { *value = 0.0; return; } // Call the parallel function. plasma_pdlansy(norm, uplo, A, work, value, sequence, request); }
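/*
 * Editor's note: a minimal standalone reference (not the tiled PLASMA algorithm above) for the
 * four norms dlansy can return, computed on a dense n-by-n symmetric matrix stored in full,
 * column-major order: max(|a_ij|), the one norm (maximum column sum), the infinity norm
 * (maximum row sum, equal to the one norm for a symmetric matrix), and the Frobenius norm.
 * Function names are illustrative assumptions.
 */
#include <math.h>
#include <stdio.h>

static double dense_sy_norm_max(int n, const double *A, int lda) {
    double v = 0.0;
    for (int j = 0; j < n; ++j)
        for (int i = 0; i < n; ++i)
            if (fabs(A[i + j * lda]) > v) v = fabs(A[i + j * lda]);
    return v;
}

static double dense_sy_norm_one(int n, const double *A, int lda) {
    double v = 0.0;
    for (int j = 0; j < n; ++j) {
        double col = 0.0;  /* column sum; by symmetry this is also the row sum */
        for (int i = 0; i < n; ++i) col += fabs(A[i + j * lda]);
        if (col > v) v = col;
    }
    return v;
}

static double dense_sy_norm_fro(int n, const double *A, int lda) {
    double s = 0.0;
    for (int j = 0; j < n; ++j)
        for (int i = 0; i < n; ++i) s += A[i + j * lda] * A[i + j * lda];
    return sqrt(s);
}

int main(void) {
    /* 2x2 symmetric example in column-major storage: [[2,-1],[-1,3]] */
    double A[4] = {2.0, -1.0, -1.0, 3.0};
    printf("max = %g, one = inf = %g, fro = %g\n",
           dense_sy_norm_max(2, A, 2), dense_sy_norm_one(2, A, 2), dense_sy_norm_fro(2, A, 2));
    return 0;
}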
Contractor.h
/* Copyright (c) 2013, Project OSRM, Dennis Luxen, others All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifndef CONTRACTOR_H #define CONTRACTOR_H #include "TemporaryStorage.h" #include "../DataStructures/BinaryHeap.h" #include "../DataStructures/DeallocatingVector.h" #include "../DataStructures/DynamicGraph.h" #include "../DataStructures/Percent.h" #include "../DataStructures/XORFastHash.h" #include "../DataStructures/XORFastHashStorage.h" #include "../Util/OpenMPWrapper.h" #include "../Util/SimpleLogger.h" #include "../Util/StringUtil.h" #include <boost/assert.hpp> #include <algorithm> #include <limits> #include <vector> class Contractor { private: struct ContractorEdgeData { ContractorEdgeData() : distance(0), id(0), originalEdges(0), shortcut(0), forward(0), backward(0), is_original_via_node_ID(false) { } ContractorEdgeData(unsigned _distance, unsigned _originalEdges, unsigned _id, bool _shortcut, bool _forward, bool _backward) : distance(_distance), id(_id), originalEdges(std::min((unsigned)1 << 28, _originalEdges)), shortcut(_shortcut), forward(_forward), backward(_backward), is_original_via_node_ID(false) { } unsigned distance; unsigned id; unsigned originalEdges : 28; bool shortcut : 1; bool forward : 1; bool backward : 1; bool is_original_via_node_ID : 1; } data; struct ContractorHeapData { short hop; bool target; ContractorHeapData() : hop(0), target(false) {} ContractorHeapData(short h, bool t) : hop(h), target(t) {} }; typedef DynamicGraph<ContractorEdgeData> ContractorGraph; // typedef BinaryHeap< NodeID, NodeID, int, ContractorHeapData, ArrayStorage<NodeID, NodeID> // > ContractorHeap; typedef BinaryHeap<NodeID, NodeID, int, ContractorHeapData, XORFastHashStorage<NodeID, NodeID>> ContractorHeap; typedef ContractorGraph::InputEdge ContractorEdge; struct ContractorThreadData { ContractorHeap heap; std::vector<ContractorEdge> inserted_edges; std::vector<NodeID> neighbours; ContractorThreadData(NodeID nodes) : heap(nodes) {} }; struct NodePriorityData { int depth; NodePriorityData() : depth(0) {} }; struct ContractionStats { int edges_deleted_count; int edges_added_count; int original_edges_deleted_count; int original_edges_added_count; ContractionStats() : edges_deleted_count(0), edges_added_count(0), original_edges_deleted_count(0), original_edges_added_count(0) { } }; struct RemainingNodeData { 
RemainingNodeData() : id(0), is_independent(false) {} NodeID id : 31; bool is_independent : 1; }; public: template <class ContainerT> Contractor(int nodes, ContainerT &input_edge_list) { std::vector<ContractorEdge> edges; edges.reserve(input_edge_list.size() * 2); temp_edge_counter = 0; auto diter = input_edge_list.dbegin(); auto dend = input_edge_list.dend(); ContractorEdge new_edge; while (diter != dend) { new_edge.source = diter->source(); new_edge.target = diter->target(); new_edge.data = ContractorEdgeData((std::max)((int)diter->weight(), 1), 1, diter->id(), false, diter->isForward(), diter->isBackward()); BOOST_ASSERT_MSG(new_edge.data.distance > 0, "edge distance < 1"); #ifndef NDEBUG if (new_edge.data.distance > 24 * 60 * 60 * 10) { SimpleLogger().Write(logWARNING) << "Edge weight large -> " << new_edge.data.distance; } #endif edges.push_back(new_edge); std::swap(new_edge.source, new_edge.target); new_edge.data.forward = diter->isBackward(); new_edge.data.backward = diter->isForward(); edges.push_back(new_edge); ++diter; } // clear input vector and trim the current set of edges with the well-known swap trick input_edge_list.clear(); sort(edges.begin(), edges.end()); NodeID edge = 0; for (NodeID i = 0; i < edges.size();) { const NodeID source = edges[i].source; const NodeID target = edges[i].target; const NodeID id = edges[i].data.id; // remove eigenloops if (source == target) { i++; continue; } ContractorEdge forward_edge; ContractorEdge reverse_edge; forward_edge.source = reverse_edge.source = source; forward_edge.target = reverse_edge.target = target; forward_edge.data.forward = reverse_edge.data.backward = true; forward_edge.data.backward = reverse_edge.data.forward = false; forward_edge.data.shortcut = reverse_edge.data.shortcut = false; forward_edge.data.id = reverse_edge.data.id = id; forward_edge.data.originalEdges = reverse_edge.data.originalEdges = 1; forward_edge.data.distance = reverse_edge.data.distance = std::numeric_limits<int>::max(); // remove parallel edges while (i < edges.size() && edges[i].source == source && edges[i].target == target) { if (edges[i].data.forward) { forward_edge.data.distance = std::min(edges[i].data.distance, forward_edge.data.distance); } if (edges[i].data.backward) { reverse_edge.data.distance = std::min(edges[i].data.distance, reverse_edge.data.distance); } ++i; } // merge edges (s,t) and (t,s) into bidirectional edge if (forward_edge.data.distance == reverse_edge.data.distance) { if ((int)forward_edge.data.distance != std::numeric_limits<int>::max()) { forward_edge.data.backward = true; edges[edge++] = forward_edge; } } else { // insert seperate edges if (((int)forward_edge.data.distance) != std::numeric_limits<int>::max()) { edges[edge++] = forward_edge; } if ((int)reverse_edge.data.distance != std::numeric_limits<int>::max()) { edges[edge++] = reverse_edge; } } } std::cout << "merged " << edges.size() - edge << " edges out of " << edges.size() << std::endl; edges.resize(edge); contractor_graph = std::make_shared<ContractorGraph>(nodes, edges); edges.clear(); edges.shrink_to_fit(); BOOST_ASSERT(0 == edges.capacity()); // unsigned maxdegree = 0; // NodeID highestNode = 0; // // for(unsigned i = 0; i < contractor_graph->GetNumberOfNodes(); ++i) { // unsigned degree = contractor_graph->EndEdges(i) - // contractor_graph->BeginEdges(i); // if(degree > maxdegree) { // maxdegree = degree; // highestNode = i; // } // } // // SimpleLogger().Write() << "edges at node with id " << highestNode << " has degree // " << maxdegree; // for(unsigned i = 
contractor_graph->BeginEdges(highestNode); i < // contractor_graph->EndEdges(highestNode); ++i) { // SimpleLogger().Write() << " ->(" << highestNode << "," << // contractor_graph->GetTarget(i) // << "); via: " << contractor_graph->GetEdgeData(i).via; // } // Create temporary file edge_storage_slot = TemporaryStorage::GetInstance().AllocateSlot(); std::cout << "contractor finished initalization" << std::endl; } ~Contractor() { TemporaryStorage::GetInstance().DeallocateSlot(edge_storage_slot); } void Run() { const NodeID number_of_nodes = contractor_graph->GetNumberOfNodes(); Percent p(number_of_nodes); const unsigned thread_count = omp_get_max_threads(); std::vector<ContractorThreadData *> thread_data_list; for (unsigned thread_id = 0; thread_id < thread_count; ++thread_id) { thread_data_list.push_back(new ContractorThreadData(number_of_nodes)); } std::cout << "Contractor is using " << thread_count << " threads" << std::endl; NodeID number_of_contracted_nodes = 0; std::vector<RemainingNodeData> remaining_nodes(number_of_nodes); std::vector<float> node_priorities(number_of_nodes); std::vector<NodePriorityData> node_data(number_of_nodes); // initialize priorities in parallel #pragma omp parallel for schedule(guided) for (int x = 0; x < (int)number_of_nodes; ++x) { remaining_nodes[x].id = x; } std::cout << "initializing elimination PQ ..." << std::flush; #pragma omp parallel { ContractorThreadData *data = thread_data_list[omp_get_thread_num()]; #pragma omp parallel for schedule(guided) for (int x = 0; x < (int)number_of_nodes; ++x) { node_priorities[x] = EvaluateNodePriority(data, &node_data[x], x); } } std::cout << "ok" << std::endl << "preprocessing " << number_of_nodes << " nodes ..." << std::flush; bool flushed_contractor = false; while (number_of_nodes > 2 && number_of_contracted_nodes < number_of_nodes) { if (!flushed_contractor && (number_of_contracted_nodes > (number_of_nodes * 0.65))) { DeallocatingVector<ContractorEdge> new_edge_set; // this one is not explicitely // cleared since it goes out of // scope anywa std::cout << " [flush " << number_of_contracted_nodes << " nodes] " << std::flush; // Delete old heap data to free memory that we need for the coming operations for (ContractorThreadData *data : thread_data_list) { delete data; } thread_data_list.clear(); // Create new priority array std::vector<float> new_node_priority(remaining_nodes.size()); // this map gives the old IDs from the new ones, necessary to get a consistent graph // at the end of contraction orig_node_id_to_new_id_map.resize(remaining_nodes.size()); // this map gives the new IDs from the old ones, necessary to remap targets from the // remaining graph std::vector<NodeID> new_node_id_from_orig_id_map(number_of_nodes, UINT_MAX); // build forward and backward renumbering map and remap ids in remaining_nodes and // Priorities. 
for (unsigned new_node_id = 0; new_node_id < remaining_nodes.size(); ++new_node_id) { // create renumbering maps in both directions orig_node_id_to_new_id_map[new_node_id] = remaining_nodes[new_node_id].id; new_node_id_from_orig_id_map[remaining_nodes[new_node_id].id] = new_node_id; new_node_priority[new_node_id] = node_priorities[remaining_nodes[new_node_id].id]; remaining_nodes[new_node_id].id = new_node_id; } TemporaryStorage &temporary_storage = TemporaryStorage::GetInstance(); // walk over all nodes for (unsigned i = 0; i < contractor_graph->GetNumberOfNodes(); ++i) { const NodeID start = i; for (auto current_edge : contractor_graph->GetAdjacentEdgeRange(start)) { ContractorGraph::EdgeData &data = contractor_graph->GetEdgeData(current_edge); const NodeID target = contractor_graph->GetTarget(current_edge); if (UINT_MAX == new_node_id_from_orig_id_map[i]) { // Save edges of this node w/o renumbering. temporary_storage.WriteToSlot( edge_storage_slot, (char *)&start, sizeof(NodeID)); temporary_storage.WriteToSlot( edge_storage_slot, (char *)&target, sizeof(NodeID)); temporary_storage.WriteToSlot(edge_storage_slot, (char *)&data, sizeof(ContractorGraph::EdgeData)); ++temp_edge_counter; } else { // node is not yet contracted. // add (renumbered) outgoing edges to new DynamicGraph. ContractorEdge new_edge; new_edge.source = new_node_id_from_orig_id_map[start]; new_edge.target = new_node_id_from_orig_id_map[target]; new_edge.data = data; new_edge.data.is_original_via_node_ID = true; BOOST_ASSERT_MSG(UINT_MAX != new_node_id_from_orig_id_map[start], "new start id not resolveable"); BOOST_ASSERT_MSG(UINT_MAX != new_node_id_from_orig_id_map[target], "new target id not resolveable"); new_edge_set.push_back(new_edge); } } } // Delete map from old NodeIDs to new ones. new_node_id_from_orig_id_map.clear(); new_node_id_from_orig_id_map.shrink_to_fit(); // Replace old priorities array by new one node_priorities.swap(new_node_priority); // Delete old node_priorities vector std::vector<float>().swap(new_node_priority); // old Graph is removed contractor_graph.reset(); // create new graph std::sort(new_edge_set.begin(), new_edge_set.end()); contractor_graph = std::make_shared<ContractorGraph>(remaining_nodes.size(), new_edge_set); new_edge_set.clear(); flushed_contractor = true; // INFO: MAKE SURE THIS IS THE LAST OPERATION OF THE FLUSH! 
// reinitialize heaps and ThreadData objects with appropriate size for (unsigned thread_id = 0; thread_id < thread_count; ++thread_id) { thread_data_list.push_back( new ContractorThreadData(contractor_graph->GetNumberOfNodes())); } } const int last = (int)remaining_nodes.size(); #pragma omp parallel { // determine independent node set ContractorThreadData *const data = thread_data_list[omp_get_thread_num()]; #pragma omp for schedule(guided) for (int i = 0; i < last; ++i) { const NodeID node = remaining_nodes[i].id; remaining_nodes[i].is_independent = IsNodeIndependent(node_priorities, data, node); } } const auto first = stable_partition(remaining_nodes.begin(), remaining_nodes.end(), [](RemainingNodeData node_data) { return !node_data.is_independent; }); const int first_independent_node = first - remaining_nodes.begin(); // contract independent nodes #pragma omp parallel { ContractorThreadData *data = thread_data_list[omp_get_thread_num()]; #pragma omp for schedule(guided) nowait for (int position = first_independent_node; position < last; ++position) { NodeID x = remaining_nodes[position].id; ContractNode<false>(data, x); } std::sort(data->inserted_edges.begin(), data->inserted_edges.end()); } #pragma omp parallel { ContractorThreadData *data = thread_data_list[omp_get_thread_num()]; #pragma omp for schedule(guided) nowait for (int position = first_independent_node; position < last; ++position) { NodeID x = remaining_nodes[position].id; DeleteIncomingEdges(data, x); } } // insert new edges for (unsigned thread_id = 0; thread_id < thread_count; ++thread_id) { ContractorThreadData &data = *thread_data_list[thread_id]; for (const ContractorEdge &edge : data.inserted_edges) { auto current_edge_ID = contractor_graph->FindEdge(edge.source, edge.target); if (current_edge_ID < contractor_graph->EndEdges(edge.source)) { ContractorGraph::EdgeData &current_data = contractor_graph->GetEdgeData(current_edge_ID); if (current_data.shortcut && edge.data.forward == current_data.forward && edge.data.backward == current_data.backward && edge.data.distance < current_data.distance) { // found a duplicate edge with smaller weight, update it. 
current_data = edge.data; continue; } } contractor_graph->InsertEdge(edge.source, edge.target, edge.data); } data.inserted_edges.clear(); } // update priorities #pragma omp parallel { ContractorThreadData *data = thread_data_list[omp_get_thread_num()]; #pragma omp for schedule(guided) nowait for (int position = first_independent_node; position < last; ++position) { NodeID x = remaining_nodes[position].id; UpdateNodeNeighbours(node_priorities, node_data, data, x); } } // remove contracted nodes from the pool number_of_contracted_nodes += last - first_independent_node; remaining_nodes.resize(first_independent_node); std::vector<RemainingNodeData>(remaining_nodes).swap(remaining_nodes); // unsigned maxdegree = 0; // unsigned avgdegree = 0; // unsigned mindegree = UINT_MAX; // unsigned quaddegree = 0; // // for(unsigned i = 0; i < remaining_nodes.size(); ++i) { // unsigned degree = contractor_graph->EndEdges(remaining_nodes[i].first) // - // contractor_graph->BeginEdges(remaining_nodes[i].first); // if(degree > maxdegree) // maxdegree = degree; // if(degree < mindegree) // mindegree = degree; // // avgdegree += degree; // quaddegree += (degree*degree); // } // // avgdegree /= std::max((unsigned)1,(unsigned)remaining_nodes.size() ); // quaddegree /= std::max((unsigned)1,(unsigned)remaining_nodes.size() ); // // SimpleLogger().Write() << "rest: " << remaining_nodes.size() << ", max: " // << maxdegree << ", min: " << mindegree << ", avg: " << avgdegree << ", // quad: " << quaddegree; p.printStatus(number_of_contracted_nodes); } for (ContractorThreadData *data : thread_data_list) { delete data; } thread_data_list.clear(); } template <class Edge> inline void GetEdges(DeallocatingVector<Edge> &edges) { Percent p(contractor_graph->GetNumberOfNodes()); SimpleLogger().Write() << "Getting edges of minimized graph"; NodeID number_of_nodes = contractor_graph->GetNumberOfNodes(); if (contractor_graph->GetNumberOfNodes()) { Edge new_edge; for (NodeID node = 0; node < number_of_nodes; ++node) { p.printStatus(node); for (auto edge : contractor_graph->GetAdjacentEdgeRange(node)) { const NodeID target = contractor_graph->GetTarget(edge); const ContractorGraph::EdgeData &data = contractor_graph->GetEdgeData(edge); if (!orig_node_id_to_new_id_map.empty()) { new_edge.source = orig_node_id_to_new_id_map[node]; new_edge.target = orig_node_id_to_new_id_map[target]; } else { new_edge.source = node; new_edge.target = target; } BOOST_ASSERT_MSG(UINT_MAX != new_edge.source, "Source id invalid"); BOOST_ASSERT_MSG(UINT_MAX != new_edge.target, "Target id invalid"); new_edge.data.distance = data.distance; new_edge.data.shortcut = data.shortcut; if (!data.is_original_via_node_ID && !orig_node_id_to_new_id_map.empty()) { new_edge.data.id = orig_node_id_to_new_id_map[data.id]; } else { new_edge.data.id = data.id; } BOOST_ASSERT_MSG(new_edge.data.id != INT_MAX, // 2^31 "edge id invalid"); new_edge.data.forward = data.forward; new_edge.data.backward = data.backward; edges.push_back(new_edge); } } } contractor_graph.reset(); orig_node_id_to_new_id_map.clear(); orig_node_id_to_new_id_map.shrink_to_fit(); BOOST_ASSERT(0 == orig_node_id_to_new_id_map.capacity()); TemporaryStorage &temporary_storage = TemporaryStorage::GetInstance(); // loads edges of graph before renumbering, no need for further numbering action. 
NodeID start; NodeID target; ContractorGraph::EdgeData data; Edge restored_edge; for (unsigned i = 0; i < temp_edge_counter; ++i) { temporary_storage.ReadFromSlot(edge_storage_slot, (char *)&start, sizeof(NodeID)); temporary_storage.ReadFromSlot(edge_storage_slot, (char *)&target, sizeof(NodeID)); temporary_storage.ReadFromSlot( edge_storage_slot, (char *)&data, sizeof(ContractorGraph::EdgeData)); restored_edge.source = start; restored_edge.target = target; restored_edge.data.distance = data.distance; restored_edge.data.shortcut = data.shortcut; restored_edge.data.id = data.id; restored_edge.data.forward = data.forward; restored_edge.data.backward = data.backward; edges.push_back(restored_edge); } temporary_storage.DeallocateSlot(edge_storage_slot); } private: inline void Dijkstra(const int max_distance, const unsigned number_of_targets, const int maxNodes, ContractorThreadData *const data, const NodeID middleNode) { ContractorHeap &heap = data->heap; int nodes = 0; unsigned number_of_targets_found = 0; while (heap.Size() > 0) { const NodeID node = heap.DeleteMin(); const int distance = heap.GetKey(node); const short current_hop = heap.GetData(node).hop + 1; if (++nodes > maxNodes) { return; } // Destination settled? if (distance > max_distance) { return; } if (heap.GetData(node).target) { ++number_of_targets_found; if (number_of_targets_found >= number_of_targets) { return; } } // iterate over all edges of node for (auto edge : contractor_graph->GetAdjacentEdgeRange(node)) { const ContractorEdgeData &data = contractor_graph->GetEdgeData(edge); if (!data.forward) { continue; } const NodeID to = contractor_graph->GetTarget(edge); if (middleNode == to) { continue; } const int to_distance = distance + data.distance; // New Node discovered -> Add to Heap + Node Info Storage if (!heap.WasInserted(to)) { heap.Insert(to, to_distance, ContractorHeapData(current_hop, false)); } // Found a shorter Path -> Update distance else if (to_distance < heap.GetKey(to)) { heap.DecreaseKey(to, to_distance); heap.GetData(to).hop = current_hop; } } } } inline float EvaluateNodePriority(ContractorThreadData *const data, NodePriorityData *const node_data, const NodeID node) { ContractionStats stats; // perform simulated contraction ContractNode<true>(data, node, &stats); // Result will contain the priority float result; if (0 == (stats.edges_deleted_count * stats.original_edges_deleted_count)) { result = 1 * node_data->depth; } else { result = 2 * (((float)stats.edges_added_count) / stats.edges_deleted_count) + 4 * (((float)stats.original_edges_added_count) / stats.original_edges_deleted_count) + 1 * node_data->depth; } BOOST_ASSERT(result >= 0); return result; } template <bool RUNSIMULATION> inline bool ContractNode(ContractorThreadData *data, NodeID node, ContractionStats *stats = NULL) { ContractorHeap &heap = data->heap; int inserted_edges_size = data->inserted_edges.size(); std::vector<ContractorEdge> &inserted_edges = data->inserted_edges; for (auto in_edge : contractor_graph->GetAdjacentEdgeRange(node)) { const ContractorEdgeData &in_data = contractor_graph->GetEdgeData(in_edge); const NodeID source = contractor_graph->GetTarget(in_edge); if (RUNSIMULATION) { BOOST_ASSERT(stats != NULL); ++stats->edges_deleted_count; stats->original_edges_deleted_count += in_data.originalEdges; } if (!in_data.backward) { continue; } heap.Clear(); heap.Insert(source, 0, ContractorHeapData()); int max_distance = 0; unsigned number_of_targets = 0; for (auto out_edge : contractor_graph->GetAdjacentEdgeRange(node)) { const 
ContractorEdgeData &out_data = contractor_graph->GetEdgeData(out_edge); if (!out_data.forward) { continue; } const NodeID target = contractor_graph->GetTarget(out_edge); const int path_distance = in_data.distance + out_data.distance; max_distance = std::max(max_distance, path_distance); if (!heap.WasInserted(target)) { heap.Insert(target, INT_MAX, ContractorHeapData(0, true)); ++number_of_targets; } } if (RUNSIMULATION) { Dijkstra(max_distance, number_of_targets, 1000, data, node); } else { Dijkstra(max_distance, number_of_targets, 2000, data, node); } for (auto out_edge : contractor_graph->GetAdjacentEdgeRange(node)) { const ContractorEdgeData &out_data = contractor_graph->GetEdgeData(out_edge); if (!out_data.forward) { continue; } const NodeID target = contractor_graph->GetTarget(out_edge); const int path_distance = in_data.distance + out_data.distance; const int distance = heap.GetKey(target); if (path_distance < distance) { if (RUNSIMULATION) { BOOST_ASSERT(stats != NULL); stats->edges_added_count += 2; stats->original_edges_added_count += 2 * (out_data.originalEdges + in_data.originalEdges); } else { ContractorEdge new_edge; new_edge.source = source; new_edge.target = target; new_edge.data = ContractorEdgeData(path_distance, out_data.originalEdges + in_data.originalEdges, node /*, 0, in_data.turnInstruction*/, true, true, false); ; inserted_edges.push_back(new_edge); std::swap(new_edge.source, new_edge.target); new_edge.data.forward = false; new_edge.data.backward = true; inserted_edges.push_back(new_edge); } } } } if (!RUNSIMULATION) { int iend = inserted_edges.size(); for (int i = inserted_edges_size; i < iend; ++i) { bool found = false; for (int other = i + 1; other < iend; ++other) { if (inserted_edges[other].source != inserted_edges[i].source) { continue; } if (inserted_edges[other].target != inserted_edges[i].target) { continue; } if (inserted_edges[other].data.distance != inserted_edges[i].data.distance) { continue; } if (inserted_edges[other].data.shortcut != inserted_edges[i].data.shortcut) { continue; } inserted_edges[other].data.forward |= inserted_edges[i].data.forward; inserted_edges[other].data.backward |= inserted_edges[i].data.backward; found = true; break; } if (!found) { inserted_edges[inserted_edges_size++] = inserted_edges[i]; } } inserted_edges.resize(inserted_edges_size); } return true; } inline void DeleteIncomingEdges(ContractorThreadData *data, const NodeID node) { std::vector<NodeID> &neighbours = data->neighbours; neighbours.clear(); // find all neighbours for (auto e : contractor_graph->GetAdjacentEdgeRange(node)) { const NodeID u = contractor_graph->GetTarget(e); if (u != node) { neighbours.push_back(u); } } // eliminate duplicate entries ( forward + backward edges ) std::sort(neighbours.begin(), neighbours.end()); neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin()); for (int i = 0, e = (int)neighbours.size(); i < e; ++i) { contractor_graph->DeleteEdgesTo(neighbours[i], node); } } inline bool UpdateNodeNeighbours(std::vector<float> &priorities, std::vector<NodePriorityData> &node_data, ContractorThreadData *const data, const NodeID node) { std::vector<NodeID> &neighbours = data->neighbours; neighbours.clear(); // find all neighbours for (auto e : contractor_graph->GetAdjacentEdgeRange(node)) { const NodeID u = contractor_graph->GetTarget(e); if (u == node) { continue; } neighbours.push_back(u); node_data[u].depth = (std::max)(node_data[node].depth + 1, node_data[u].depth); } // eliminate duplicate entries ( forward + 
backward edges ) std::sort(neighbours.begin(), neighbours.end()); neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin()); // re-evaluate priorities of neighboring nodes for (const NodeID u : neighbours) { priorities[u] = EvaluateNodePriority(data, &(node_data)[u], u); } return true; } inline bool IsNodeIndependent( const std::vector<float> &priorities /*, const std::vector< NodePriorityData >& node_data*/, ContractorThreadData *const data, NodeID node) const { const float priority = priorities[node]; std::vector<NodeID> &neighbours = data->neighbours; neighbours.clear(); for (auto e : contractor_graph->GetAdjacentEdgeRange(node)) { const NodeID target = contractor_graph->GetTarget(e); if (node == target) { continue; } const float target_priority = priorities[target]; BOOST_ASSERT(target_priority >= 0); // found a neighbour with lower priority? if (priority > target_priority) { return false; } // tie breaking if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() && bias(node, target)) { return false; } neighbours.push_back(target); } std::sort(neighbours.begin(), neighbours.end()); neighbours.resize(std::unique(neighbours.begin(), neighbours.end()) - neighbours.begin()); // examine all neighbours that are at most 2 hops away for (const NodeID u : neighbours) { for (auto e : contractor_graph->GetAdjacentEdgeRange(u)) { const NodeID target = contractor_graph->GetTarget(e); if (node == target) { continue; } const float target_priority = priorities[target]; assert(target_priority >= 0); // found a neighbour with lower priority? if (priority > target_priority) { return false; } // tie breaking if (std::abs(priority - target_priority) < std::numeric_limits<float>::epsilon() && bias(node, target)) { return false; } } } return true; } // This bias function takes up 22 assembly instructions in total on X86 inline bool bias(const NodeID a, const NodeID b) const { unsigned short hasha = fast_hash(a); unsigned short hashb = fast_hash(b); // The compiler optimizes that to conditional register flags but without branching // statements! if (hasha != hashb) { return hasha < hashb; } return a < b; } std::shared_ptr<ContractorGraph> contractor_graph; std::vector<ContractorGraph::InputEdge> contracted_edge_list; unsigned edge_storage_slot; uint64_t temp_edge_counter; std::vector<NodeID> orig_node_id_to_new_id_map; XORFastHash fast_hash; }; #endif // CONTRACTOR_H
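A minimal driver sketch for the class above: construction ingests and normalizes the input edge list (the container is cleared afterwards), Run() contracts independent node sets in priority order, and GetEdges() exports the shortcut-augmented edge set. EdgeBasedEdge and QueryEdge are used here purely as illustrative placeholder types, not names defined in this header; any container exposing dbegin()/dend() whose elements provide source()/target()/weight()/id()/isForward()/isBackward() satisfies the constructor.

// Hypothetical driver; EdgeBasedEdge and QueryEdge are placeholders, not defined above.
void BuildContractionHierarchy(unsigned number_of_nodes,
                               DeallocatingVector<EdgeBasedEdge> &edge_based_edge_list,
                               DeallocatingVector<QueryEdge> &contracted_edge_list)
{
    // the constructor consumes edge_based_edge_list (it is cleared after normalization)
    Contractor contractor(number_of_nodes, edge_based_edge_list);
    contractor.Run();                          // contracts independent node sets by priority
    contractor.GetEdges(contracted_edge_list); // exports original plus shortcut edges
}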
ParallelCG.h
/* This file is part of NSEssentials. Use of this source code is granted via a BSD-style license, which can be found in License.txt in the repository root. @author Nico Schertler @author Misha Kazhdan */ #pragma once #ifdef HAVE_EIGEN #include <Eigen/Dense> // A parallel conjugate gradient solver complying to the Eigen // Sparse Solver concept. namespace nse { namespace math { template <typename Matrix> class ParallelCG { public: ParallelCG() : maxIterations(-1), m(nullptr), toleranceSq(-1) { } //Specifies the column range of the initial guess and the solution that you want to solve int solveColLowerInclusive = 0; int solveColUpperExclusive = -1; void setMaxIterations(int i) { maxIterations = i; } void setTolerance(double t) { toleranceSq = t * t; } int iterations() const { return _iterations; } void compute(const Matrix& m) { if (maxIterations == -1) maxIterations = m.rows(); if (toleranceSq == -1) toleranceSq = 1e-16; this->m = &m; //Calculate preconditioner invDiag.resize(m.rows()); for (int j = 0; j < m.outerSize(); ++j) { typename Matrix::InnerIterator it(m, j); while (it && it.index() != j) ++it; if (it && it.index() == j && it.value() != 0) invDiag(j) = 1.0f / it.value(); else invDiag(j) = 1; } } template <typename RHSType, typename SolutionType, typename Scalar = typename RHSType::Scalar> void solveWithGuess(const RHSType& rhs, const SolutionType& guess, SolutionType& solution) { Eigen::Matrix<Scalar, -1, 1> r(rhs.rows()), d(rhs.rows()), q(rhs.rows()), s(rhs.rows()); int upperCol = solveColUpperExclusive; if (upperCol < 0) upperCol = guess.cols(); assert(upperCol - solveColLowerInclusive == rhs.cols()); _iterations = 0; for (int col = solveColLowerInclusive; col < upperCol; ++col) { #pragma omp parallel for for (int i = 0; i < solution.rows(); ++i) solution.coeffRef(i, col) = guess.coeff(i, col); parallelMatrixMultiplyVector(*m, solution, col, r); Scalar rhsNormSq = 0; #pragma omp parallel for reduction( + : rhsNormSq) for (int i = 0; i < rhs.rows(); i++) { r(i) = rhs.coeff(i, col - solveColLowerInclusive) - r(i); d(i) = invDiag(i) * r(i); rhsNormSq += rhs.coeff(i, col - solveColLowerInclusive) * rhs.coeff(i, col - solveColLowerInclusive); } Scalar threshold = toleranceSq * rhsNormSq; Scalar delta_new = 0; #pragma omp parallel for reduction( + : delta_new ) for (int i = 0; i < rhs.rows(); i++) delta_new += r(i) * d(i); if (delta_new < threshold) { continue; } int it; for (it = 0; it < maxIterations && delta_new > threshold; it++) { parallelMatrixMultiplyVector(*m, d, 0, q); Scalar dDotQ = 0; #pragma omp parallel for reduction( + : dDotQ ) for (int i = 0; i < rhs.rows(); i++) dDotQ += d(i) * q(i); Scalar alpha = delta_new / dDotQ; #pragma omp parallel for for (int i = 0; i < rhs.rows(); i++) solution.coeffRef(i, col) = (typename SolutionType::Scalar)(solution.coeff(i, col) + d(i) * alpha); const int RESET_COUNT = 50; if ((it % RESET_COUNT) == (RESET_COUNT - 1)) { parallelMatrixMultiplyVector(*m, solution, col, r); #pragma omp parallel for for (int i = 0; i < rhs.rows(); i++) { r(i) = rhs.coeff(i, col - solveColLowerInclusive) - r(i); s(i) = invDiag(i) * r(i); } } else { #pragma omp parallel for for (int i = 0; i < rhs.rows(); i++) { r(i) = (typename RHSType::Scalar)(r(i) - q(i) * alpha); s(i) = invDiag(i) * r(i); } } Scalar delta_old = delta_new; delta_new = 0; #pragma omp parallel for reduction( + : delta_new ) for (int i = 0; i < rhs.rows(); i++) delta_new += r(i) * s(i); Scalar beta = delta_new / delta_old; #pragma omp parallel for for (int i = 0; i < rhs.rows(); i++) d(i) = 
(typename RHSType::Scalar)(s(i) + d(i) * beta); } _iterations += it; } //for every column _iterations /= upperCol - solveColLowerInclusive; } private: template <typename RHSType, typename SolutionType> void parallelMatrixMultiplyVector(const Matrix& m, const RHSType& x, int col, SolutionType& out) const { #pragma omp parallel for for (int row = 0; row < m.rows(); row++) { double accum = 0; for (typename Matrix::InnerIterator it(m, row); it; ++it) accum += it.value() * x.coeff(it.index(), col); out(row) = (typename SolutionType::Scalar)accum; } } Eigen::VectorXf invDiag; int maxIterations; double toleranceSq; const Matrix* m; int _iterations; }; } } #endif
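A minimal usage sketch for the solver above, assuming HAVE_EIGEN is defined and the header is reachable as "ParallelCG.h". The matrix should be symmetric positive definite (as CG requires); using a row-major sparse matrix also matches parallelMatrixMultiplyVector, which walks one inner vector per row.

// Sketch only: include path and matrix layout are assumptions, not taken from the header.
#include <Eigen/Sparse>
#include "ParallelCG.h"

typedef Eigen::SparseMatrix<double, Eigen::RowMajor> SpMat;

Eigen::VectorXd SolveSPD(const SpMat &A, const Eigen::VectorXd &b)
{
    nse::math::ParallelCG<SpMat> cg;
    cg.setTolerance(1e-8);
    cg.setMaxIterations((int)A.rows());
    cg.compute(A);                           // builds the inverse-diagonal (Jacobi) preconditioner

    Eigen::VectorXd x0 = Eigen::VectorXd::Zero(b.rows());  // initial guess
    Eigen::VectorXd x  = x0;
    cg.solveWithGuess(b, x0, x);             // solves column 0 of b into x
    return x;
}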
dem_structures_coupling_utilities.h
/* * Author: Miguel Angel Celigueta * * [email protected] */ #ifndef KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H #define KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H // /* External includes */ // System includes // Project includes #include "includes/variables.h" /* System includes */ #include <limits> #include <iostream> #include <iomanip> /* External includes */ #ifdef _OPENMP #include <omp.h> #endif /* Project includes */ #include "includes/define.h" #include "includes/model_part.h" #include "custom_conditions/RigidFace.h" #include "custom_conditions/RigidEdge.h" #include "DEM_application_variables.h" #include "dem_structures_coupling_application_variables.h" #include "custom_elements/spheric_continuum_particle.h" namespace Kratos { class DemStructuresCouplingUtilities { public: typedef ModelPart::NodesContainerType::ContainerType::iterator NodesIteratorType; KRATOS_CLASS_POINTER_DEFINITION(DemStructuresCouplingUtilities); /// Default constructor DemStructuresCouplingUtilities(){} /// Destructor virtual ~DemStructuresCouplingUtilities(){} //*************************************************************************************************************** //*************************************************************************************************************** void TransferStructuresSkinToDem(ModelPart& r_source_model_part, ModelPart& r_destination_model_part, Properties::Pointer props) { // std::string error = CheckProvidedProperties(props); const int dimension = r_source_model_part.GetProcessInfo()[DOMAIN_SIZE]; // if (error != "all_ok") KRATOS_ERROR << "The Dem Walls ModelPart has no valid Properties. Missing " << error << " . Exiting." << std::endl; r_destination_model_part.Conditions().Sort(); int id = 1; if (r_destination_model_part.Conditions().size()) id = (r_destination_model_part.ConditionsEnd()-1)->Id() + 1; ModelPart::ConditionsContainerType& source_conditions = r_source_model_part.Conditions(); // Adding conditions for (unsigned int i = 0; i < source_conditions.size(); i++) { ModelPart::ConditionsContainerType::iterator it = r_source_model_part.ConditionsBegin() + i; Geometry< Node<3> >::Pointer p_geometry = it->pGetGeometry(); Condition::Pointer cond; if (dimension == 2) { cond = Condition::Pointer(new RigidEdge2D(id, p_geometry, props)); } else { cond = Condition::Pointer(new RigidFace3D(id, p_geometry, props)); } cond->Set(DEMFlags::STICKY, true); r_destination_model_part.AddCondition(cond); //TODO: add all of them in a single sentence! AddConditions. Use a temporary PointerVector as a list (not std::vector!). 
id++; } // Adding nodes r_destination_model_part.AddNodes(r_source_model_part.NodesBegin(), r_source_model_part.NodesEnd()); } std::string CheckProvidedProperties(Properties::Pointer props) { std::vector<const Variable<double>* > list_of_variables_double_to_check = {&STATIC_FRICTION, &DYNAMIC_FRICTION, &FRICTION_DECAY, &WALL_COHESION, &SEVERITY_OF_WEAR, &IMPACT_WEAR_SEVERITY, &BRINELL_HARDNESS, &YOUNG_MODULUS, &POISSON_RATIO}; std::vector<const Variable<bool>* > list_of_variables_bool_to_check = {&COMPUTE_WEAR}; for (int i=0; i<(int)list_of_variables_double_to_check.size(); i++) { if(!props->Has(*list_of_variables_double_to_check[i])) return list_of_variables_double_to_check[i]->Name(); } for (int i=0; i<(int)list_of_variables_bool_to_check.size(); i++) { if(!props->Has(*list_of_variables_bool_to_check[i])) return list_of_variables_bool_to_check[i]->Name(); } return "all_ok"; } void SmoothLoadTrasferredToFem(ModelPart& r_model_part, const double portion_of_the_force_which_is_new) { #pragma omp parallel for for (int i=0; i<(int)r_model_part.Nodes().size(); i++) { auto node_it = r_model_part.NodesBegin() + i; array_1d<double, 3> averaged_force; array_1d<double, 3>& node_dem_load = node_it->FastGetSolutionStepValue(DEM_SURFACE_LOAD); noalias(averaged_force) = portion_of_the_force_which_is_new * node_dem_load + (1.0 - portion_of_the_force_which_is_new) * node_it->FastGetSolutionStepValue(DEM_SURFACE_LOAD, 1); noalias(node_dem_load) = averaged_force; } } void ComputeSandProduction(ModelPart& dem_model_part, ModelPart& outer_walls_model_part, const double time) { const std::string sand_prod_filename = "sand_production_graph.txt"; static std::ofstream ofs_sand_prod_file; static bool first_time_entered = true; if (first_time_entered) { ofs_sand_prod_file.open(sand_prod_filename, std::ofstream::out | std::ofstream::trunc); first_time_entered = false; } ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements(); double current_total_mass_in_grams = 0.0; for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; Element* raw_p_element = &(*it); SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element); if (p_sphere->Is(ISOLATED)) continue; const double particle_density = p_sphere->GetDensity(); const double particle_volume = p_sphere->CalculateVolume(); current_total_mass_in_grams += particle_volume * particle_density * 1.0e3; } static const double initial_total_mass_in_grams = current_total_mass_in_grams; const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams; //ModelPart::ConditionsContainerType::iterator condition_begin = outer_walls_model_part.ConditionsBegin(); //const double face_pressure_in_psi = condition_begin->GetValue(POSITIVE_FACE_PRESSURE) * 0.000145; ProcessInfo& r_process_info = dem_model_part.GetProcessInfo(); const double Pascals_to_psi_factor = 0.000145; const double face_pressure_in_psi = fabs(r_process_info[TARGET_STRESS_Z]) * Pascals_to_psi_factor; static std::ofstream sand_prod_file("sand_production_graph.txt", std::ios_base::out | std::ios_base::app); sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n'; sand_prod_file.flush(); } void MarkBrokenSpheres(ModelPart& dem_model_part) { ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements(); for (unsigned int k = 0; k < pElements.size(); k++) { 
ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; Element* raw_p_element = &(*it); SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(raw_p_element); if (p_sphere->Is(ISOLATED)) continue; bool go_to_next_particle = false; for (unsigned int i = 0; i < p_sphere->mContinuumInitialNeighborsSize; i++) { if (!p_sphere->mIniNeighbourFailureId[i]) { go_to_next_particle = true; break; } } if (go_to_next_particle) continue; else p_sphere->Set(ISOLATED, true); } } void ComputeSandProductionWithDepthFirstSearchNonRecursiveImplementation(ModelPart& dem_model_part, ModelPart& outer_walls_model_part, const double time) { const std::string sand_prod_filename = "sand_production_graph_with_chunks_non_recursive.txt"; static std::ofstream ofs_sand_prod_file; const std::string granulometry_distr_filename = "granulometry_distribution.txt"; static std::ofstream ofs_granulometry_distr_file; static bool first_time_entered = true; if (first_time_entered) { ofs_sand_prod_file.open(sand_prod_filename, std::ofstream::out | std::ofstream::trunc); ofs_granulometry_distr_file.open(granulometry_distr_filename, std::ofstream::out | std::ofstream::trunc); first_time_entered = false; } ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements(); std::vector<double> chunks_masses; for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; it->Set(VISITED, false); } std::vector<SphericContinuumParticle*> stack_of_particles_to_check; for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; Element* raw_p_element = &(*it); SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(raw_p_element); double this_chunk_mass = 0.0; stack_of_particles_to_check.push_back(p_sphere); while (stack_of_particles_to_check.size()) { SphericContinuumParticle* current_particle = stack_of_particles_to_check.back(); stack_of_particles_to_check.pop_back(); if (current_particle->Is(VISITED)) continue; const double particle_density = current_particle->GetDensity(); const double particle_volume = current_particle->CalculateVolume(); this_chunk_mass += particle_volume * particle_density * 1.0e3; current_particle->Set(VISITED, true); for (size_t i = 0; i < current_particle->mContinuumInitialNeighborsSize; i++) { SphericParticle* p_neighbour_sphere = current_particle->mNeighbourElements[i]; if (p_neighbour_sphere == NULL) continue; if (p_neighbour_sphere->Is(VISITED)) continue; //not necessary, but saves increasing and decreasing stack_of_particles_to_check's size if (current_particle->mIniNeighbourFailureId[i]) continue; auto existing_element_it = dem_model_part.GetMesh(0).Elements().find(p_neighbour_sphere->Id()); if (existing_element_it == dem_model_part.GetMesh(0).ElementsEnd()) continue; SphericContinuumParticle* p_neigh_cont_sphere = dynamic_cast<SphericContinuumParticle*>(p_neighbour_sphere); stack_of_particles_to_check.push_back(p_neigh_cont_sphere); } } if (this_chunk_mass) chunks_masses.push_back(this_chunk_mass); } const double max_mass_of_a_single_chunck = *std::max_element(chunks_masses.begin(), chunks_masses.end()); const double current_total_mass_in_grams = max_mass_of_a_single_chunck; static const double initial_total_mass_in_grams = current_total_mass_in_grams; const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams; ProcessInfo& 
r_process_info = dem_model_part.GetProcessInfo(); const double Pascals_to_psi_factor = 0.000145; const double face_pressure_in_psi = fabs(r_process_info[TARGET_STRESS_Z]) * Pascals_to_psi_factor; ofs_sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n'; ofs_sand_prod_file.flush(); unsigned int number_of_time_steps_between_granulometry_prints = 1e9; static unsigned int printing_counter = 0; if (printing_counter == number_of_time_steps_between_granulometry_prints) { ofs_granulometry_distr_file << time; for (unsigned int k = 0; k < chunks_masses.size(); k++) ofs_granulometry_distr_file << " " << chunks_masses[k]; ofs_granulometry_distr_file << '\n'; printing_counter = 0; } printing_counter++; ofs_granulometry_distr_file.flush(); } void ComputeSandProductionWithDepthFirstSearch(ModelPart& dem_model_part, ModelPart& outer_walls_model_part, const double time) { const std::string filename = "sand_production_graph_with_chunks.txt"; std::ifstream ifile(filename.c_str()); static bool first_time_entered = true; if ((bool) ifile && first_time_entered) { std::remove(filename.c_str()); first_time_entered = false; } ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements(); std::vector<double> chunks_masses; for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; it->Set(VISITED, false); } for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; Element* raw_p_element = &(*it); SphericContinuumParticle* p_sphere = dynamic_cast<SphericContinuumParticle*>(raw_p_element); double this_chunk_mass = 0.0; if( it->IsNot(VISITED) ) { DepthFirstSearchVisit(p_sphere, this_chunk_mass); chunks_masses.push_back(this_chunk_mass); } } const double max_mass_of_a_single_chunck = *std::max_element(chunks_masses.begin(), chunks_masses.end()); const double current_total_mass_in_grams = max_mass_of_a_single_chunck; static const double initial_total_mass_in_grams = current_total_mass_in_grams; const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams; ModelPart::ConditionsContainerType::iterator condition_begin = outer_walls_model_part.ConditionsBegin(); const double Pascals_to_psi_factor = 0.000145; const double face_pressure_in_psi = condition_begin->GetValue(POSITIVE_FACE_PRESSURE) * Pascals_to_psi_factor; static std::ofstream sand_prod_file(filename, std::ios_base::out | std::ios_base::app); sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n'; sand_prod_file.flush(); } void DepthFirstSearchVisit(SphericContinuumParticle* p_sphere, double& this_chunk_mass) { p_sphere->Set(VISITED, true); const double particle_radius = p_sphere->GetRadius(); const double particle_density = p_sphere->GetDensity(); this_chunk_mass += (4.0/3.0) * Globals::Pi * particle_density * particle_radius * particle_radius * particle_radius * 1000.0; for (size_t i=0; i<p_sphere->mContinuumInitialNeighborsSize; i++) { SphericParticle* p_neighbour_sphere = p_sphere->mNeighbourElements[i]; if (p_neighbour_sphere==NULL) continue; if (p_sphere->mIniNeighbourFailureId[i]) continue; if (p_neighbour_sphere->IsNot(VISITED)) { SphericContinuumParticle* p_neigh_cont_sphere = dynamic_cast<SphericContinuumParticle*>(p_neighbour_sphere); DepthFirstSearchVisit(p_neigh_cont_sphere, this_chunk_mass); } } } void 
ComputeTriaxialSandProduction(ModelPart& dem_model_part, ModelPart& outer_walls_model_part_1, ModelPart& outer_walls_model_part_2, const double time) { const std::string filename = "sand_production_graph.txt"; std::ifstream ifile(filename.c_str()); static bool first_time_entered = true; if ((bool) ifile && first_time_entered) { std::remove(filename.c_str()); first_time_entered = false; } ModelPart::ElementsContainerType& pElements = dem_model_part.GetCommunicator().LocalMesh().Elements(); double current_total_mass_in_grams = 0.0; for (unsigned int k = 0; k < pElements.size(); k++) { ModelPart::ElementsContainerType::iterator it = pElements.ptr_begin() + k; Element* raw_p_element = &(*it); SphericParticle* p_sphere = dynamic_cast<SphericParticle*>(raw_p_element); if (p_sphere->Is(ISOLATED)) continue; const double particle_radius = p_sphere->GetRadius(); const double particle_density = p_sphere->GetDensity(); current_total_mass_in_grams += (4.0/3.0) * Globals::Pi * particle_density * particle_radius * particle_radius * particle_radius * 1000.0; } static const double initial_total_mass_in_grams = current_total_mass_in_grams; const double cumulative_sand_mass_in_grams = initial_total_mass_in_grams - current_total_mass_in_grams; ModelPart::ConditionsContainerType::iterator condition_begin_1 = outer_walls_model_part_1.ConditionsBegin(); ModelPart::ConditionsContainerType::iterator condition_begin_2 = outer_walls_model_part_2.ConditionsBegin(); const double Pascals_to_psi_factor = 0.000145; const double face_pressure_in_psi = (condition_begin_1->GetValue(POSITIVE_FACE_PRESSURE) + condition_begin_2->GetValue(POSITIVE_FACE_PRESSURE) + 3.45e6) * Pascals_to_psi_factor * 0.33333333333333; // 3.45e6 is the sigma_z constant pressure static std::ofstream sand_prod_file(filename, std::ios_base::out | std::ios_base::app); sand_prod_file << time << " " << face_pressure_in_psi << " " << cumulative_sand_mass_in_grams << '\n'; sand_prod_file.flush(); } //*************************************************************************************************************** //*************************************************************************************************************** /// Turn back information as a stemplate<class T, std::size_t dim> tring. virtual std::string Info() const { return ""; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const { } /// Print object's data. virtual void PrintData(std::ostream& rOStream) const { } protected: private: /// Assignment operator DemStructuresCouplingUtilities & operator=(DemStructuresCouplingUtilities const& rOther); ///@} }; // Class DemStructuresCouplingUtilities } // namespace Python. #endif // KRATOS_STRUCTURES_DEM_COUPLING_UTILITIES_H
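A standalone sketch of the blending rule used by SmoothLoadTrasferredToFem above, detached from the Kratos node and variable machinery: the load handed to the FEM skin is a convex combination of the freshly transferred DEM load and the previous step's value, weighted by portion_of_the_force_which_is_new.

// Illustration only; array_1d and the Kratos solution-step database are replaced by std::array.
#include <array>

std::array<double, 3> BlendLoad(const std::array<double, 3> &new_load,
                                const std::array<double, 3> &old_load,
                                double portion_of_the_force_which_is_new)
{
    std::array<double, 3> blended;
    for (int k = 0; k < 3; ++k)
        blended[k] = portion_of_the_force_which_is_new * new_load[k]
                   + (1.0 - portion_of_the_force_which_is_new) * old_load[k];
    return blended;
}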
weightedNorm1.c
/*

The MIT License (MIT)

Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

*/

#include <cmath>

extern "C"
void FUNC(weightedNorm1)(const dlong & Nblocks,
                         const dlong & N,
                         const dfloat * __restrict__ cpu_w,
                         const dfloat * __restrict__ cpu_a,
                         dfloat * __restrict__ cpu_wa)
{
  dfloat wa2 = 0;

#ifdef __NEKRS__OMP__
  #pragma omp parallel for reduction(+:wa2)
#endif
  for(int i=0;i<N;++i){
    const dfloat ai = cpu_a[i];
    const dfloat wi = cpu_w[i];
    wa2 += fabs(ai)*wi;
  }

  cpu_wa[0] = wa2;
}
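A plain-C++ reference for what the kernel above computes: the weighted l1 sum over all entries, written to cpu_wa[0]. The nekRS typedefs (dlong, dfloat) and the FUNC name-mangling macro come from the nekRS build system and are deliberately avoided here, so this sketch only mirrors the arithmetic.

// Reference computation equivalent to the kernel above: sum_i w_i * |a_i|.
#include <cmath>
#include <vector>

double weightedNorm1Reference(const std::vector<double> &w, const std::vector<double> &a)
{
    double wa = 0;
    for (std::size_t i = 0; i < a.size(); ++i)
        wa += std::fabs(a[i]) * w[i];
    return wa;
}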
fig4.68-master.c
/*
DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER.

Copyright 2009 Sun Microsystems, Inc. All rights reserved.

The contents of this file are subject to the terms of the BSD License("BSD")(the "License").
You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt

The BSD License

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 * Redistribution of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
 * Redistribution in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
 * Neither the name of Sun Microsystems, Inc. or the names of contributors
   may be used to endorse or promote products derived from this software
   without specific prior written permission.

This software is provided "AS IS," without a warranty of any kind. ALL EXPRESS
OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY IMPLIED
WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR
NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND ITS
LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT
OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO
EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR
DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE
DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT
OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED
OF THE POSSIBILITY OF SUCH DAMAGES.

You acknowledge that this software is not designed, licensed or intended for
use in the design, construction, operation or maintenance of any nuclear facility.
*/

#include <stdio.h>
#include <stdlib.h>

#ifdef _OPENMP
#include <omp.h>
#define TRUE  1
#define FALSE 0
#else
#define omp_get_thread_num()  0
#define omp_get_num_threads() 1
#endif

int main()
{
   int n = 9;
   int i, a, b[n];

#ifdef _OPENMP
   (void) omp_set_dynamic(FALSE);
   if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");}
   (void) omp_set_num_threads(4);
#endif

   for (i=0; i<n; i++)
      b[i] = -1;

#pragma omp parallel shared(a,b) private(i)
   {
      #pragma omp master
      {
         a = 10;
         printf("Master construct is executed by thread %d\n",
                omp_get_thread_num());
      }

      #pragma omp barrier

      #pragma omp for
      for (i=0; i<n; i++)
         b[i] = a;

   } /*-- End of parallel region --*/

   printf("After the parallel region:\n");
   for (i=0; i<n; i++)
      printf("b[%d] = %d\n",i,b[i]);

   return(0);
}
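A variant sketch, not part of the original figure: the explicit barrier above is required because the master construct carries no implied barrier, so the worksharing loop could otherwise read a before the master thread has written it. Replacing master with single makes the explicit barrier unnecessary, since single ends with an implicit barrier (unless nowait is given); any one thread may then perform the write.

   /* Variant of the parallel region above using single instead of master. */
   #pragma omp parallel shared(a,b) private(i)
   {
      #pragma omp single
      {
         a = 10;
         printf("Single construct is executed by thread %d\n",
                omp_get_thread_num());
      } /* implicit barrier at the end of single */

      #pragma omp for
      for (i=0; i<n; i++)
         b[i] = a;
   }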
debug_test_system.h
// ========================================================================== // SeqAn - The Library for Sequence Analysis // ========================================================================== // Copyright (c) 2006-2015, Knut Reinert, FU Berlin // Copyright (c) 2013 NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright // notice, this list of conditions and the following disclaimer in the // documentation and/or other materials provided with the distribution. // * Neither the name of Knut Reinert or the FU Berlin nor the names of // its contributors may be used to endorse or promote products derived // from this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL KNUT REINERT OR THE FU BERLIN BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT // LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY // OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. // // ========================================================================== // Author: Manuel Holtgrewe <[email protected]> // ========================================================================== // The SeqAn testing infrastructure. Based on ideas from the OpenMS // "ClassTest.h". // ========================================================================== // TODO(holtgrew): This could use some cleanup. // SEQAN_NO_GENERATED_FORWARDS #ifndef SEQAN_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_ #define SEQAN_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_ #include <iostream> // stdout, stderr #include <iomanip> #include <cstring> // strrpos #include <cstdlib> // exit() #include <cstdio> #include <cstdarg> // va_start, va_list, va_end #include <algorithm> // min() #include <set> #include <vector> #include <string> #include <typeinfo> #ifdef PLATFORM_WINDOWS #include <Windows.h> // DeleteFile() #else // #ifdef PLATFORM_WINDOWS #include <unistd.h> // unlink() #include <sys/stat.h> // mkdir() #include <dirent.h> // DIR #if SEQAN_HAS_EXECINFO #include <execinfo.h> // backtrace(), backtrace_symbols() #endif // #if SEQAN_HAS_EXECINFO #include <cxxabi.h> // __cxa_demangle() #include <signal.h> #endif // #ifdef PLATFORM_WINDOWS // ============================================================================ // Classes // ============================================================================ // ---------------------------------------------------------------------------- // Class Demangler // ---------------------------------------------------------------------------- // Holds the name of a given C++ type T. // NOTE(esiragusa): this class could become a subclass of CStyle String... 
namespace seqan { template <typename T> struct Demangler { #ifdef PLATFORM_GCC char *data_begin; #else const char *data_begin; #endif Demangler() { T t; _demangle(*this, t); } Demangler(T const & t) { _demangle(*this, t); } ~Demangler() { #ifdef PLATFORM_GCC free(data_begin); #endif } }; // ============================================================================ // Functions // ============================================================================ // ---------------------------------------------------------------------------- // Function _demangle(Demangler) // ---------------------------------------------------------------------------- template <typename T> inline void _demangle(Demangler<T> & me, T const & t) { #ifdef PLATFORM_GCC int status; me.data_begin = abi::__cxa_demangle(typeid(t).name(), NULL, NULL, &status); #else me.data_begin = typeid(t).name(); #endif } // ---------------------------------------------------------------------------- // Function toCString(Demangler) // ---------------------------------------------------------------------------- template <typename T> inline const char * toCString(Demangler<T> const & me) { return me.data_begin; } } /*! * @defgroup AssertMacros Assertion and Check Macros * @brief The assertion and check macros provided by SeqAn. * * Assertions are checks performed at runtime when debugging is enabled. Debugging is enabled by defining the * preprocessor symbol <tt>SEQAN_ENABLE_DEBUG</tt> as <tt>1</tt> (the default is to set it to <tt>0</tt> if the common C * macro <tt>NDEBUG</tt> is defined and to set it to <tt>1</tt> otherwise. When using the SeqAn build system or the * CMake FindSeqAn.cmake module, this is automatically set appropriately. * * The SEQAN_CHECK and SEQAN_FAIL macro always lead to an exit of the program with a non-0 return value. */ /*! * @macro AssertMacros#SEQAN_FAIL * @headerfile <seqan/basic.h> * @brief Force abortion of program, regardless of debugging settings. * * @signature SEQAN_FAIL(msg[, args]); * * @param[in] msg A format string. * @param[in] args An optional list of arguments that are used for filling msg. * * @section Remarks * * Use this if something really unexpected happens inside your functions and there is no way to report this through the * API. A good example would be logic errors, e.g. invalid values. * * @section Examples * * In the following example, the <tt>SEQAN_FAIL</tt> is there if a possible value is added to <tt>MyEnum</tt> but the * function <tt>foo</tt> is not updated accordingly. * * @code{.cpp} * enum MyEnum * { * VALUE_ONE, * VALUE_TWO * }; * * bool foo(MyEnum x) * { * switch (x) * { * case VALUE_ONE: * // do something * return true; * case VALUE_TWO: * // do something * return true; * } * * SEQAN_FAIL("Logic error. Should never reach here. x == %d.", x); * return false; * } * @endcode */ #define SEQAN_FAIL(...) \ do { \ ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \ __VA_ARGS__); \ ::seqan::ClassTest::fail(); \ } while (false) /*! * @macro AssertMacros#SEQAN_CHECK * @headerfile <seqan/basic.h> * @brief Force abortion of program if a condition is not met, regardless of debugging settings. * * @signature SEQAN_CHECK(condition, msg[, args]); * * @param[in] condition An expression that is checked. * @param[in] msg A format string. * @param[in] args An optional list of arguments. * * @section Remarks * * Use this if something really unexpected happens inside your functions and there is no way to report this through the * API. A good example would be logic errors, e.g. invalid values. 
* * @section Examples * * In the following example, the <tt>SEQAN_CHECK</tt> stops program execution if a value is added to <tt>MyEnum</tt> but * the function <tt>foo</tt> is not updated accordingly. * * @code{.cpp} * enum MyEnum * { * VALUE_ONE, * VALUE_TWO * }; * * bool foo(MyEnum x) * { * SEQAN_CHECK((x == VALUE_ONE || x == VALUE_TWO), "Invalid value for x == %d.", x); * * switch (x) * { * case VALUE_ONE: * // do something * return true; * case VALUE_TWO: * // do something * return true; * } * * return false; // Should never reach here, checked above with SEQAN_CHECK. * } * @endcode */ #define SEQAN_CHECK(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // SeqAn's has three global debug/testing levels: testing, debug and // release. Depending on the level, the SEQAN_ASSERT_* and // SEQAN_CHECKPOINT macros will be enabled. // // Note that this is independent of the <cassert> assertions and // NDEBUG being defined. // // The levels are enabled by the values of the macros // SEQAN_ENABLE_TESTING and SEQAN_ENABLE_DEBUG. By setting a macro to // 0, one disables the level and by setting the macro to 1, one // enables a level. Enabling testing also enables debug, overriding a // value of 0 for SEQAN_ENABLE_DEBUG. // // If the level is release (both the macros for debug and testing are // 0), the assertions will be disabled. If the level is debug then // the assertions will be enabled. If the level is testing then the // checkpoint macros will also be enabled. // // The default is to enable debugging but disable testing. // // You can print the current level using the function seqan::printDebugLevel(). /*! * @macro TestSystemMacros#SEQAN_ENABLE_TESTING * @headerfile <seqan/basic.h> * @brief Indicates whether testing is enabled. * * @signature SEQAN_ENABLE_TESTING * * When set to 1, testing is enabled. If it is undefined or set to 0, testing is disabled. This means the macros for * the tests (SEQAN_BEGIN_TESTSUITE, SEQAN_DEFINE_TEST, SEQAN_CALL_TEST, and SEQAN_END_TESTSUITE) will be enabled. This * makes failing assertions raise exceptions instead of calling <tt>abort()</tt> (which terminates the program). * * By default, this is set to 0. * * If you want to change this value in your C++ program code you have to define this value before including any SeqAn header! * * If set to 1 then @link TestSystemMacros#SEQAN_ENABLE_DEBUG @endlink is forced to 1 as well. * * @see TestSystemMacros#SEQAN_ENABLE_DEBUG */ // Set default for SEQAN_ENABLE_TESTING. #ifndef SEQAN_ENABLE_TESTING #define SEQAN_ENABLE_TESTING 0 #endif // #ifndef SEQAN_ENABLE_TESTING /*! * @macro TestSystemMacros#SEQAN_ENABLE_DEBUG * @headerfile <seqan/basic.h> * @brief Indicates whether debugging is enabled. * * @signature SEQAN_ENABLE_DEBUG * * When enabled (set to 1) then debugging is enabled. This means the assertion macros are expanded to actual test code. * If debugging (and testing) is disabled then the SeqAn assertion macros expand to no instructions. * * By default, thi sis set to 0 if <tt>NDEBUG</tt> is defined and set to 1 if <tt>NDEBUG</tt> is not defined. * * If you want to change this value then you have to define this value before including any SeqAn header. * * Force-enabled if SEQAN_ENABLE_TESTING is set to 1. * * @see TestSystemMacros#SEQAN_ENABLE_TESTING */ // Set default for SEQAN_ENABLE_DEBUG. 
// Set default for SEQAN_ENABLE_DEBUG.
#ifndef SEQAN_ENABLE_DEBUG
#ifdef NDEBUG
#define SEQAN_ENABLE_DEBUG 0
#else  // #ifdef NDEBUG
#define SEQAN_ENABLE_DEBUG 1
#endif  // #ifdef NDEBUG
#endif  // #ifndef SEQAN_ENABLE_DEBUG

// Force-enable debugging if testing is enabled.
#if SEQAN_ENABLE_TESTING
#undef SEQAN_ENABLE_DEBUG
#define SEQAN_ENABLE_DEBUG 1
#endif  // #if SEQAN_ENABLE_TESTING

// Allow disabling checkpoints independent of testing.
#ifndef SEQAN_ENABLE_CHECKPOINTS
#define SEQAN_ENABLE_CHECKPOINTS 0  // SEQAN_ENABLE_TESTING
#endif  // #ifndef SEQAN_ENABLE_CHECKPOINTS

/*!
 * @macro TestSystemMacros#SEQAN_TYPEDEF_FOR_DEBUG
 * @headerfile <seqan/basic.h>
 * @brief Typedefs that are only used in debug mode have to be marked with this macro.
 *
 * @signature SEQAN_TYPEDEF_FOR_DEBUG
 *
 * @section Examples
 *
 * @code{.cpp}
 * typedef int TInt SEQAN_TYPEDEF_FOR_DEBUG;
 * @endcode
 */
#if !SEQAN_ENABLE_DEBUG
#define SEQAN_TYPEDEF_FOR_DEBUG SEQAN_UNUSED
#else
#define SEQAN_TYPEDEF_FOR_DEBUG
#endif

namespace seqan {

// SEQAN_CXX_FLAGS_ contains the compiler flags, SEQAN_CXX_FLAGS is a string
// literal with this value.
#if !defined(SEQAN_CXX_FLAGS_)
#define SEQAN_CXX_FLAGS_ SEQAN_CXX_FLAGS_NOT_SET
#endif  // !defined(SEQAN_CXX_FLAGS_)
#define SEQAN_MKSTRING_(str) # str
#define SEQAN_MKSTRING(str) SEQAN_MKSTRING_(str)
#define SEQAN_CXX_FLAGS SEQAN_MKSTRING(SEQAN_CXX_FLAGS_)
//#undef SEQAN_MKSTRING
//#undef SEQAN_MKSTRING_

/*!
 * @fn printDebugLevel
 * @headerfile <seqan/basic.h>
 * @brief Print the current SeqAn debug level and the compiler flags to the given stream.
 *
 * @signature void printDebugLevel(stream);
 *
 * @param[in,out] stream A std::ostream where the information about the levels is streamed to.
 */

template <typename TStream>
void printDebugLevel(TStream & stream)
{
    stream << "SEQAN_ENABLE_DEBUG == " << SEQAN_ENABLE_DEBUG << std::endl;
    stream << "SEQAN_ENABLE_TESTING == " << SEQAN_ENABLE_TESTING << std::endl;
    stream << "SEQAN_ENABLE_CHECKPOINTS == " << SEQAN_ENABLE_CHECKPOINTS << std::endl;
    stream << "SEQAN_CXX_FLAGS == \"" << SEQAN_CXX_FLAGS << "\"" << std::endl;
}

#if defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO

template <typename TSize>
void printStackTrace(TSize /*maxFrames*/)
{}

#else

// print a demangled stack backtrace of the caller function
// TODO(esiragusa): use Demangler.
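// Example (sketch): printing the current call stack from an error path.  The
// test system itself uses a depth of 20 frames (see ClassTest::fail() and
// signalHandlerPrintStackTrace() below); any small bound works.
//
//   seqan::printStackTrace(20);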
template <typename TSize> void printStackTrace(TSize maxFrames) { void * addrlist[256]; char temp[4096]; char addr[20]; char offset[20]; size_t size; int status; char * symname; char * demangled; std::cerr << std::endl << "stack trace:" << std::endl; int addrlist_len = backtrace(addrlist, maxFrames); char ** symbollist = backtrace_symbols(addrlist, addrlist_len); for (int i = 1; i < addrlist_len; ++i) { offset[0] = 0; addr[0] = 0; demangled = NULL; // LINUX FORMAT: // ./sam2svg [0x473b8c] // /lib/libc.so.6 [0x7f40d2526f60] // ./sam2svg(_Z2f3v+0x10) [0x47200c] // ./sam2svg(_Z2f2v+0xd) [0x472021] // ./sam2svg(main+0x1367) [0x4735fc] // /lib/libc.so.6(__libc_start_main+0xe6) [0x7f40d25131a6] // if (3 == sscanf(symbollist[i], "%*[^(](%4095[^+]+%[^)]) %s", temp, offset, addr)) { symname = temp; if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status))) { symname = demangled; } } // MAC OS X FORMAT: // 1 sam2svg 0x0000000100003a39 _ZN5seqanL28signalHandlerPrintStackTraceEi + 21 // 2 libSystem.B.dylib 0x00007fff87a6d67a _sigtramp + 26 // 3 libSystem.B.dylib 0x00007fff87a76df7 tiny_free_do_recirc_to_depot + 980 // 4 sam2svg 0x00000001000021b9 _Z2f2v + 9 // 5 sam2svg 0x00000001000034b1 main + 4546 // 6 sam2svg 0x0000000100002190 start + 52 else if (3 == sscanf(symbollist[i], "%*d %*s %s %s %*s %s", addr, temp, offset)) { symname = temp; if (NULL != (demangled = abi::__cxa_demangle(temp, NULL, &size, &status))) { symname = demangled; } } // LINUX FORMAT: // ./sam2svg [0x473b8c] // /lib/libc.so.6 [0x7f40d2526f60] else if (2 == sscanf(symbollist[i], "%s %s", temp, addr)) { symname = temp; } // DEFAULT: else { symname = symbollist[i]; } std::cerr << std::setw(3) << i - 1; std::cerr << std::setw(20) << addr; std::cerr << " " << symname; if (offset[0] != 0) std::cerr << " + " << offset; std::cerr << std::endl; free(demangled); } std::cerr << std::endl; // Only the array must be freed according to man page, not the contents. free(symbollist); } static void signalHandlerPrintStackTrace(int signum) { std::cerr << std::endl; printStackTrace(20); signal(signum, SIG_DFL); kill(getpid(), signum); } inline int _deploySignalHandlers() { signal(SIGSEGV, signalHandlerPrintStackTrace); // segfault signal(SIGFPE, signalHandlerPrintStackTrace); // divide by zero // ... return 0; } #if SEQAN_ENABLE_DEBUG // automatically deploy signal handlers that output the stack trace on a trap (in debug mode) template <typename T> struct SignalHandlersDummy_ { static const int i; }; template <typename T> const int SignalHandlersDummy_<T>::i = _deploySignalHandlers(); namespace { #ifdef __clang__ #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wunused-variable" #endif // ifdef __clang__ volatile int signalHandlersDummy_ = SignalHandlersDummy_<void>::i; #ifdef __clang__ #pragma clang diagnostic pop #endif // ifdef __clang__ } #endif // #if SEQAN_ENABLE_DEBUG #endif // defined(PLATFORM_WINDOWS) || !SEQAN_HAS_EXECINFO // Namespace for the testing infrastructure. // // This namespace contains the variables and functions that are used // in the macros below to perform the tests. namespace ClassTest { // Raised when an assertion fails in test mode. struct AssertionFailedException {}; // Container for static global data for the tests. struct StaticData { // Number of tests that were run. static int & testCount() { static int result = 0; return result; } // Number of errors that occurred. static int & errorCount() { static int result = 0; return result; } // Number of skipped tests. 
static int & skippedCount() { static int result = 0; return result; } // Flag whether there was an error in this test. static bool & thisTestOk() { static bool result = 0; return result; } // Flag whether this test was skipped. static bool & thisTestSkipped() { static bool result = 0; return result; } // Name of the current test. static const char * & currentTestName() { const char * defaultValue = ""; static const char * result = const_cast<char *>(defaultValue); return result; } // Base path to the binary. Extrapolated from __FILE__. static char * & basePath() { const char * defaultValue = "."; static char * result = const_cast<char *>(defaultValue); return result; } static char const * _computePathToRoot() { // Get path to include. const char * file = __FILE__; int pos = -1; for (size_t i = 0; i < strlen(file) - strlen("include"); ++i) { if (strncmp(file + i, "include", strlen("include")) == 0) { pos = i; } } for (; pos > 0 && *(file + pos - 1) != '/' && *(file + pos - 1) != '\\'; --pos) continue; if (pos == -1) { std::cerr << "Could not extrapolate path to repository from __FILE__ == \"" << __FILE__ << "\"" << std::endl; exit(1); } static char buffer[1024]; strncpy(&buffer[0], file, pos); buffer[pos - 1] = '\0'; return &buffer[0]; } // Base path to the directory containing "core" and "extras." // Extrapolated from __FILE__. static char const * pathToRoot() { const char * result = 0; if (!result) result = _computePathToRoot(); return result; } // Total number of checkpoints in header file. static int & totalCheckPointCount() { static int result = 0; return result; } // Total number of checkpoints found in binary files. static int & foundCheckPointCount() { static int result = 0; return result; } // Names of temporary files as returned by tempFileName. This // global state is used to remove any existing such files // after completing the testsuite. static::std::vector<std::string> & tempFileNames() { static::std::vector<std::string> filenames; return filenames; } }; // Open a temporary file, unlink it, return posix handle. Note: This has not been tested yet. // TODO(holtgrew): Not used yet and Windows code does not work. /* inline int openTempFile() { #ifdef PLATFORM_WINDOWS char * fileName = _tempnam(NULL, "SQN"); if (!fileName) { ::std::cerr << "Cannot create a unique temporary filename" << ::std::endl; exit(1); } int result = open(fileName, _O_RDWR | OPEN_TEMPORARY); free(fileName); return result; #else // A Unix... char filenameBuffer[100]; strcpy(filenameBuffer, "/tmp/SEQANXXXXXXXXXX"); int result = mkstemp(filenameBuffer); unlink(filenameBuffer); return result; #endif // ifdef PLATFORM_WINDOWS } */ // Return the path to a temporary file, in a static buffer in this // function. This is not thread safe! inline const char * tempFileName() { static char fileNameBuffer[1000]; #ifdef PLATFORM_WINDOWS static char filePathBuffer[1000]; // Gets the temp path env string (no guarantee it's a valid path). 
DWORD dwRetVal = 0; dwRetVal = GetTempPath(1000, // length of the buffer filePathBuffer); // buffer for path if (dwRetVal > 1000 || (dwRetVal == 0)) { std::cerr << "GetTempPath failed" << std::endl; exit(1); } UINT uRetVal = 0; uRetVal = GetTempFileName(filePathBuffer, // directory for tmp files TEXT("SEQAN."), // temp file name prefix 0, // create unique name fileNameBuffer); // buffer for name if (uRetVal == 0) { std::cerr << "GetTempFileName failed" << std::endl; exit(1); } DeleteFile(fileNameBuffer); CreateDirectoryA(fileNameBuffer, NULL); StaticData::tempFileNames().push_back(fileNameBuffer); strcat(fileNameBuffer, "\\test_file"); return fileNameBuffer; #else // ifdef PLATFORM_WINDOWS_VS strcpy(fileNameBuffer, "/tmp/SEQAN.XXXXXXXXXXXXXXXXXXXX"); mode_t cur_umask = umask(S_IRWXO | S_IRWXG); // to silence Coverity warning int _tmp = mkstemp(fileNameBuffer); (void) _tmp; umask(cur_umask); unlink(fileNameBuffer); mkdir(fileNameBuffer, 0777); StaticData::tempFileNames().push_back(fileNameBuffer); strcat(fileNameBuffer, "/test_file"); return fileNameBuffer; #endif // ifdef PLATFORM_WINDOWS } // Initialize the testing infrastructure. // // Used through SEQAN_BEGIN_TESTSUITE(test_name) inline void beginTestSuite(const char * testSuiteName, const char * argv0) { // First things first: Print test suite name and current debug level. std::cout << "TEST SUITE " << testSuiteName << std::endl; printDebugLevel(std::cout); (void)testSuiteName; StaticData::testCount() = 0; StaticData::skippedCount() = 0; StaticData::errorCount() = 0; StaticData::totalCheckPointCount() = 0; StaticData::foundCheckPointCount() = 0; // Get path to argv0. const char * end = argv0; const char * ptr = std::min(strchr(argv0, '\\'), strchr(argv0, '/')); // On Windows, we can have both \ and /. for (; ptr != 0; ptr = std::min(strchr(ptr + 1, '\\'), strchr(ptr + 1, '/'))) end = ptr; int rpos = end - argv0; if (rpos <= 0) { StaticData::basePath() = new char[2]; strcpy(StaticData::basePath(), "."); } else { int len = rpos; StaticData::basePath() = new char[len]; strncpy(StaticData::basePath(), argv0, len); } #ifdef PLATFORM_WINDOWS_VS // Set CRT reporting such that everything goes to stderr and there are // no popups causing timeouts. _set_error_mode(_OUT_TO_STDERR); _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR); _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR); _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_FILE); _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR); #endif // PLATFORM_WINDOWS_VS } // Run test suite finalization. // // Used through SEQAN_END_TESTSUITE // // Prints a bottom banner with the error count and returns the // program's return code. 
inline int endTestSuite() { delete[] StaticData::basePath(); std::cout << "**************************************" << std::endl; std::cout << " Total Check Points : " << StaticData::totalCheckPointCount() << std::endl; std::cout << " Found Check Points : " << StaticData::foundCheckPointCount() << std::endl; std::cout << " Lost Check Points : " << StaticData::totalCheckPointCount() - StaticData::foundCheckPointCount() << std::endl; std::cout << "--------------------------------------" << std::endl; std::cout << " Total Tests: " << StaticData::testCount() << std::endl; std::cout << " Skipped: " << StaticData::skippedCount() << std::endl; std::cout << " Errors: " << StaticData::errorCount() << std::endl; std::cout << "**************************************" << std::endl; // TODO(holtgrew): Re-enable that all check points have to be found for the test to return 1; /* if (StaticData::totalCheckPointCount() != StaticData::foundCheckPointCount()) return 1; */ // Delete all temporary files that still exist. for (unsigned i = 0; i < StaticData::tempFileNames().size(); ++i) { #ifdef PLATFORM_WINDOWS HANDLE hFind; WIN32_FIND_DATA data; std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("\\*"); hFind = FindFirstFile(temp.c_str(), &data); if (hFind != INVALID_HANDLE_VALUE) { do { std::string tempp = StaticData::tempFileNames()[i].c_str() + std::string("\\") + data.cFileName; if (strcmp(data.cFileName, ".") == 0 || strcmp(data.cFileName, "..") == 0) continue; // Skip these. if (!DeleteFile(tempp.c_str())) std::cerr << "WARNING: Could not delete file " << tempp << "\n"; } while (FindNextFile(hFind, &data)); FindClose(hFind); } if (!RemoveDirectory(StaticData::tempFileNames()[i].c_str())) std::cerr << "WARNING: Could not delete directory " << StaticData::tempFileNames()[i] << "\n"; #else // #ifdef PLATFORM_WINDOWS DIR * dpdf; struct dirent * epdf; dpdf = opendir(StaticData::tempFileNames()[i].c_str()); if (dpdf != NULL) { while ((epdf = readdir(dpdf)) != NULL) { std::string temp = StaticData::tempFileNames()[i].c_str() + std::string("/") + std::string(epdf->d_name); unlink(temp.c_str()); } } rmdir(StaticData::tempFileNames()[i].c_str()); if (closedir(dpdf) != 0) std::cerr << "WARNING: Could not delete directory " << StaticData::tempFileNames()[i] << "\n"; #endif // #ifdef PLATFORM_WINDOWS } if (StaticData::errorCount() != 0) return 1; return 0; } // Run test initialization. inline void beginTest(const char * testName) { StaticData::currentTestName() = testName; StaticData::thisTestOk() = true; StaticData::thisTestSkipped() = false; StaticData::testCount() += 1; } // Run test finalization. inline void endTest() { if (StaticData::thisTestSkipped()) { std::cout << StaticData::currentTestName() << " SKIPPED" << std::endl; } else if (StaticData::thisTestOk()) { std::cout << StaticData::currentTestName() << " OK" << std::endl; } else { std::cerr << StaticData::currentTestName() << " FAILED" << std::endl; } } // Marks the current test as "skipped". inline void skipCurrentTest() { StaticData::thisTestSkipped() = true; StaticData::skippedCount() += 1; } // Called by the macro SEQAN_ASSERT_FAIL. inline void forceFail(const char * file, int line, const char * comment, ...) { StaticData::errorCount() += 1; std::cerr << file << ":" << line << " FAILED! "; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; } // Similar to forceFail above, but accepting a va_list parameter. 
inline void vforceFail(const char * file, int line, const char * comment, va_list argp) { StaticData::errorCount() += 1; std::cerr << file << ":" << line << " FAILED! "; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; } // Same as forceFail above, but with comment set to 0. inline void forceFail(const char * file, int line) { forceFail(file, line, 0); } // Called by the macro SEQAN_ASSERT_EQ. // // Tests that the given two value are equal. Returns true iff the // two values are equal. template <typename T1, typename T2> bool testEqual(char const * file, int line, T1 const & value1, char const * expression1, T2 const & value2, char const * expression2, char const * comment, ...) { if (!(value1 == value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " == " << expression2 << " was: " << value1 << " != " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testEqual above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 == value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " == " << expression2 << " was: " << value1 << " != " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testEqual above, but with comment set to 0. template <typename T1, typename T2> bool testEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testEqual(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_IN_DELTA. // // Tests that the given two value are equal. Returns true iff the // two values are equal. template <typename T1, typename T2, typename T3> bool testInDelta(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const T3 & value3, const char * expression3, const char * comment, ...) { if (!(value1 >= value2 - value3 && value1 <= value2 + value3)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " in [" << expression2 << " - " << expression3 << ", " << expression2 << " + " << expression3 << "] was: " << value1 << " not in [" << value2 - value3 << ", " << value2 + value3 << "]"; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testInDelta above, but accepts a va_list instead of variadic // parameters. 
template <typename T1, typename T2, typename T3> bool vtestInDelta(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const T3 & value3, const char * expression3, const char * comment, va_list argp) { if (!(value1 >= value2 - value3 && value1 <= value2 + value3)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " in [" << expression2 << " - " << expression3 << ", " << expression2 << " + " << expression3 << "] was: " << value1 << " not in [" << value2 - value3 << ", " << value2 + value3 << "]"; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testInDelta above, but with comment set to 0. template <typename T1, typename T2, typename T3> bool testInDelta(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const T3 & value3, const char * expression3) { return testInDelta(file, line, value1, expression1, value2, expression2, value3, expression3, 0); } // Called by the macro SEQAN_ASSERT_NEQ. // // Tests that the given two value are not equal. Returns true iff // the two values are equal. template <typename T1, typename T2> bool testNotEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 != value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " != " << expression2 << " was: " << value1 << " == " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testNotEqual above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestNotEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 != value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " != " << expression2 << " was: " << value1 << " == " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testNotEqual above, but with comment set to 0. template <typename T1, typename T2> bool testNotEqual(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testNotEqual(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_GEQ. // // Tests that the first value is greater than or equal to the // second one. Returns true iff the test yields true. 
template <typename T1, typename T2> bool testGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 >= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " >= " << expression2 << " was: " << value1 << " < " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testGeq above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 >= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " >= " << expression2 << " was: " << value1 << " < " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testGeq above, but with comment set to 0. template <typename T1, typename T2> bool testGeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testGeq(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_GT. // // Tests that the first value is greater than the second one. // Returns true iff the test yields true. template <typename T1, typename T2> bool testGt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 > value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " > " << expression2 << " was: " << value1 << " <= " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testGt above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestGt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 > value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " > " << expression2 << " was: " << value1 << " <= " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testGt above, but with comment set to 0. 
template <typename T1, typename T2> bool testGt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testGt(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_LEQ. // // Tests that the first value is less than or equal to the second // one. Returns true iff the test yields true. template <typename T1, typename T2> bool testLeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 <= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " <= " << expression2 << " was: " << value1 << " > " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testLeq above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestLeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 <= value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " <= " << expression2 << " was: " << value1 << " > " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testLeq above, but with comment set to 0. template <typename T1, typename T2> bool testLeq(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testLeq(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT_LT. // // Tests that the first value is greater than the second one. // Returns true iff the test yields true. template <typename T1, typename T2> bool testLt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, ...) { if (!(value1 < value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " < " << expression2 << " was: " << value1 << " >= " << value2; if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testLt above, but accepts a va_list instead of variadic // parameters. template <typename T1, typename T2> bool vtestLt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2, const char * comment, va_list argp) { if (!(value1 < value2)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. 
std::cerr << file << ":" << line << " Assertion failed : " << expression1 << " < " << expression2 << " was: " << value1 << " >= " << value2; if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testLt above, but comment is 0. template <typename T1, typename T2> bool testLt(const char * file, int line, const T1 & value1, const char * expression1, const T2 & value2, const char * expression2) { return testLt(file, line, value1, expression1, value2, expression2, 0); } // Called by the macro SEQAN_ASSERT. // // Test that the given argument evaluates to true. template <typename T> bool testTrue(const char * file, int line, const T & value_, const char * expression_, const char * comment, ...) { if (!(value_)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be true but was " << (value_); if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testTrue above, but accepts a va_list instead of variadic // parameters. template <typename T> bool vtestTrue(const char * file, int line, const T & value_, const char * expression_, const char * comment, va_list argp) { if (!(value_)) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be true but was " << (value_); if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testTrue above, but comment will automatically be set to 0. template <typename T> bool testTrue(const char * file, int line, const T & value_, const char * expression_) { return testTrue(file, line, value_, expression_, 0); } // Called by the macro SEQAN_ASSERT. // // Test that the given argument evaluates to false. template <typename T> bool testFalse(const char * file, int line, const T & value_, const char * expression_, const char * comment, ...) { if (value_) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be false but was " << (value_); if (comment) { std::cerr << " ("; va_list args; va_start(args, comment); vfprintf(stderr, comment, args); va_end(args); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Similar to testFalse above, but accepts a va_list instead of variadic // parameters. template <typename T> bool vtestFalse(const char * file, int line, const T & value_, const char * expression_, const char * comment, va_list argp) { if (value_) { // Increase global error count. StaticData::thisTestOk() = false; StaticData::errorCount() += 1; // Print assertion failure text, with comment if any is given. 
std::cerr << file << ":" << line << " Assertion failed : " << expression_ << " should be false but was " << (value_); if (comment) { std::cerr << " ("; vfprintf(stderr, comment, argp); std::cerr << ")"; } std::cerr << std::endl; return false; } return true; } // Same as testFalse above, but comment will automatically be set to 0. template <typename T> bool testFalse(const char * file, int line, const T & value_, const char * expression_) { return testFalse(file, line, value_, expression_, 0); } // Represents a check point in a file. struct CheckPoint { // Path to the file. const char * file; // Line in the file. unsigned int line; // Less-than comparator for check points. bool operator<(const CheckPoint & other) const { int c = strcmp(file, other.file); if (c < 0) return true; if (c == 0 && line < other.line) return true; return false; } }; // Wrapper for a set of check points. // TODO(holtgrew): Simply store the set? struct CheckPointStore { static::std::set<CheckPoint> & data() { static::std::set<CheckPoint> result; return result; } }; // Puts the given check point into the CheckPointStore's data. inline bool registerCheckPoint(unsigned int line, const char * file) { const char * file_name = strrchr(file, '/'); const char * file_name_2 = strrchr(file, '\\'); if (file_name_2 > file_name) file_name = file_name_2; if (!file_name) file_name = file; else ++file_name; CheckPoint cp = {file_name, line}; #ifdef _OMP #pragma omp critical #endif // #ifdef _OMP CheckPointStore::data().insert(cp); return true; } // Test whether the given check point exists in the check point // store. inline void testCheckPoint(const char * file, unsigned int line) { StaticData::totalCheckPointCount() += 1; CheckPoint cp = {file, line}; if (CheckPointStore::data().find(cp) == CheckPointStore::data().end()) { std::cerr << file << ":" << line << " -- Check point lost." << std::endl; return; } StaticData::foundCheckPointCount() += 1; } // Verify the check points for the given file. inline void verifyCheckPoints(const char * file) { char const * file_name = strrchr(file, '/'); char const * file_name_2 = strrchr(file, '\\'); if (file_name_2 > file_name) file_name = file_name_2; if (!file_name) file_name = file; else ++file_name; int len = strlen(StaticData::pathToRoot()) + strlen("/") + strlen(file) + 1; char * absolutePath = new char[len]; absolutePath[0] = '\0'; strcat(absolutePath, StaticData::pathToRoot()); strcat(absolutePath, "/"); strcat(absolutePath, file); FILE * fl = ::std::fopen(absolutePath, "r"); delete[] absolutePath; if (!fl) { std::cerr << file << " -- verifyCheckPoints could not find this file." << std::endl; } unsigned int line_number = 1; char buf[1 << 16]; while (::std::fgets(buf, sizeof(buf), fl)) { if (::std::strstr(buf, "SEQAN_CHECKPOINT")) { testCheckPoint(file_name, line_number); } ++line_number; } ::std::fclose(fl); } #if SEQAN_ENABLE_TESTING // If in testing mode then raise an AssertionFailedException. inline void fail() { StaticData::thisTestOk() = false; printStackTrace(20); throw AssertionFailedException(); } #else // If not in testing mode then quit with an abort. inline void fail() { printStackTrace(20); abort(); } #endif // #if SEQAN_ENABLE_TESTING } // namespace ClassTest /*! * @macro TestSystemMacros#SEQAN_DEFINE_TEST * @headerfile <seqan/basic.h> * @brief Expand to test definition. * * @signature SEQAN_DEFINE_TEST(test_name) * * This macro expands to the definition of a $void$ function with <tt>SEQAN_TEST_ + test_name</tt> as its name. 
* * @section Example * * @code{.cpp} * SEQAN_DEFINE_TEST(test_name) * { * SEQAN_ASSERT_LT(0, 3); * } * @endcode */ // This macro expands to function header for one test. #define SEQAN_DEFINE_TEST(test_name) \ template <bool speed_up_dummy_to_prevent_compilation_of_unused_tests_> \ void SEQAN_TEST_ ## test_name() /*! * @defgroup TestSystemMacros Test System Macros * @brief Macros for the test system. */ /*! * @macro TestSystemMacros#SEQAN_BEGIN_TESTSUITE * @headerfile <seqan/basic.h> * @brief Expand to a test suite beginning. * * @signature SEQAN_BEGIN_TESTSUITE(name) * * @param[in] name The name of the test suite. * * This macro expands to a <tt>main()</tt> function and some initialization code that sets up the test system. * * @section Examples * * @code{.cpp} * #include <seqan/basic.h> * * SEQAN_BEGIN_TESTSUITE(test_foo) * { * SEQAN_CALL_TEST(test_foo_my_test); * } * SEQAN_END_TESTSUITE * @endcode */ #if SEQAN_ENABLE_TESTING // This macro expands to startup code for a test file. #define SEQAN_BEGIN_TESTSUITE(suite_name) \ int main(int argc, char ** argv) { \ (void) argc; \ ::seqan::ClassTest::beginTestSuite(# suite_name, argv[0]); /*! * @macro TestSystemMacros#SEQAN_END_TESTSUITE * @headerfile <seqan/basic.h> * @brief Expand to test suite ending. * * @signature SEQAN_END_TESTSUITE * * This macro expands to finalization code for a test suite. * * @section Examples * * @code{.cpp} * #include <seqan/basic.h> * * SEQAN_BEGIN_TESTSUITE(test_foo) * { * SEQAN_CALL_TEST(test_foo_my_test); * } * SEQAN_END_TESTSUITE * @endcode */ // This macro expands to shutdown code for a test file. #define SEQAN_END_TESTSUITE \ return ::seqan::ClassTest::endTestSuite(); \ } /*! * @macro TestSystemMacros#SEQAN_CALL_TEST * @headerfile <seqan/basic.h> * @brief Expand to calling a test. * * @signature SEQAN_CALL_TEST(test_name); * * This expects the test to be defined with SEQAN_DEFINE_TEST. This macro will expand to code that calls the code * inside a try/catch block. Use this macro within a test suite, only. * * @section Examples * * @code{.cpp} * // Within a test suite. * SEQAN_CALL_TEST(test_name); * @endcode */ // This macro expands to code to call a given test. #define SEQAN_CALL_TEST(test_name) \ do { \ seqan::ClassTest::beginTest(# test_name); \ try { \ SEQAN_TEST_ ## test_name<true>(); \ } catch (seqan::ClassTest::AssertionFailedException e) { \ /* Swallow exception, go on with next test. */ \ (void) e; /* Get rid of unused variable warning. */ \ } catch (std::exception const & e) { \ std::cerr << "Unexpected exception of type " \ << toCString(seqan::Demangler<std::exception>(e)) \ << "; message: " << e.what() << "\n"; \ seqan::ClassTest::StaticData::thisTestOk() = false; \ seqan::ClassTest::StaticData::errorCount() += 1; \ } catch (...) { \ std::cerr << "Unexpected exception of unknown type\n"; \ seqan::ClassTest::StaticData::thisTestOk() = false; \ seqan::ClassTest::StaticData::errorCount() += 1; \ } \ seqan::ClassTest::endTest(); \ } while (false) /*! * @macro TestSystemMacros#SEQAN_SKIP_TEST * @headerfile <seqan/basic.h> * @brief Force the test to return without failing and mark it as skipped. * * @signature SEQAN_SKIP_TEST; * * @section Examples * * @code{.cpp} * SEQAN_DEFINE_TEST(test_skipped) * { * SEQAN_SKIP_TEST; * } * @endcode */ // This macro returns from the current function and logs a "skipped" // event for the current test. 
#define SEQAN_SKIP_TEST \ do { \ ::seqan::ClassTest::skipCurrentTest(); \ return; \ } while (false) #endif // #if SEQAN_ENABLE_TESTING // variadic macros are not supported by VS 2003 and before #if !defined(_MSC_VER) || (_MSC_VER >= 1400) #if SEQAN_ENABLE_DEBUG && !defined(__CUDA_ARCH__) /*! * @macro AssertMacros#SEQAN_ASSERT * @headerfile <seqan/basic.h> * @brief Test that the given expression can be coerced to <tt>true</tt>. * * @signature SEQAN_ASSERT(expression); * @signature SEQAN_ASSERT_MSG(expression, message[, parameters]); * * @param[in] expression An expression to check for being true. * @param[in] message A format string. * @param[in] parameters An optional list of parameters. * * @section Remarks * * The main advantage of this macro is that it prints the values of its argument on failures. Note that the * <tt>operator&lt;&lt;</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression * parameters. Otherwise, simply use the equivalent SEQAN_ASSERT @call. * * See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings. * * @section Examples * * @code{.cpp} * SEQAN_ASSERT(0); // will fail * SEQAN_ASSERT(1); // will run through * SEQAN_ASSERT_MSG(0, "message %d", 2); // Will fail with message. * @endcode */ /*! * @macro AssertMacros#SEQAN_ASSERT_NOT * @headerfile <seqan/basic.h> * @brief Test that the given expression can be coerced to <tt>false</tt>. * * @signature SEQAN_ASSERT_NOT(expression) * @signature SEQAN_ASSERT_NOT_MSG(expression, message[, parameters]) * * @param[in] expression An expression to check for being false. * @param[in] message A format string. * @param[in] parameters An optional list of parameters. * * @section Remarks * * The main advantage of this macro is that it prints the values of its argument on failures. Note that the * <tt>operator&lt;&lt;</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression * parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call. * * See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings. * * @section Examples * * @code{.cpp} * SEQAN_ASSERT_NOT(0); // will run through * SEQAN_ASSERT_NOT(1); // will fail * SEQAN_ASSERT_NOT_MSG(0, "msg %s", "test"); // will fail with message * @endcode */ /*! * @macro AssertMacros#SEQAN_ASSERT_EQ * @headerfile <seqan/basic.h> * @brief Test that two given expressions are equal, as defined by the matching call to the <tt>operator=(,)</tt>. * @signature SEQAN_ASSERT_EQ(expression1, expression2); * @signature SEQAN_ASSERT_EQ_MSG(expression1, expression2, comment[, parameters]); * * @param[in] expression1 The first expression. * @param[in] expression2 The second expression. * @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message * on failure. * @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string * <tt>comment</tt>. * * The main advantage of this macro is that it prints the values of its argument on failures. Note that the * <tt>operator&lt;&lt;</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression * parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call. * * See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings. 
* * @section Examples * * @code{.cpp} * SEQAN_ASSERT_EQ(0, false); // will run through * SEQAN_ASSERT_EQ(1, false); // will fail * SEQAN_ASSERT_EQ(1, "foo"); // will not compile * SEQAN_ASSERT_EQ_MSG(1, false, "msg"); // will fail with message * @endcode */ /*! * @macro AssertMacros#SEQAN_ASSERT_NEQ * @headerfile <seqan/basic.h> * @brief Test that two given expressions are not equal, as defined by the matching call to the <tt>operator!=(,)</tt>. * * @signature SEQAN_ASSERT_NEQ(expression1, expression2); * @signature SEQAN_ASSERT_NEQ_MSG(expression1, expression2, comment[, parameters]); * * @param[in] expression1 The first expression. * @param[in] expression2 The second expression. * @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message * on failure. * @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string * <tt>comment</tt>. * * The main advantage of this macro is that it prints the values of its argument on failures. Note that the * <tt>operator&lt;&lt;</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression * parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call. * * See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings. * * @section Examples * * @code{.cpp} * SEQAN_ASSERT_NEQ(0, false); // will fail * SEQAN_ASSERT_NEQ(1, false); // will run through * SEQAN_ASSERT_NEQ(1, "foo"); // will not compile * SEQAN_ASSERT_NEQ_MSG(1, false, "msg"); // will fail with message * @endcode */ /*! * @macro AssertMacros#SEQAN_ASSERT_LT * @headerfile <seqan/basic.h> * @brief Test that the two given expressions are in the less-than relation as defined by the matching call to * operator<(,). * * @signature SEQAN_ASSERT_LT(expression1, expression2); * @signature SEQAN_ASSERT_LT(expression1, expression2, comment[, parameters]); * * @param[in] expression1 The first expression. * @param[in] expression2 The second expression. * @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message * on failure. * @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string * <tt>comment</tt>. * * The main advantage of this macro is that it prints the values of its argument on failures. Note that the * <tt>operator&lt;&lt;</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression * parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call. * * See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings. * * @section Examples * * @code{.cpp} * SEQAN_ASSERT_LT(0, 1); // will run through * SEQAN_ASSERT_LT(1, 1); // will not run through * SEQAN_ASSERT_LT_MSG(1, 1, "msg"); // will fail with message * @endcode */ /*! * @macro AssertMacros#SEQAN_ASSERT_LEQ * * @brief Test that the two given expressions are in the less-than-or-equal * relation as defined by the matching call to operator<=(,). * * @signature SEQAN_ASSERT_LEQ(expression1, expression2) * @signature SEQAN_ASSERT_LEQ_MSG(expression1, expression2, comment[, * parameters]) * * @param[in] expression1 The first expression. * @param[in] expression2 The second expression. * @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message * on failure. * @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string * <tt>comment</tt>. 
* * The main advantage of this macro is that it prints the values of its argument * on failures. Note that the <tt>operator&lt;&lt;</tt> to the type of * <tt>std::cerr</tt> has to be defined for the type of both expression * parameters. Otherwise, simply use the equivalent SEQAN_ASSERT * call. * * See SEQAN_CHECK and SEQAN_FAIL for * (conditionally) aborting your program regardless of debug settings. * * @section Examples * * @code{.cpp} * SEQAN_ASSERT_LEQ(1, 1); // will run through * SEQAN_ASSERT_LEQ(1, 2); // will not run through * SEQAN_ASSERT_LEQ_MSG(1, 2, "msg"); // will fail with message * @endcode */ /*! * @macro AssertMacros#SEQAN_ASSERT_GT * * @brief Test that the two given expressions are in the greather-than relation * as defined by the matching call to operator>(,). * * @signature SEQAN_ASSERT_GT(expression1, expression2); * @signature SEQAN_ASSERT_GT_MSG(expression1, expression2, comment[, parameters]); * * @param[in] expression1 The first expression. * @param[in] expression2 The second expression. * @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message * on failure. * @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string * <tt>comment</tt>. * * The main advantage of this macro is that it prints the values of its argument * on failures. Note that the <tt>operator&lt;&lt;</tt> to the type of * <tt>std::cerr</tt> has to be defined for the type of both expression * parameters. Otherwise, simply use the equivalent SEQAN_ASSERT * call. * * See SEQAN_CHECK and SEQAN_FAIL for * (conditionally) aborting your program regardless of debug settings. * * @section Examples * * @code{.cpp} * SEQAN_ASSERT_GT(2, 1); // will run through * SEQAN_ASSERT_GT(1, 1); // will not run through * SEQAN_ASSERT_GT_MSG(1, 1, "msg"); // will fail with message * @endcode */ /*! * @macro AssertMacros#SEQAN_ASSERT_GEQ * * @brief Test that the two given expressions are in the greater-than-or-equal * relation as defined by the matching call to operator>=(,). * * @signature SEQAN_ASSERT_GEQ(expression1, expression2); * @signature SEQAN_ASSERT_GEQ_MSG(expression1, expression2, comment[, parameters]); * * @param[in] expression1 The first expression. * @param[in] expression2 The second expression. * @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message * on failure. * @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string * <tt>comment</tt>. * * The main advantage of this macro is that it prints the values of its argument on failures. Note that the * <tt>operator&lt;&lt;</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression * parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call. * * See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings. * * @section Examples * * @code{.cpp} * SEQAN_ASSERT_GEQ(1, 1); // will run through * SEQAN_ASSERT_GEQ(0, 1); // will not run through * SEQAN_ASSERT_GEQ_MSG(0, 1, "msg"); // will fail with message * @endcode */ /*! * @macro AssertMacros#SEQAN_ASSERT_IN_DELTA * * @brief Test that a value <tt>y</tt> lies within an <tt>delta</tt> environment of a value <tt>x</tt>. * * @signature SEQAN_ASSERT_IN_DELTA(x, y, delta); * @signature SEQAN_ASSERT_IN_DELTA_MSG(x, y, delta, comment[, parameters]); * * @param[in] x The value to center the environment in. 
* @param[in] y The value to check whether it falls within the environment. * @param[in] delta The environment size. * @param[in] comment A C-string (<tt>char const *</tt>) to use as a format string for printing a message * on failure. * @param[in] parameters An optional parameter that is put into <tt>printf()</tt> with format string * <tt>comment</tt>. * * The main advantage of this macro is that it prints the values of its argument on failures. Note that the * <tt>operator&lt;&lt;</tt> to the type of <tt>std::cerr</tt> has to be defined for the type of both expression * parameters. Otherwise, simply use the equivalent SEQAN_ASSERT call. * * See SEQAN_CHECK and SEQAN_FAIL for (conditionally) aborting your program regardless of debug settings. * * @section Examples * * @code{.cpp} * SEQAN_ASSERT_IN_DELTA(0, 0, 0.1); // will run through * SEQAN_ASSERT_IN_DELTA(1, -2, 1); // will fail * SEQAN_ASSERT_IN_DELTA(1, "foo"); // will not compile * SEQAN_ASSERT_IN_DELTA_MSG(1, 0, 0.1, "msg"); // will fail with message * @endcode */ // Force a test failure. // // Usage: SEQAN_ASSERT_FAIL("Failed at position %d", pos); #define SEQAN_ASSERT_FAIL(...) \ do { \ ::seqan::ClassTest::forceFail(__FILE__, __LINE__, \ __VA_ARGS__); \ ::seqan::ClassTest::fail(); \ } while (false) // Equality assertion without a comment. // // Usage: SEQAN_ASSERT_EQ(4, 4); #define SEQAN_ASSERT_EQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Equality assertion with a comment. // // Usage: SEQAN_ASSERT_EQ(4, 4); #define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // In-delta-environment assertion without a comment. // // Usage: SEQAN_ASSERT_IN_DELTA(4.1, 4, 0.1); #define SEQAN_ASSERT_IN_DELTA(_arg1, _arg2, _arg3) \ do { \ if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ (_arg3), # _arg3)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // In-delta-environment assertion witha comment. // // Usage: SEQAN_ASSERT_IN_DELTA_MSG(4.1, 4, 0.1, "3.9 <= 4.1 <= 4.1"); #define SEQAN_ASSERT_IN_DELTA_MSG(_arg1, _arg2, _arg3, ...) \ do { \ if (!::seqan::ClassTest::testInDelta(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ (_arg3), # _arg3, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Inequality assertion without a comment. // // Usage: SEQAN_ASSERT_NEQ(4, 5); #define SEQAN_ASSERT_NEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Inequality assertion with a comment. // // Usage: SEQAN_ASSERT_NEQ(4, 5); #define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testNotEqual(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than-or-equal assertion without a comment. #define SEQAN_ASSERT_LEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than-or-equal assertion with a comment. #define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) 
\ do { \ if (!::seqan::ClassTest::testLeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than assertion without a comment. #define SEQAN_ASSERT_LT(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Less-than assertion with a comment. #define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testLt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than-or-equal assertion without a comment. #define SEQAN_ASSERT_GEQ(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than-or-equal assertion with a comment. #define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testGeq(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than assertion without a comment. #define SEQAN_ASSERT_GT(_arg1, _arg2) \ do { \ if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Greater-than assertion with a comment. #define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) \ do { \ if (!::seqan::ClassTest::testGt(__FILE__, __LINE__, \ (_arg1), # _arg1, \ (_arg2), # _arg2, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.; // Trueness assertion with a comment. // // Usage: SEQAN_ASSERT(false); #define SEQAN_ASSERT(_arg1) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // TODO(holtgrew): Rename to SEQAN_ASSERT once that name is free.; // Trueness assertion with a comment. #define SEQAN_ASSERT_MSG(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testTrue(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Falseness assertion without a comment. // // Usage: SEQAN_ASSERT_NOT(false); #define SEQAN_ASSERT_NOT(_arg1) \ do { \ if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \ (_arg1), # _arg1)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) // Falseness assertion with a comment. #define SEQAN_ASSERT_NOT_MSG(_arg1, ...) \ do { \ if (!::seqan::ClassTest::testFalse(__FILE__, __LINE__, \ (_arg1), # _arg1, \ __VA_ARGS__)) { \ ::seqan::ClassTest::fail(); \ } \ } while (false) #elif SEQAN_ENABLE_DEBUG && defined(__CUDA_ARCH__) #define SEQAN_ASSERT_EQ(_arg1, _arg2) do { assert(_arg1 == _arg2); } while (false) #define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) do { assert(_arg1 == _arg2); } while (false) #define SEQAN_ASSERT_NEQ(_arg1, _arg2) do { assert(_arg1 != _arg2); } while (false) #define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) do { assert(_arg1 != _arg2); } while (false) #define SEQAN_ASSERT_LEQ(_arg1, _arg2) do { assert(_arg1 <= _arg2); } while (false) #define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) do { assert(_arg1 <= _arg2); } while (false) #define SEQAN_ASSERT_LT(_arg1, _arg2) do { assert(_arg1 < _arg2); } while (false) #define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) 
do { assert(_arg1 < _arg2); } while (false) #define SEQAN_ASSERT_GEQ(_arg1, _arg2) do { assert(_arg1 >= _arg2); } while (false) #define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) do { assert(_arg1 >= _arg2); } while (false) #define SEQAN_ASSERT_GT(_arg1, _arg2) do { assert(_arg1 > _arg2); } while (false) #define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) do { assert(_arg1 > _arg2); } while (false) #define SEQAN_ASSERT(_arg1) do { assert(_arg1); } while (false) #define SEQAN_ASSERT_MSG(_arg1, ...) do { assert(_arg1); } while (false) #define SEQAN_ASSERT_NOT(_arg1) do { assert(!_arg1); } while (false) #define SEQAN_ASSERT_NOT_MSG(_arg1, ...) do { assert(!_arg1); } while (false) #define SEQAN_ASSERT_FAIL(...) do { assert(false); } while (false) #else #define SEQAN_ASSERT_EQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_EQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_NEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_NEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_LEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_LEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_LT(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_LT_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_GEQ(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_GEQ_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT_GT(_arg1, _arg2) do {} while (false) #define SEQAN_ASSERT_GT_MSG(_arg1, _arg2, ...) do {} while (false) #define SEQAN_ASSERT(_arg1) do {} while (false) #define SEQAN_ASSERT_MSG(_arg1, ...) do {} while (false) #define SEQAN_ASSERT_NOT(_arg1) do {} while (false) #define SEQAN_ASSERT_NOT_MSG(_arg1, ...) do {} while (false) #define SEQAN_ASSERT_FAIL(...) do {} while (false) #endif // #if defined(SEQAN_ENABLE_DEBUG) && !defined(__CUDA_ARCH__) #else // no variadic macros #if SEQAN_ENABLE_DEBUG inline void SEQAN_ASSERT_FAIL(const char * comment, ...) { va_list args; va_start(args, comment); ::seqan::ClassTest::vforceFail("", 0, comment, args); ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) { if (!::seqan::ClassTest::testInDelta("", 0, _arg1, "", _arg2, "", _arg3, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestInDelta("", 0, _arg1, "", _arg2, "", _arg3, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testEqual("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestEqual("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testNotEqual("", _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) 
{ va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestNotEqual("", _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testLeq("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestLeq("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testLt("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestLt("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testGeq("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestGeq("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1, typename T2> void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2) { if (!::seqan::ClassTest::testGt("", 0, _arg1, "", _arg2, "")) ::seqan::ClassTest::fail(); } template <typename T1, typename T2> void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestGt("", 0, _arg1, "", _arg2, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1> void SEQAN_ASSERT(T1 const & _arg1) { if (!::seqan::ClassTest::testTrue("", 0, _arg1, "")) ::seqan::ClassTest::fail(); } template <typename T1> void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestTrue("", 0, _arg1, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } template <typename T1> void SEQAN_ASSERT_NOT(T1 const & _arg1) { if (!::seqan::ClassTest::testFalse("", 0, _arg1, "")) ::seqan::ClassTest::fail(); } template <typename T1> void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...) { va_list args; va_start(args, comment); if (!::seqan::ClassTest::vtestFalse("", 0, _arg1, "", comment, args)) ::seqan::ClassTest::fail(); va_end(args); } #else // #if SEQAN_ENABLE_DEBUG inline void SEQAN_ASSERT_FAIL(const char * comment, ...) {} template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3) {} template <typename T1, typename T2, typename T3> void SEQAN_ASSERT_IN_DELTA_MSG(T1 const & _arg1, T2 const & _arg2, T3 const & _arg3, const char * comment, ...) {} template <typename T1, typename T2> void SEQAN_ASSERT_EQ(T1 const & _arg1, T2 const & _arg2) {} template <typename T1, typename T2> void SEQAN_ASSERT_EQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...) 
{}

template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ(T1 const & _arg1, T2 const & _arg2)
{}

template <typename T1, typename T2>
void SEQAN_ASSERT_NEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{}

template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ(T1 const & _arg1, T2 const & _arg2)
{}

template <typename T1, typename T2>
void SEQAN_ASSERT_LEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{}

template <typename T1, typename T2>
void SEQAN_ASSERT_LT(T1 const & _arg1, T2 const & _arg2)
{}

template <typename T1, typename T2>
void SEQAN_ASSERT_LT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{}

template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ(T1 const & _arg1, T2 const & _arg2)
{}

template <typename T1, typename T2>
void SEQAN_ASSERT_GEQ_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{}

template <typename T1, typename T2>
void SEQAN_ASSERT_GT(T1 const & _arg1, T2 const & _arg2)
{}

template <typename T1, typename T2>
void SEQAN_ASSERT_GT_MSG(T1 const & _arg1, T2 const & _arg2, const char * comment, ...)
{}

template <typename T1>
void SEQAN_ASSERT(T1 const & _arg1)
{}

template <typename T1>
void SEQAN_ASSERT_MSG(T1 const & _arg1, const char * comment, ...)
{}

template <typename T1>
void SEQAN_ASSERT_NOT(T1 const & _arg1)
{}

template <typename T1>
void SEQAN_ASSERT_NOT_MSG(T1 const & _arg1, const char * comment, ...)
{}

#endif  // #if SEQAN_ENABLE_DEBUG

#endif  // no variadic macros

// Returns a string (of type char *) with the path to the called binary.
//
// Use this to locate files relative to the test binary.
#define SEQAN_PROGRAM_PATH \
    ::seqan::ClassTest::StaticData::basePath()

/*!
 * @macro SEQAN_PATH_TO_ROOT
 * @headerfile <seqan/basic.h>
 * @brief Return path to the checkout root directory.
 *
 * @signature TCharPtr SEQAN_PATH_TO_ROOT()
 *
 * @return TCharPtr <tt>char const *</tt>, string with the path to the parent directory of the tests directory.
 *
 * @section Examples
 *
 * @code{.cpp}
 * CharString buffer = SEQAN_PATH_TO_ROOT();
 * append(buffer, "/tests/files/example.txt");
 *
 * FILE *f = fopen(toCString(buffer), "w");
 * fprintf(f, "Test Data");
 * fclose(f);
 * @endcode
 *
 * @deprecated Unsafe.
 * @see getAbsolutePath
 * @see SEQAN_TEMP_FILENAME
 */

// TODO(holtgrew): Subject to change with restructuring.
// Returns a const char * string with the path to the projects directory.
#define SEQAN_PATH_TO_ROOT() \
    ::seqan::ClassTest::StaticData::pathToRoot()

// Returns the POSIX int file handle to an open file.
// TODO(holtgrew): Uncomment once openTempFile has been implemented.
// #define SEQAN_OPEN_TEMP_FILE() (::seqan::ClassTest::openTempFile())

/*!
 * @macro SEQAN_TEMP_FILENAME
 * @headerfile <seqan/basic.h>
 * @brief Generates the name of a temporary file.
 *
 * @signature TCharType SEQAN_TEMP_FILENAME();
 *
 * @return TCharType <tt>char const *</tt>, string with the path to a temporary file.
 *
 * @section Remarks
 *
 * The pointed-to string is stored in a buffer and is overwritten by the next call to this macro. Copy it out if you
 * need it.
 *
 * @section Examples
 *
 * @code{.cpp}
 * const char *p = SEQAN_TEMP_FILENAME();
 * char tempFilename[1000];
 * strcpy(tempFilename, p);
 * FILE *f = fopen(tempFilename, "w");
 * fprintf(f, "Test Data");
 * fclose(f);
 * @endcode
 *
 * @see SEQAN_PATH_TO_ROOT
 */

// Returns a temporary filename.
#define SEQAN_TEMP_FILENAME() (::seqan::ClassTest::tempFileName())

#if SEQAN_ENABLE_CHECKPOINTS

// Create a check point at the point where the macro is placed.
// TODO(holtgrew): Should be called SEQAN_CHECK_POINT to be consistent.
#define SEQAN_CHECKPOINT \
    ::seqan::ClassTest::registerCheckPoint(__LINE__, __FILE__);

// Call the check point verification code for the given file.
#define SEQAN_VERIFY_CHECKPOINTS(filename) \
    ::seqan::ClassTest::verifyCheckPoints(filename)

#else  // #if SEQAN_ENABLE_CHECKPOINTS

#define SEQAN_CHECKPOINT

// If check point verification is requested while check points are disabled,
// print a warning.
#define SEQAN_VERIFY_CHECKPOINTS(filename) \
    do { \
        fprintf(stderr, ("WARNING: Check point verification is " \
                         "disabled. Trying to verify %s from %s:%d.\n"), \
                filename, __FILE__, __LINE__); \
    } while (false)

#endif  // #if SEQAN_ENABLE_CHECKPOINTS

#if !SEQAN_ENABLE_TESTING

#define SEQAN_BEGIN_TESTSUITE(suite_name) \
    int main(int argc, char ** argv) { \
        (void) argc; \
        (void) argv; \
        fprintf(stderr, "Warning: SEQAN_ENABLE_TESTING is wrong and you used the macro SEQAN_BEGIN_TESTSUITE!\n");
#define SEQAN_END_TESTSUITE \
    return 0; \
    }
#define SEQAN_CALL_TEST(test_name) do { SEQAN_TEST_ ## test_name(); } while (false)
#define SEQAN_SKIP_TEST do {} while (false)

#endif  // #if !SEQAN_ENABLE_TESTING

// ----------------------------------------------------------------------------
// Function getAbsolutePath()
// ----------------------------------------------------------------------------

/*!
 * @fn getAbsolutePath
 * @headerfile <seqan/basic.h>
 * @brief Returns the absolute path for a filename within the source repository.
 *
 * @signature std::string getAbsolutePath(const char * filename);
 *
 * @return <tt>std::string</tt>, absolute path for a filename within the source repository.
 */

inline std::string getAbsolutePath(const char * path)
{
    return std::string(SEQAN_PATH_TO_ROOT()) + path;
}

}  // namespace seqan

#endif  // SEQAN_INCLUDE_SEQAN_BASIC_DEBUG_TEST_SYSTEM_H_
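A quick illustration of how the assertion and test-suite macros above fit together; this is a minimal sketch only: the test and suite names are made up, and SEQAN_DEFINE_TEST is assumed to be provided elsewhere in this header.

#include <algorithm>
#include <seqan/basic.h>

// Hypothetical test: exercises the equality, ordering and message-carrying assertions.
SEQAN_DEFINE_TEST(test_example_min)
{
    SEQAN_ASSERT_EQ(std::min(2, 3), 2);
    SEQAN_ASSERT_LT(2, 3);
    SEQAN_ASSERT_MSG(2 + 2 == 4, "unexpected sum: %d", 2 + 2);
}

// The suite macros expand to main(); each SEQAN_CALL_TEST runs one registered test.
SEQAN_BEGIN_TESTSUITE(test_example)
{
    SEQAN_CALL_TEST(test_example_min);
}
SEQAN_END_TESTSUITE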
qubo.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <time.h> #include <math.h> #include <getopt.h> #include <assert.h> #ifdef PARALLEL #include <omp.h> #endif #include "normal.c" // QUBO solver // Solves QUBO problem: // Minimise sum_{i,j} Q_ij x_i x_j over choices of x_i // i,j corresponds to an edge from i to j in the "Chimera" graph C_N. // The sum is taken over both directions (i,j and j,i) and includes the diagonal terms // (configurable using option -w). // x_i can take the values statemap[0] and statemap[1] (default 0,1). // This includes the case described in section 3.2 of http://www.cs.amherst.edu/ccm/cf14-mcgeoch.pdf // // This now includes the union of all historical test code and is rather sprawling. Any // particular technique probably only requires a small subset of this program. In other // words, it could do with a clean out and separating into different programs. // // Chimera graph, C_N: // Vertices are (x,y,o,i) 0<=x,y<N, 0<=o<2, 0<=i<4 // Edge from (x,y,o,i) to (x',y',o',i') if // (x,y)=(x',y'), o!=o', OR // |x-x'|=1, y=y', o=o'=0, i=i', OR // |y-y'|=1, x=x', o=o'=1, i=i' // // x,y are the horizontal,vertical co-ords of the K4,4 // o=0..1 is the "orientation" (0=horizontally connected, 1=vertically connected) // i=0..3 is the index within the "semi-K4,4"="bigvertex" // There is an involution given by {x<->y o<->1-o} // #define NV (8*N*N) // Num vertices #define NE (8*N*(3*N-1)) // Num edges (not used) #define NBV (2*N*N) // Num "big" vertices (semi-K4,4s) #define NBE (N*(3*N-2)) // Num "big" edges (not used) #define enc(x,y,o) ((o)+((N*(x)+(y))<<1)) #define encp(x,y,o) ((x)>=0&&(x)<N&&(y)>=0&&(y)<N?enc(x,y,o):NBV) // bounds-protected version #define decx(p) (((p)>>1)/N) #define decy(p) (((p)>>1)%N) #define deco(p) ((p)&1) // encI is the same as enc but incorporates the involution x<->y, o<->1-o #define encI(inv,x,y,o) (((inv)^(o))+((N*(x)+(y)+(inv)*(N-1)*((y)-(x)))<<1)) //#define encI(inv,x,y,o) ((inv)?enc(y,x,1-(o)):enc(x,y,o)) //#define encI(inv,x,y,o) (enc(x,y,o)+(inv)*(enc(y,x,1-(o))-enc(x,y,o))) #define enc2(x,y) (N*(x)+(y)) #define enc2p(x,y) ((x)>=0&&(x)<N&&(y)>=0&&(y)<N?enc2(x,y):N*N) // bounds-protected version int (*Q)[4][7]; // Q[NBV][4][7] // Weights: Q[r][i][d] = weight of i^th vertex of r^th big vertex in direction d // Directions 0-3 corresponds to intra-K_4,4 neighbours, and // 4 = Left or Down, 5 = Right or Up, 6 = self int QC; // Centre constant = (if enabled by -c) sum of pre-shifted energy of state X and X with bipartite half flipped // Only actually constant/meaningful if Q was derived from an Ising model with no external fields int (*adj)[4][7][2]; // adj[NBV][4][7][2] // Complete adjacency list, including both directions along an edge and self-loops // adj[p][i][d]={q,j} <-> d^th neighbour of encoded vertex p, index i, is // encoded vertex q, index j // d as above int (*okv)[4]; // okv[NBV][4] list of working vertices int *XBplus; // XBplus[(N+2)*N*2] int *XBa; // XBa[NBV] // XBa[enc(x,y,o)] = State (0..15) of big vert // Allow extra space to avoid having to check for out-of-bounds accesses // (Doesn't matter that they wrap horizontally, since the weights will be 0 for these edges.) 
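/*
 * Worked example for the encoding macros above (illustrative only; assumes N=8,
 * so NBV = 2*N*N = 128 and encoded big-vertex numbers run from 0 to 127):
 *
 *   enc(2,3,1)    = 1 + ((8*2+3)<<1)           = 39
 *   decx(39)      = (39>>1)/8 = 2,   decy(39) = (39>>1)%8 = 3,   deco(39) = 39&1 = 1
 *   encp(8,3,1)   = NBV = 128                   (out-of-bounds coordinates map to the sentinel slot)
 *   encI(1,2,3,1) = 0 + ((8*2+3 + 7*(3-2))<<1)  = 52 = enc(3,2,0)
 *
 * i.e. encI with inv=1 applies the involution x<->y, o<->1-o described above,
 * and encI with inv=0 reduces to enc.
 */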
typedef short intqba;// Use int if range of values exceeds 16 bits, or use short to be more compact, cacheable intqba (*QBa)[3][16][16]; // QBa[NBV][3][16][16] // Weights for big verts (derived from Q[]) // QBa[enc(x,y,o)][d][s0][s1] = total weight from big vert (x,y,o) in state s0 // to the big vert in direction d in state s1 // d=0 is intra-K_4,4, d=1 is Left/Down, d=2 is Right/Up int (*ok)[16]; // ok[NBV+1][16] ok[enc(x,y,o)][s] = s^th allowable state in cell x,y,o (list) int *nok; // nok[NBV+1] nok[enc(x,y,o)] = number of allowable states in x,y,o // The last entry is single state entry which is used when things go outside the grid int (*ok2)[256];// ok2[N*N+1][256] ok2[enc2(x,y)][s] = s^th allowable state in K44 x,y (list) int *nok2; // nok2[N*N+1] nok2[enc2(x,y)] = number of allowable states in K44 x,y #define QB(x,y,o,d,s0,s1) (QBa[enc(x,y,o)][d][s0][s1]) #define QBI(inv,x,y,o,d,s0,s1) (QBa[encI(inv,x,y,o)][d][s0][s1])// Involution-capable addressing of QB #define XB(x,y,o) (XBa[enc(x,y,o)]) #define XBI(inv,x,y,o) (XBa[encI(inv,x,y,o)])// Involution-capable addressing of XB int N;// Size of Chimera graph int statemap[2];// statemap[0], statemap[1] are the two possible values that the state variables take int deb;// verbosity int seed,seed2; double ext; #define MAXNGP 100 int ngp;// Number of general parameters double genp[MAXNGP]={0}; // General parameters typedef long long int int64;// gcc's 64-bit type typedef long long unsigned int uint64;// gcc's 64-bit type typedef unsigned char UC; #define NTB 1024 int ps[NTB][256]; #define MIN(x,y) ((x)<(y)?(x):(y)) #define MAX(x,y) ((x)>(y)?(x):(y)) #define NTIMS 100 double lcpu[NTIMS],tcpu[NTIMS]={0}; int64 ntim[NTIMS]={0}; //#define TICK(n) {lcpu[n]=cpu();} //#define TOCK(n) {tcpu[n]+=cpu()-lcpu[n];ntim[n]++;} #define TICK(n) {} #define TOCK(n) {} // Isolate random number generator in case we need to replace it with something better void initrand(int seed){srandom(seed);} int randbit(void){return (random()>>16)&1;} int randsign(void){return randbit()*2-1;} int randnib(void){return (random()>>16)&15;} int randnum(void){return random();} int randint(int n){return random()%n;} double randfloat(void){return (random()+.5)/(RAND_MAX+1.0);} #define RANDFLOAT ((randtab[randptr++]+0.5)/(RAND_MAX+1.0)) unsigned int *randtab; int randptr,randlength; typedef struct { int emin,emax; long double *etab0,*etab,m0,m1,Q0,Q1,Q2; unsigned int *ftab0,*ftab; unsigned char (*septab0)[16][4]; // [16][16][4], static unsigned int (*septab1)[16][16]; // [NBV][16][16] signed char (*septab1a)[16][16]; // [NBV][16][16], static long double (*septab2)[16][16]; // [NBV][16][16] signed char (*septab2a)[16][16][2]; // [NBV][16][16][2], static long double (*septab3)[4][2][2]; // [NBV][4][2][2] signed char (*septab3a)[4][2][2]; // [NBV][4][2][2], static } gibbstables; // septab0[a][b][i] = (i<<2)|(a_i<<1)|b_i a_i, b_i =i^th bits of a, b // septab1[p][b][s] = Z0/(Z0+Z1) scaled to RAND_MAX, and // septab2[p][b][s] = Z0+Z1, where // t = temperature number (i.e., using beta=be[t]) // p = big vertex = (x,y,o) say // b = state of (x,y,1-o) (b=0,...,15) // s = i<<2|(j<<1)|k, (i=0,1,2,3, j=0,1, k=0,1) as from septab0 // Z_l = Z-value arising from (x-1,y,0,i)=j, (x,y,0,i)=l, (x+1,y,0,i)=k, (x,y,1)=b (mutatis mutandis if o=1) // It evaluates all edges from (x,y,o,i), including its self-edge and (x,y,1-o,i)'s self-edge (but none others) // septab2a[p][b][s][l] = W_l (a small integer), where exp(-beta*W_l)=etab[W_l]=Z_l and p,b,s,l are as in septab2 // (W_l is beta-independent, which 
makes septab2a much more compact than septab2) // septab3[p][i][j][k] = exp(-be[t]*(-J_{pq}-J_{qp})*statemap[j]*statemap[k]), where // p=(x,y,0,i), q=(x,y+1,0,i) (mutatis mutandis if o=1) int septab1a_compact,septab2a_compact,septab3a_compact; double cpu(){return clock()/(double)CLOCKS_PER_SEC;} void prtimes(void){ int i; for(i=0;i<NTIMS;i++)if(ntim[i])printf("Time %3d %12lld %10.2f %12g\n",i,ntim[i],tcpu[i],tcpu[i]/ntim[i]); } void initrandtab(int length){ int i; randtab=(unsigned int*)malloc(length*sizeof(unsigned int)); if(!randtab){fprintf(stderr,"Couldn't allocate randtab of length %d\n",length);exit(1);} randptr=0;randlength=length; for(i=0;i<length;i++)randtab[i]=randnum(); } void initgraph(int wn){ int d,i,j,o,p,t,u,x,y,z; for(p=0;p<NBV;p++)for(i=0;i<4;i++)for(d=0;d<7;d++)adj[p][i][d][0]=adj[p][i][d][1]=-1;// Set "non-existent" flag for(x=0;x<N;x++)for(y=0;y<N;y++)for(o=0;o<2;o++){ p=enc(x,y,o); for(i=0;i<4;i++){ for(j=0;j<4;j++){adj[p][i][j][0]=enc(x,y,1-o);adj[p][i][j][1]=j;} z=o?y:x; if(z>0){adj[p][i][4][0]=enc(x-1+o,y-o,o);adj[p][i][4][1]=i;} if(z<N-1){adj[p][i][5][0]=enc(x+1-o,y+o,o);adj[p][i][5][1]=i;} adj[p][i][6][0]=p;adj[p][i][6][1]=i; } } // Choose random subset of size wn to be the working nodes t=wn;u=NV; for(p=0;p<NBV;p++)for(i=0;i<4;i++){okv[p][i]=(randint(u)<t);t-=okv[p][i];u--;} } void getbigweights1(void){// Get derived weights on "big graph" QB[] from Q[] // Optimised version of getbigweights() // This (messier) version is just here to show that the setup time can be more-or-less negligible. // Could be faster if we optimised for the case that statemap={0,1} or {-1,1}, but it's fast enough for now. int i,j,o,p,q,v,x,y,po,s0,s1,x0,x1,x00,x0d,dd,dd2; memset(QBa,0,NBV*3*16*16*sizeof(intqba)); x0=statemap[0];x1=statemap[1]; x00=x0*x0;x0d=x0*(x1-x0);dd=(x1-x0)*(x1-x0);dd2=x1*x1-x0*x0; for(x=0;x<N;x++)for(y=0;y<N;y++){ intqba (*QBal)[16],vv[16][16]; p=enc(x,y,0);po=enc(x,y,1); for(i=0,v=0;i<4;i++)for(j=0;j<4;j++){vv[i][j]=Q[p][i][j]+Q[po][j][i];v+=vv[i][j];} for(i=0;i<4;i++)v+=Q[p][i][6]+Q[po][i][6]; v*=x00; QBal=QBa[p][0]; QBal[0][0]=v; for(i=0;i<4;i++){ QBal[1<<i][0]=v+(vv[i][0]+vv[i][1]+vv[i][2]+vv[i][3])*x0d+Q[p][i][6]*dd2; QBal[0][1<<i]=v+(vv[0][i]+vv[1][i]+vv[2][i]+vv[3][i])*x0d+Q[po][i][6]*dd2; } for(i=1;i<4;i++)for(s0=(1<<i)+1;s0<(1<<(i+1));s0++){ QBal[0][s0]=QBal[0][1<<i]+QBal[0][s0-(1<<i)]-v; QBal[s0][0]=QBal[1<<i][0]+QBal[s0-(1<<i)][0]-v; } for(i=0;i<4;i++)for(j=0;j<4;j++)QBal[1<<i][1<<j]=QBal[1<<i][0]+QBal[0][1<<j]-v+dd*vv[i][j]; for(i=0;i<4;i++)for(s0=(1<<i);s0<(1<<(i+1));s0++){ for(j=0;j<4;j++){ QBal[s0][1<<j]=QBal[1<<i][1<<j]+QBal[s0-(1<<i)][1<<j]-QBal[0][1<<j]; for(s1=(1<<j)+1;s1<(1<<(j+1));s1++){ QBal[s0][s1]=QBal[s0][1<<j]+QBal[s0][s1-(1<<j)]-QBal[s0][0]; } } } for(o=0;o<2;o++)if((o?y:x)<N-1){ int aa[16],oo[16]; p=enc(x,y,o); q=enc(x+1-o,y+o,o); for(i=0,v=0;i<4;i++)v+=Q[p][i][5]+Q[q][i][4]; v*=x00; aa[0]=0;oo[0]=v; for(i=0;i<4;i++){ aa[1<<i]=(Q[p][i][5]+Q[q][i][4])*x1*(x1-x0); oo[1<<i]=v+(Q[p][i][5]+Q[q][i][4])*x0*(x1-x0); for(s0=(1<<i)+1;s0<(1<<(i+1));s0++){ aa[s0]=aa[1<<i]+aa[s0-(1<<i)]; oo[s0]=oo[1<<i]+oo[s0-(1<<i)]-v; } } for(s0=0;s0<16;s0++)for(s1=0;s1<16;s1++)QBa[p][2][s0][s1]=aa[s0&s1]+oo[s0|s1]; } }// x,y for(x=0;x<N;x++)for(y=0;y<N;y++){ if(x<N-1)memcpy(QBa[enc(x+1,y,0)][1],QBa[enc(x,y,0)][2],256*sizeof(intqba)); if(y<N-1)memcpy(QBa[enc(x,y+1,1)][1],QBa[enc(x,y,1)][2],256*sizeof(intqba)); p=enc(x,y,1);q=enc(x,y,0); for(s0=0;s0<16;s0++)for(s1=0;s1<16;s1++)QBa[p][0][s0][s1]=QBa[q][0][s1][s0]; } } void getbigweights(void){// Get derived weights 
on "big graph" QB[] from Q[] // Intended so that the energy is calculated by summing over each big-edge exactly once, // not forwards and backwards. See val() below. // That means that the off-diagonal bit of Q[][] has to be replaced by Q+Q^T, not // (1/2)(Q+Q^T) as would happen if you later intended to sum over both big-edge directions. // The self-loops are incorporated (once) into the intra-K_4,4 terms, QB(*,*,*,0,*,*). int d,i,j,k,o,p,q,x,y,po,s0,s1,x0,x1; getbigweights1();return;// Call equivalent optimised version and return. // Simple version below retained because it makes it clearer what is going on. for(x=0;x<N;x++)for(y=0;y<N;y++)for(o=0;o<2;o++)for(s0=0;s0<16;s0++)for(s1=0;s1<16;s1++){ for(k=0;k<3;k++)QB(x,y,o,k,s0,s1)=0; p=enc(x,y,o);po=enc(x,y,1-o); for(i=0;i<4;i++)for(d=0;d<7;d++){ q=adj[p][i][d][0];j=adj[p][i][d][1]; if(q>=0){ x0=statemap[(s0>>i)&1];x1=statemap[(s1>>j)&1]; if(d<4)QB(x,y,o,0,s0,s1)+=(Q[p][i][j]+Q[po][j][i])*x0*x1; if(d==6)QB(x,y,o,0,s0,s1)+=Q[p][i][6]*x0*x0+Q[po][j][6]*x1*x1; if(d==4)QB(x,y,o,1,s0,s1)+=(Q[p][i][4]+Q[q][j][5])*x0*x1; if(d==5)QB(x,y,o,2,s0,s1)+=(Q[p][i][5]+Q[q][j][4])*x0*x1; } } } } int val(void){// Calculate value (energy) int v,x,y; v=-((QC+1)>>1); for(x=0;x<N;x++)for(y=0;y<N;y++){ v+=QB(x,y,0,0,XB(x,y,0),XB(x,y,1)); v+=QB(x,y,0,2,XB(x,y,0),XB(x+1,y,0)); v+=QB(x,y,1,2,XB(x,y,1),XB(x,y+1,1)); } return v; } int centreconst(void){ int f,o,v,x,y; for(f=v=0;f<2;f++){ for(x=0;x<N;x++)for(y=0;y<N;y++)for(o=0;o<2;o++)XB(x,y,o)=15*f*((x+y+o)&1); v+=val(); } return v; } int stripval(int d,int c0,int c1){ // If d=0, get value of columns c0..(c1-1), not including external edges // If d=1 then same for rows c0..(c1-1) int v,x,y; v=0; for(x=c0;x<c1;x++)for(y=0;y<N;y++){ v+=QBI(d,x,y,0,0,XBI(d,x,y,0),XBI(d,x,y,1)); if(x<c1-1)v+=QBI(d,x,y,0,2,XBI(d,x,y,0),XBI(d,x+1,y,0)); v+=QBI(d,x,y,1,2,XBI(d,x,y,1),XBI(d,x,y+1,1)); } return v; } void initweights(int weightmode,int centreflag){// Randomly initialise a symmetric weight matrix // weightmode // 0 All of Q_ij independently +/-1 // 1 As 0, but diagonal not allowed // 2 Upper triangular // 3 All of Q_ij allowed, but constrained symmetric // 4 Constrained symmetric, diagonal not allowed // 5 Start with J_ij (i<j) and h_i IID {-1,1} and transform back to Q (ignoring constant term) int d,i,j,p,q,r; for(p=0;p<NBV;p++)for(i=0;i<4;i++)for(d=0;d<7;d++)Q[p][i][d]=0; for(p=0;p<NBV;p++)for(i=0;i<4;i++)for(d=0;d<7;d++){ q=adj[p][i][d][0];j=adj[p][i][d][1]; if(!(q>=0&&okv[p][i]&&okv[q][j]))continue; switch(weightmode){ case 0:Q[p][i][d]=randsign();break; case 1:if(d<6)Q[p][i][d]=randsign();break; case 2:if((d<4&&deco(p)==0)||d==5)Q[p][i][d]=randsign();break; case 3:if((d<4&&deco(p)==0)||d==5)Q[p][i][d]=2*randsign(); else if(d==6)Q[p][i][d]=randsign(); break; case 4:if((d<4&&deco(p)==0)||d==5)Q[p][i][d]=2*randsign();break; case 5:if((d<4&&deco(p)==0)||d==5){r=randsign();Q[p][i][d]=4*r;Q[p][i][6]-=2*r;Q[q][j][6]-=2*r;} else if(d==6)Q[p][i][d]+=2*randsign(); break; case 6:if((d<4&&deco(p)==0)||d==5){r=randsign()*(10+(seed%32)*(d>=4));Q[p][i][d]=2*r;Q[p][i][6]-=r;Q[q][j][6]-=r;} break; // mode 7 is "noextfield" (J_{ij}=+/-1, h_i=0) in QUBO form case 7:if((d<4&&deco(p)==0)||d==5){r=randsign();Q[p][i][d]=2*r;Q[p][i][6]-=r;Q[q][j][6]-=r;} break; // mode 8 is a test mode case 8:if((d<4&&deco(p)==0)||d==5){int n=100+20*(seed%10)*(d>=4);r=randint(2*n+1)-n;Q[p][i][d]=2*r;Q[p][i][6]-=r;Q[q][j][6]-=r;} break; // mode 9 is candidate for most difficult class of instances at N=8 (tested with strat 13) case 
9:if((d<4&&deco(p)==0)||d==5){int n=100+100*(d>=4);r=randint(2*n+1)-n;Q[p][i][d]=2*r;Q[p][i][6]-=r;Q[q][j][6]-=r;} break; // mode 10 is candidate for most difficult class of instances at N=16 (tested with strat 14) case 10:if((d<4&&deco(p)==0)||d==5){int n=100+120*(d>=4);r=randint(2*n+1)-n;Q[p][i][d]=2*r;Q[p][i][6]-=r;Q[q][j][6]-=r;} break; // mode 11 is uniform on {-n,...,-1,1,...,n} (converted to QUBO form) to mimic "range" of http://arxiv.org/abs/1401.2910 case 11:if((d<4&&deco(p)==0)||d==5){int n=7;r=randint(2*n)-n;r+=(r>=0);Q[p][i][d]=2*r;Q[p][i][6]-=r;Q[q][j][6]-=r;} break; case 12:if((d<4&&deco(p)==0)||d==5){int n=7;r=randint(2*n)-n;r+=(r>=0);Q[p][i][d]=r;}// same as mode 11, for Ising form break; } } getbigweights(); QC=0; if(centreflag)QC=centreconst(); } void writeweights(char *f){ int d,i,j,p,q; FILE *fp; fp=fopen(f,"w");assert(fp); fprintf(fp,"%d %d\n",N,N); for(p=0;p<NBV;p++)for(i=0;i<4;i++)for(d=0;d<7;d++){ q=adj[p][i][d][0];j=adj[p][i][d][1]; if(q>=0&&Q[p][i][d]!=0)fprintf(fp,"%d %d %d %d %d %d %d %d %8d\n", decx(p),decy(p),deco(p),i, decx(q),decy(q),deco(q),j, Q[p][i][d]); } fclose(fp); } int readweights(char *f,int centreflag){ int d,i,n,p,w,v0,v1,x0,y0,o0,i0,e0,x1,y1,o1,i1,nx,ny,wn,gtr; char l[1000]; FILE *fp; printf("Reading weight matrix from file \"%s\"\n",f); fp=fopen(f,"r");assert(fp); while(fgets(l,1000,fp))if(l[0]!='#')break; n=sscanf(l,"%d %d %d",&nx,&ny,&gtr);assert(n>=2);if(n==2)gtr=1000000;// gtr=ground truth (not currently used) assert(nx==N&&ny==N); // Ensure weights=0 for edges that go out of bounds for(p=0;p<NBV;p++)for(i=0;i<4;i++){okv[p][i]=0;for(d=0;d<7;d++)Q[p][i][d]=0;} while(fgets(l,1000,fp)){ if(l[0]=='#')continue; assert(sscanf(l,"%d %d %d %d %d %d %d %d %d", &x0,&y0,&o0,&i0, &x1,&y1,&o1,&i1, &w)==9); if(x1==x0&&y1==y0){ if(o0==o1)e0=6; else e0=i1; }else{ if(abs(x1-x0)==1&&y1==y0&&o0==0&&o1==0){e0=4+(x1-x0+1)/2;}else if(x1==x0&&abs(y1-y0)==1&&o0==1&&o1==1){e0=4+(y1-y0+1)/2;}else {fprintf(stderr,"Unexpected edge in line: %s",l);assert(0);} } v0=enc(x0,y0,o0); v1=enc(x1,y1,o1); Q[v0][i0][e0]=w; if(w)okv[v0][i0]=okv[v1][i1]=1; } fclose(fp); for(p=0,wn=0;p<NBV;p++)for(i=0;i<4;i++)wn+=okv[p][i]; getbigweights(); QC=0; if(centreflag)QC=centreconst(); return wn; } void prstate(FILE*fp,int style,int*X0){ // style = 0: hex grid xored with X0 (if supplied) // style = 1: hex grid xored with X0 (if supplied), "gauge-fixed" (suitable if checksym()==1) // style = 2: list of vertices int nb[16]; int i,j,o,p,t,x,xor; x=xor=0; if(style==1){ xor=-1; if(statemap[0]==-1){ for(i=1,nb[0]=0;i<16;i++)nb[i]=nb[i>>1]+(i&1); for(i=0,t=0;i<N;i++)for(j=0;j<N;j++)for(o=0;o<2;o++)t+=nb[XB(i,j,o)^(X0?X0[enc(i,j,o)]:0)]; x=(t>=NV/2?15:0); } } if(style<2){ for(j=N-1;j>=0;j--){ for(i=0;i<N;i++){fprintf(fp," ");for(o=0;o<2;o++)fprintf(fp,"%X",XB(i,j,o)^((X0?X0[enc(i,j,o)]:0)&xor)^x);} fprintf(fp,"\n"); } }else{ for(p=0;p<NBV;p++)for(i=0;i<4;i++)fprintf(fp,"%d %d %d %d %4d\n",decx(p),decy(p),deco(p),i,statemap[(XBa[p]>>i)&1]); } } void writestate(char *f){ FILE *fp; fp=fopen(f,"w");assert(fp); prstate(fp,2,0); fclose(fp); } void readstate(char *f){ int s,x,y,o,i; char l[1000]; FILE *fp; printf("Reading state from file \"%s\"\n",f); fp=fopen(f,"r");assert(fp); memset(XBa,0,NBV*sizeof(int)); while(fgets(l,1000,fp)){ assert(sscanf(l,"%d %d %d %d %d",&x,&y,&o,&i,&s)==5); assert(s==statemap[0]||s==statemap[1]); XB(x,y,o)|=(s==statemap[1])<<i; } fclose(fp); } void shuf(int*a,int n){ int i,j,t; for(i=0;i<n-1;i++){ j=i+randint(n-i);t=a[i];a[i]=a[j];a[j]=t; } } void inittiebreaks(){ int i,j; 
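// Each ps[i] is filled with 0..255 and then shuffled into an independent uniformly random
// permutation; k44exhaust() below draws one row at random and ORs ps[t][s0+(s1<<4)] into the
// low-order bits of the shifted energy, so ties between equal-energy K4,4 states are broken
// uniformly at random.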
for(i=0;i<NTB;i++){ for(j=0;j<256;j++)ps[i][j]=j; shuf(ps[i],256); } } int stripexhaust(int d,int c0,int c1,int upd){ // If d=0 exhaust columns c0..(c1-1), if d=1 exhaust rows c0..(c1-1) // Comments and variable names are as if in the column case (d=0) // upd=1 <-> the optimum is written back into the global state // Returns value of strip only int c,r,s,t,v,x,bc,sh,wid,smin,vmin; int64 b,M,bl,br; short*vv,v1[16];// Map from boundary state to value of interior wid=c1-c0; M=1LL<<4*wid; int ps[wid][16],vc[wid][16],h1[16]; UC (*hc)[wid][M]=0,// Comb history: hc[r][x][b] = opt value of (c0+x,r,0) given (c0+y,r,I(y<=x))=b (y=0,...,wid-1) (*hs)[wid][M]=0; // Strut history: hs[r][x][b] = opt value of (c0+x,r,1) given (c0+y,r+I(y<=x),1)=b (y=0,...,wid-1) vv=(short*)malloc(M*sizeof(short)); if(upd){ hc=(UC (*)[wid][M])malloc(N*wid*M); hs=(UC (*)[wid][M])malloc(N*wid*M); } if(!(vv&&(!upd||(hc&&hs)))){fprintf(stderr,"Couldn't allocate %gGiB in stripexhaust()\n", M*(sizeof(short)+(!!upd)*2.*N*wid)/(1<<30));return 1;} // Break ties randomly. Sufficient to choose fixed tiebreaker outside r,b loops for(x=0;x<wid;x++){for(s=0;s<16;s++)ps[x][s]=s;if(upd)shuf(ps[x],16);} memset(vv,0,M*sizeof(short)); // Encoding of boundary into b is that the (c0+y,?,?) term corresponds to nibble y for(r=0;r<N;r++){ // Comb exhaust // At this point: vv maps (*,r,1) to value of (*,r,1), (*,<r,*) // // *b0 *b1 *b2 // / / / // / / / // *---------*---------* // s0 s1 s2 // // b2{ // s1{ vc1[s1]=min_{s2} Q(s3ext,s2)+Q(s2,b2)+Q(s2,s1) } // b1{ // s0{ vc0[s0]=min_{s1} vc1[s1]+Q(s1,b1)+Q(s1,s0) } // b0{ // vv[b2][b1][b0]+=min_{s0} vc0[s0]+Q(s0,b0)+Q(s0,s-1ext) // } // } // } for(s=0;s<16;s++)vc[wid-1][s]=QBI(d,c1-1,r,0,2,s,XBI(d,c1,r,0)); x=wid-1;b=0; while(x<wid){ // At this point b has (at least) x*4 zeros at the end of its binary expansion if(x==0){ t=XBI(d,c0-1,r,0); vmin=1000000000;smin=-1; for(s=0;s<16;s++){// s=s_x v=(vc[0][s]+QBI(d,c0,r,0,0,s,b&15)+QBI(d,c0,r,0,1,s,t))<<4|ps[x][s]; if(v<vmin){vmin=v;smin=s;} } vv[b]+=vmin>>4; if(upd)hc[r][x][b]=smin; b++; while((b>>x*4&15)==0)x++; }else{ for(t=0;t<16;t++){// t=s_{x-1} vmin=1000000000;smin=-1; for(s=0;s<16;s++){// s=s_x v=(vc[x][s]+QBI(d,c0+x,r,0,0,s,b>>x*4&15)+QBI(d,c0+x,r,0,1,s,t))<<4|ps[x][s]; if(v<vmin){vmin=v;smin=s;} } vc[x-1][t]=vmin>>4; if(upd)hc[r][x][b+t]=smin; } x--; } } // At this point vv maps (*,r,1) to value of (*,<=r,*) // Strut exhaust // // * *b1 *b2 *b3 // | | | | // | ^ | | // | | | | // | | | | // *b0 *s1 * * // // (c=1 picture) for(x=wid-1;x>=0;x--){ c=c0+x; // At this point vv maps (<=c,r,1), (>c,r+1,1) to value below these vertices sh=x*4; for(br=0;br<M;br+=1LL<<(sh+4)){// br = state of (>c,r+1,1) if(r==N-1&&br>0)continue; for(bl=0;bl<1LL<<sh;bl++){// bl = state of (<c,r,1) b=bl+br; for(bc=0;bc<16;bc++){ //if(r==N-1&&bc>0)continue;// This optimisation appears to slow it down vmin=1000000000;smin=-1; for(s=0;s<16;s++){// s = state of (c,r,1) v=((vv[b+((int64)s<<sh)]+QBI(d,c,r,1,2,s,bc))<<4)|ps[x][s]; if(v<vmin){vmin=v;smin=s;} } v1[bc]=vmin>>4; h1[bc]=smin; } for(bc=0;bc<16;bc++){ int64 b1=b+((int64)bc<<sh); vv[b1]=v1[bc]; if(upd)hs[r][x][b1]=h1[bc]; } } } } // Now vv maps (*,r+1,1) to value of (*,r+1,1),(*,<=r,*) }//r if(upd){ b=0; for(r=N-1;r>=0;r--){ // Now b = opt value of (*,r+1,1) for(x=0;x<wid;x++){ sh=x*4; s=hs[r][x][b];XBI(d,c0+x,r,1)=s; b=(b&~(15LL<<sh))|(int64)s<<sh; } // Now b = opt value of (*,r,1) s=0; for(x=0;x<wid;x++){ sh=x*4; s=hc[r][x][(b&(-(1LL<<sh)))+s]; XBI(d,c0+x,r,0)=s; } } free(hs);free(hc); } v=vv[0]; free(vv); return 
v;//+stripval(d,0,c0)+stripval(d,c1,N); } int lineexhaust(int d,int c,int upd){return stripexhaust(d,c,c+1,upd);} int k44exhaust(int x,int y){ // Exhausts big vertex (x,y) // Writes optimum value back into the global state int t,v,s0,s1,v0,vmin,tv[16]; t=randint(NTB);// Random tiebreaker for(s1=0;s1<16;s1++)tv[s1]=QB(x,y,1,1,s1,XB(x,y-1,1))+QB(x,y,1,2,s1,XB(x,y+1,1)); vmin=1000000000; for(s0=0;s0<16;s0++){ v0=QB(x,y,0,1,s0,XB(x-1,y,0))+QB(x,y,0,2,s0,XB(x+1,y,0)); for(s1=0;s1<16;s1++){ v=((QB(x,y,0,0,s0,s1)+v0+tv[s1])<<8)|ps[t][s0+(s1<<4)]; if(v<vmin){vmin=v;XB(x,y,0)=s0;XB(x,y,1)=s1;} } } return vmin>>8; } void init_state(void){// Initialise state randomly int x,y,o; for(x=0;x<N;x++)for(y=0;y<N;y++)for(o=0;o<2;o++)XB(x,y,o)=randnib(); } int checkbisym(void){// Checks if bipartite-symmetric (true if couplings are equivalent to no external fields) // I.e., energy(state)+energy(state with bipartite half of spins flipped) = constant int i,v,x,y,qc; qc=centreconst(); for(i=0;i<100;i++){ init_state();v=val(); for(x=0;x<N;x++)for(y=0;y<N;y++)XB(x,y,(x+y)&1)^=15; v+=val(); if(v!=qc)return 0; } return 1; } int checksym(void){// Checks if symmetric (true if couplings equivalent to no external fields) // I.e., energy(state)=energy(state with all spins flipped) int i,o,v,x,y; for(i=0;i<100;i++){ init_state();v=val(); for(x=0;x<N;x++)for(y=0;y<N;y++)for(o=0;o<2;o++)XB(x,y,o)^=15; if(v!=val())return 0; } return 1; } int gcd(x,y){ if(x<0)x=-x; if(y<0)y=-y; if(y==0)return x; return gcd(y,x%y); } int energyquantum(void){// Works out min gap between energies int g,i,v,v0; v0=val();g=0; for(i=0;i<100;i++){init_state();v=val();g=gcd(g,v-v0);} return g; } void pertstate(double p){ int o,x,y; for(x=0;x<N;x++)for(y=0;y<N;y++)if(randfloat()<p)for(o=0;o<2;o++)XB(x,y,o)=randnib(); } int tree1exhaust(int d,int p,int r0,int upd){ // If d=0 exhaust the (induced) tree consisting of all columns of parity p, // the o=1 (vertically connected) verts of the other columns, and row r0. // If d=1 then same with rows <-> columns. 
// Comments and variable names are as if in the column case (d=0) // upd=1 <-> the optimum is written back into the global state // Returns value of tree, which is global value because tree contains or is adjacent to every edge int b,c,f,r,s,v,dir,smin,vmin,ps[16],v0[16],v1[16],v2[16],v3[16],v4[16],hc[N][N][16],hs[N][N][16],hr[N][16]; // v0[s] = value of current column fragment given that (c,r,1) = s // v2[s] = value of current column (apart from (c,r0,0)) given that (c,r0,1) = s // v3[s] = value of tree to left of column c given that (c,r0,0) = s for(s=0;s<16;s++)v3[s]=0; for(c=0;c<N;c++){ for(s=0;s<16;s++)ps[s]=s; if(upd)shuf(ps,16); for(s=0;s<16;s++)v2[s]=0; for(dir=0;dir<2;dir++){// dir=0 <-> increasing r, dir=1 <-> decreasing r for(s=0;s<16;s++)v0[s]=0; for(r=dir*(N-1);r!=r0;r+=1-2*dir){ // Here v0[b] = value of (c,previous,*) given that (c,r,1)=b if((c-p)&1){ for(b=0;b<16;b++){// b = state of (c,r,1) v1[b]=v0[b]+QBI(d,c,r,0,0,XBI(d,c,r,0),b); } } else { for(b=0;b<16;b++){// b = state of (c,r,1) vmin=1000000000;smin=0; for(s=0;s<16;s++){// s = state of (c,r,0) v=(QBI(d,c,r,0,0,s,b)+ QBI(d,c,r,0,1,s,XBI(d,c-1,r,0))+ QBI(d,c,r,0,2,s,XBI(d,c+1,r,0)))<<4|ps[s]; if(v<vmin){vmin=v;smin=s;} } v1[b]=v0[b]+(vmin>>4); hc[c][r][b]=smin; } } for(b=0;b<16;b++){// b = state of (c,r+1-2*dir,1) vmin=1000000000;smin=0; for(s=0;s<16;s++){// s = state of (c,r,1) v=(v1[s]+QBI(d,c,r,1,2-dir,s,b))<<4|ps[s]; if(v<vmin){vmin=v;smin=s;} } v0[b]=vmin>>4; hs[c][r][b]=smin; } }//r for(s=0;s<16;s++)v2[s]+=v0[s]; }//dir for(b=0;b<16;b++){// b = state of (c,r0,0) vmin=1000000000;smin=0; for(s=0;s<16;s++){// s = state of (c,r0,1) v=(v2[s]+QBI(d,c,r0,1,0,s,b))<<4|ps[s]; if(v<vmin){vmin=v;smin=s;} } v4[b]=v3[b]+(vmin>>4); hc[c][r0][b]=smin; } for(b=0;b<16;b++){// b = state of (c+1,r0,0) vmin=1000000000;smin=0; for(s=0;s<16;s++){// s = state of (c,r0,0) v=(v4[s]+QBI(d,c,r0,0,2,s,b))<<4|ps[s]; if(v<vmin){vmin=v;smin=s;} } v3[b]=vmin>>4; hr[c][b]=smin; } }//c if(upd){ for(c=N-1;c>=0;c--){ f=!((c-p)&1); XBI(d,c,r0,0)=hr[c][c==N-1?0:XBI(d,c+1,r0,0)]; XBI(d,c,r0,1)=hc[c][r0][XBI(d,c,r0,0)]; for(r=r0+1;r<N;r++){ XBI(d,c,r,1)=hs[c][r][XBI(d,c,r-1,1)]; if(f)XBI(d,c,r,0)=hc[c][r][XBI(d,c,r,1)]; } for(r=r0-1;r>=0;r--){ XBI(d,c,r,1)=hs[c][r][XBI(d,c,r+1,1)]; if(f)XBI(d,c,r,0)=hc[c][r][XBI(d,c,r,1)]; } } } return v3[0]-((QC+1)>>1); } double tree1gibbs_slow(int d,int p,int r0,double beta){ // If d=0 sample the (induced) tree consisting of all verts of columns of parity p, // the o=1 (vertically connected) verts of the other columns, and row r0. // If d=1 then same with rows <-> columns. 
// Comments and variable names are as if in the column case (d=0) // Updates tree to new sample and returns log(Z) of tree int b,c,f,r,s,dir,hc[N][N][16],hs[N][N][16],hr[N][16]; double w,Z,max,lp[16],pr[16],W0[16],W1[16],W2[16],W3[16],W4[16]; // W0[s] = log(Z) of current column fragment given that (c,r,1) = s // W2[s] = log(Z) of current column (apart from (c,r0,0)) given that (c,r0,1) = s // W3[s] = log(Z) of tree to left of column c given that (c,r0,0) = s for(s=0;s<16;s++)W3[s]=0; for(c=0;c<N;c++){ for(s=0;s<16;s++)W2[s]=0; for(dir=0;dir<2;dir++){// dir=0 <-> increasing r, dir=1 <-> decreasing r for(s=0;s<16;s++)W0[s]=0; for(r=dir*(N-1);r!=r0;r+=1-2*dir){ // Here W0[b] = log(Z) of (c,previous,*) given that (c,r,1)=b if((c-p)&1){ for(b=0;b<16;b++){// b = state of (c,r,1) W1[b]=W0[b]-beta*QBI(d,c,r,0,0,XBI(d,c,r,0),b); } } else { for(b=0;b<16;b++){// b = state of (c,r,1) for(s=0,max=-1e9;s<16;s++){// s = state of (c,r,0) lp[s]=-beta*(QBI(d,c,r,0,0,s,b)+ QBI(d,c,r,0,1,s,XBI(d,c-1,r,0))+ QBI(d,c,r,0,2,s,XBI(d,c+1,r,0))); if(lp[s]>max)max=lp[s]; } for(s=0,Z=0;s<16;s++){pr[s]=exp(lp[s]-max);Z+=pr[s];} for(w=randfloat()*Z,s=0;s<16;s++){w-=pr[s];if(w<=0)break;} assert(s<16); W1[b]=W0[b]+max+log(Z); hc[c][r][b]=s; } } for(b=0;b<16;b++){// b = state of (c,r+1-2*dir,1) for(s=0,max=-1e9;s<16;s++){// s = state of (c,r,1) lp[s]=W1[s]-beta*QBI(d,c,r,1,2-dir,s,b); if(lp[s]>max)max=lp[s]; } for(s=0,Z=0;s<16;s++){pr[s]=exp(lp[s]-max);Z+=pr[s];} for(w=randfloat()*Z,s=0;s<16;s++){w-=pr[s];if(w<=0)break;} assert(s<16); W0[b]=max+log(Z); hs[c][r][b]=s; } }//r for(s=0;s<16;s++)W2[s]+=W0[s]; }//dir for(b=0;b<16;b++){// b = state of (c,r0,0) for(s=0,max=-1e9;s<16;s++){// s = state of (c,r0,1) lp[s]=W2[s]-beta*QBI(d,c,r0,1,0,s,b); if(lp[s]>max)max=lp[s]; } for(s=0,Z=0;s<16;s++){pr[s]=exp(lp[s]-max);Z+=pr[s];} for(w=randfloat()*Z,s=0;s<16;s++){w-=pr[s];if(w<=0)break;} assert(s<16); W4[b]=W3[b]+max+log(Z); hc[c][r0][b]=s; } for(b=0;b<16;b++){// b = state of (c+1,r0,0) for(s=0,max=-1e9;s<16;s++){// s = state of (c,r0,0) lp[s]=W4[s]-beta*QBI(d,c,r0,0,2,s,b); if(lp[s]>max)max=lp[s]; } for(s=0,Z=0;s<16;s++){pr[s]=exp(lp[s]-max);Z+=pr[s];} for(w=randfloat()*Z,s=0;s<16;s++){w-=pr[s];if(w<=0)break;} assert(s<16); W3[b]=max+log(Z); hr[c][b]=s; } }//c for(c=N-1;c>=0;c--){ f=!((c-p)&1); XBI(d,c,r0,0)=hr[c][c==N-1?0:XBI(d,c+1,r0,0)]; XBI(d,c,r0,1)=hc[c][r0][XBI(d,c,r0,0)]; for(r=r0+1;r<N;r++){ XBI(d,c,r,1)=hs[c][r][XBI(d,c,r-1,1)]; if(f)XBI(d,c,r,0)=hc[c][r][XBI(d,c,r,1)]; } for(r=r0-1;r>=0;r--){ XBI(d,c,r,1)=hs[c][r][XBI(d,c,r+1,1)]; if(f)XBI(d,c,r,0)=hc[c][r][XBI(d,c,r,1)]; } } return W3[0]; } int checkbound(long double ZZ[16],long double max){ int b; for(b=0;b<16;b++)if(MAX(ZZ[b],1/ZZ[b])>max*(1+1e-10))return 0; return 1; } double tree1gibbs(int d,int ph,int r0,gibbstables*gt){ // If d=0 sample the (induced) tree consisting of all verts of columns of parity ph, // the o=1 (vertically connected) verts of the other columns, and row r0. // If d=1 then same with rows <-> columns. 
// Comments and variable names are as if in the column case (d=0) // Updates tree to new sample and returns Z of tree // etab[r]=expl(-beta*r) (r can be negative) int b,c,f,r,s,dir,hc[N][N][16],hs[N][N][16],hr[N][16],id[16]; const int check=0; long double z,Z,max,ff,m0,m1,pr[16],Z0[16],Z0a[16],Z1[16],Z2[16],Z3a[16],Z3[16],Z4[16]; long double *etab=gt->etab; unsigned int *ftab=gt->ftab; unsigned char (*septab0)[16][4]=gt->septab0; unsigned int (*septab1)[16][16]=gt->septab1; signed char (*septab1a)[16][16]=gt->septab1a; long double (*septab2)[16][16]=gt->septab2; signed char (*septab2a)[16][16][2]=gt->septab2a; long double (*septab3)[4][2][2]=gt->septab3; signed char (*septab3a)[4][2][2]=gt->septab3a; // Z0[s] = const*(Z of current column fragment given that (c,r,1) = s) // Z1[s] = const*(Z of current column fragment, including (c,r,0), given that (c,r,1) = s) // Z2[s] = const*(Z of current column (apart from (c,r0,0)) given that (c,r0,1) = s) // Z3[s] = const*(Z of tree at columns <c given that (c,r0,0) = s) // Z4[s] = const*(Z of tree at columns <=c given that (c,r0,0) = s) // If |Z|<=m is abuse of notation for m^{-1}<=Z<=m, then after centring // |Z0|<=m1 // |Z1|<=m0.m1 // |Z2|<=m1^2 // |Z3|<=m1 // |Z4|<=m0.m1 TICK(0); for(b=0;b<16;b++)id[b]=b; for(s=0;s<16;s++)Z3[s]=1; m0=gt->m0;m1=gt->m1; for(c=0;c<N;c++){ for(s=0;s<16;s++)Z2[s]=1; TICK(1); for(dir=0;dir<2;dir++){// dir=0 <-> increasing r, dir=1 <-> decreasing r for(s=0;s<16;s++)Z0[s]=1; for(r=dir*(N-1);r!=r0;r+=1-2*dir){ // Here Z0[b] = const*(Z of (c,previous,*) given that (c,r,1)=b) // (c,r,0) -> (c,r,1) if((c-ph)&1){ TICK(2); for(b=0,max=0;b<16;b++){// b = state of (c,r,1) Z1[b]=Z0[b]*etab[QBI(d,c,r,0,0,XBI(d,c,r,0),b)]; if(Z1[b]>max)max=Z1[b]; } if(check)assert(checkbound(Z1,16*gt->Q0*m1)); ff=m0*m1/max;for(b=0;b<16;b++)Z1[b]*=ff; if(check)assert(checkbound(Z1,m0*m1)); TOCK(2); } else { TICK(3); if(randptr>randlength-64)randptr=randint(randlength-63); for(b=0,max=0;b<16;b++){// b = state of (c,r,1) int i; unsigned char *p0; unsigned int *p1; signed char *p1a; p0=septab0[XBI(d,c-1,r,0)][XBI(d,c+1,r,0)]; if(septab1a_compact&&septab2a_compact){ signed char (*p2a)[2]; p1a=septab1a[encI(d,c,r,0)][b]; p2a=septab2a[encI(d,c,r,0)][b]; for(i=0,s=0,Z=1;i<4;i++){Z*=etab[p2a[p0[i]][0]]+etab[p2a[p0[i]][1]];if(randtab[randptr++]>=ftab[p1a[p0[i]]])s|=1<<i;} }else{ long double *p2; p1=septab1[encI(d,c,r,0)][b]; p2=septab2[encI(d,c,r,0)][b]; for(i=0,s=0,Z=1;i<4;i++){Z*=p2[p0[i]];if(randtab[randptr++]>=p1[p0[i]])s|=1<<i;} } assert(s<16); hc[c][r][b]=s; if(Z>max)max=Z; Z0a[b]=Z; } if(check)assert(checkbound(Z0a,16*gt->Q2)); ff=m0/max;for(b=0;b<16;b++)Z0a[b]*=ff; if(check)assert(checkbound(Z0a,m0)); for(b=0;b<16;b++)Z1[b]=Z0[b]*Z0a[b];// b = state of (c,r,1) if(check)assert(checkbound(Z1,m0*m1)); TOCK(3); } TICK(4); // (c,r,1) -> (c,r+1-2*dir,1) if(randptr>randlength-64)randptr=randint(randlength-63); { long double Zx[16],ZZ0,ZZ1; int q,lh0[16],lh1[16]; q=encI(d,c,r-dir,1); #define T1Gstrut(i,Zf,Zt,lf,lt) \ for(b=0;b<16;b++){ \ ZZ0=Zf[b&~(1<<i)]*septab3[q][i][b>>i&1][0]; \ ZZ1=Zf[b|(1<<i)]*septab3[q][i][b>>i&1][1]; \ Zt[b]=ZZ0+ZZ1; \ lt[b]=lf[RANDFLOAT*(ZZ0+ZZ1)<ZZ0?b&~(1<<i):b|(1<<i)]; \ } #define T1Gstruta(i,Zf,Zt,lf,lt) \ for(b=0;b<16;b++){ \ ZZ0=Zf[b&~(1<<i)]*etab[septab3a[q][i][b>>i&1][0]]; \ ZZ1=Zf[b|(1<<i)]*etab[septab3a[q][i][b>>i&1][1]]; \ Zt[b]=ZZ0+ZZ1; \ lt[b]=lf[RANDFLOAT*(ZZ0+ZZ1)<ZZ0?b&~(1<<i):b|(1<<i)]; \ } if(septab3a_compact){ T1Gstruta(0,Z1,Zx,id,lh1); T1Gstruta(1,Zx,Z1,lh1,lh0); T1Gstruta(2,Z1,Zx,lh0,lh1); 
T1Gstruta(3,Zx,Z0,lh1,hs[c][r]); }else{ T1Gstrut(0,Z1,Zx,id,lh1); T1Gstrut(1,Zx,Z1,lh1,lh0); T1Gstrut(2,Z1,Zx,lh0,lh1); T1Gstrut(3,Zx,Z0,lh1,hs[c][r]); } for(b=0;b<16;b++)if(Z0[b]>max)max=Z0[b]; } if(check)assert(checkbound(Z0,16*gt->Q1*m0*m1)); ff=m1/max;for(b=0;b<16;b++)Z0[b]*=ff; if(check)assert(checkbound(Z0,m1)); TOCK(4); }//r for(s=0;s<16;s++)Z2[s]*=Z0[s]; }//dir TOCK(1); if(check)assert(checkbound(Z2,m1*m1)); TICK(5); // (c,r0,1) -> (c,r0,0) if(randptr>randlength-16)randptr=randint(randlength-15); for(b=0,max=0;b<16;b++){// b = state of (c,r0,0) for(s=0,Z=0;s<16;s++){// s = state of (c,r0,1) pr[s]=Z2[s]*etab[QBI(d,c,r0,1,0,s,b)]; Z+=pr[s]; } for(z=RANDFLOAT*Z,s=0;s<16;s++){z-=pr[s];if(z<=0)break;} assert(s<16); hc[c][r0][b]=s; if(Z>max)max=Z; Z3a[b]=Z; } if(check)assert(checkbound(Z3a,16*gt->Q0*m1*m1)); ff=m0/max;for(b=0;b<16;b++)Z3a[b]*=ff; if(check)assert(checkbound(Z3a,m0)); for(b=0;b<16;b++)Z4[b]=Z3[b]*Z3a[b];// b = state of (c,r0,0) if(check)assert(checkbound(Z4,m0*m1)); TOCK(5); TICK(6); if(randptr>randlength-64)randptr=randint(randlength-63); // (c,r0,0) -> (c+1,r0,0) { long double Zx[16],ZZ0,ZZ1; int q,lh0[16],lh1[16]; q=encI(d,c,r0,0); T1Gstrut(0,Z4,Zx,id,lh1); T1Gstrut(1,Zx,Z4,lh1,lh0); T1Gstrut(2,Z4,Zx,lh0,lh1); T1Gstrut(3,Zx,Z3,lh1,hr[c]); for(b=0;b<16;b++)if(Z3[b]>max)max=Z3[b]; } TOCK(6); if(check)assert(checkbound(Z3,16*gt->Q1*m0*m1)); ff=m1/max;for(b=0;b<16;b++)Z3[b]*=ff; if(check)assert(checkbound(Z3,m1)); }//c for(c=N-1;c>=0;c--){ f=!((c-ph)&1); XBI(d,c,r0,0)=hr[c][c==N-1?0:XBI(d,c+1,r0,0)]; XBI(d,c,r0,1)=hc[c][r0][XBI(d,c,r0,0)]; for(r=r0+1;r<N;r++){ XBI(d,c,r,1)=hs[c][r][XBI(d,c,r-1,1)]; if(f)XBI(d,c,r,0)=hc[c][r][XBI(d,c,r,1)]; } for(r=r0-1;r>=0;r--){ XBI(d,c,r,1)=hs[c][r][XBI(d,c,r+1,1)]; if(f)XBI(d,c,r,0)=hc[c][r][XBI(d,c,r,1)]; } } TOCK(0); return Z3[0]; } double tree1gibbs_sqa(int d,int ph,int r0,gibbstables*gt,double J0,double J1,int*X0,int*X1){ // As tree1gibbs, but applies external attractive couplings J0 to grid X0 and J1 to grid X1 // J0, J1 have already been exponentiated // If d=0 sample the (induced) tree consisting of all verts of columns of parity ph, // the o=1 (vertically connected) verts of the other columns, and row r0. // If d=1 then same with rows <-> columns. 
// Comments and variable names are as if in the column case (d=0) // Updates tree to new sample and returns Z of tree // etab[r]=expl(-beta*r) (r can be negative) int b,c,f,r,s,x0,x1,dir,hc[N][N][16],hs[N][N][16],hr[N][16],id[16]; const int check=0; long double z,Z,max,ff,m0,m1,pr[16],Z0[16],Z0a[16],Z1[16],Z2[16],Z3a[16],Z3[16],Z4[16],J0pow[16],J1pow[16]; long double *etab=gt->etab; unsigned char (*septab0)[16][4]=gt->septab0; unsigned int (*septab1)[16][16]=gt->septab1; long double (*septab2)[16][16]=gt->septab2; signed char (*septab2a)[16][16][2]=gt->septab2a; long double (*septab3)[4][2][2]=gt->septab3; signed char (*septab3a)[4][2][2]=gt->septab3a; // Z0[s] = const*(Z of current column fragment given that (c,r,1) = s) // Z1[s] = const*(Z of current column fragment, including (c,r,0), given that (c,r,1) = s) // Z2[s] = const*(Z of current column (apart from (c,r0,0)) given that (c,r0,1) = s) // Z3[s] = const*(Z of tree at columns <c given that (c,r0,0) = s) // Z4[s] = const*(Z of tree at columns <=c given that (c,r0,0) = s) // Using |Z|<=m as abuse of notation for m^{-1}<=Z<=m, then after centring // |Z0|<=m1 // |Z1|<=m0.m1 // |Z2|<=m1^2 // |Z3|<=m1 // |Z4|<=m0.m1 TICK(0); J0pow[0]=J0*J0*J0*J0; J1pow[0]=J1*J1*J1*J1; for(b=1;b<16;b++){J0pow[b]=J0pow[b>>1];J1pow[b]=J1pow[b>>1];if(b&1){J0pow[b]/=J0*J0;J1pow[b]/=J1*J1;}} // J0pow[b] = J0^(#0bits-#1bits), so J0pow[s0^s1] = Prod_{i<4} J0^(+/- spin i of s0 * +/- spin i of s1) for(b=0;b<16;b++)id[b]=b; for(s=0;s<16;s++)Z3[s]=1; m0=gt->m0;m1=gt->m1; for(c=0;c<N;c++){ for(s=0;s<16;s++)Z2[s]=1; TICK(1); for(dir=0;dir<2;dir++){// dir=0 <-> increasing r, dir=1 <-> decreasing r (comments in this loop refer to dir=0) for(s=0;s<16;s++)Z0[s]=1; for(r=dir*(N-1);r!=r0;r+=1-2*dir){ // Here Z0[b] = const*(Z of (c,<r,*)_all given that (c,r,1)=b) // (c,r,0) -> (c,r,1) if((c-ph)&1){// thin strand case TICK(2); x0=X0[encI(d,c,r,1)];x1=X1[encI(d,c,r,1)]; for(b=0,max=0;b<16;b++){// b = state of (c,r,1) // Doing edge (c,r,0)---(c,r,1) and (c,r,1)---externals Z1[b]=Z0[b]*etab[QBI(d,c,r,0,0,XBI(d,c,r,0),b)]*J0pow[b^x0]*J1pow[b^x1]; if(Z1[b]>max)max=Z1[b]; } // Now Z1[b] = const*(Z of (c,<r,*)_all+(c,r,0)_int+(c,r,1)_ext given that (c,r,1)=b) if(check)assert(checkbound(Z1,16*gt->Q0*m1)); ff=m0*m1/max;for(b=0;b<16;b++)Z1[b]*=ff; if(check)assert(checkbound(Z1,m0*m1)); TOCK(2); } else { TICK(3); if(randptr>randlength-64)randptr=randint(randlength-63); int i; long double ext[4]; x0=X0[encI(d,c,r,0)];x1=X1[encI(d,c,r,0)]; for(i=0;i<4;i++)ext[i]=(((x0>>i)&1)?1/J0:J0)*(((x1>>i)&1)?1/J1:J1);// J0^((-1)^b0)*J1^((-1)^b1) x0=X0[encI(d,c,r,1)];x1=X1[encI(d,c,r,1)]; for(b=0,max=0;b<16;b++){// b = state of (c,r,1) unsigned char *p0; unsigned int *p1; p0=septab0[XBI(d,c-1,r,0)][XBI(d,c+1,r,0)]; Z=J0pow[b^x0]*J1pow[b^x1]; if(septab2a_compact){ signed char (*p2a)[2]; p2a=septab2a[encI(d,c,r,0)][b]; for(i=0,s=0;i<4;i++){// Considering (c,r,0,i) long double ZZ0,ZZ1; ZZ0=etab[p2a[p0[i]][0]]*ext[i];// (c,r,0,i)=0 ZZ1=etab[p2a[p0[i]][1]]/ext[i];// (c,r,0,i)=1 Z*=ZZ0+ZZ1; if(RANDFLOAT*(ZZ0+ZZ1)>=ZZ0)s|=1<<i; } }else{ assert(0);// not done non-compact version long double *p2; p1=septab1[encI(d,c,r,0)][b]; p2=septab2[encI(d,c,r,0)][b]; for(i=0,s=0;i<4;i++){Z*=p2[p0[i]];if(randtab[randptr++]>=p1[p0[i]])s|=1<<i;} } assert(s<16); hc[c][r][b]=s; if(Z>max)max=Z; Z0a[b]=Z; // Z0a[b] = Z( (c-1,r,0)_int + (c,r,0)_all + (c+1,r,0)_int + (c,r,1)_ext given that (c,r,1)=b ) } if(check)assert(checkbound(Z0a,16*gt->Q2)); ff=m0/max;for(b=0;b<16;b++)Z0a[b]*=ff; if(check)assert(checkbound(Z0a,m0)); 
for(b=0;b<16;b++)Z1[b]=Z0[b]*Z0a[b];// b = state of (c,r,1) // Z1[b] = const*(Z of (c,<=r,*)_all given that (c,r,1)=b) if(check)assert(checkbound(Z1,m0*m1)); TOCK(3); } TICK(4); // (c,r,1) -> (c,r+1,1) if(randptr>randlength-64)randptr=randint(randlength-63); { long double Zx[16],ZZ0,ZZ1; int q,lh0[16],lh1[16]; q=encI(d,c,r-dir,1); // #define T1Gstrut(i,Zf,Zt,lf,lt) \ see tree1gibbs // #define T1Gstruta(i,Zf,Zt,lf,lt) / if(septab3a_compact){ T1Gstruta(0,Z1,Zx,id,lh1); T1Gstruta(1,Zx,Z1,lh1,lh0); T1Gstruta(2,Z1,Zx,lh0,lh1); T1Gstruta(3,Zx,Z0,lh1,hs[c][r]); }else{ T1Gstrut(0,Z1,Zx,id,lh1); T1Gstrut(1,Zx,Z1,lh1,lh0); T1Gstrut(2,Z1,Zx,lh0,lh1); T1Gstrut(3,Zx,Z0,lh1,hs[c][r]); } for(b=0;b<16;b++)if(Z0[b]>max)max=Z0[b]; } // Z0[b] = const*(Z of (c,<=r,*)_all given that (c,r+1,1)=b) if(check)assert(checkbound(Z0,16*gt->Q1*m0*m1)); ff=m1/max;for(b=0;b<16;b++)Z0[b]*=ff; if(check)assert(checkbound(Z0,m1)); TOCK(4); }//r for(s=0;s<16;s++)Z2[s]*=Z0[s]; }//dir // Z2[b] = const*(Z of (c,not r0,*)_all given that (c,r0,1)=b) TOCK(1); if(check)assert(checkbound(Z2,m1*m1)); TICK(5); // (c,r0,1) -> (c,r0,0) x0=X0[encI(d,c,r0,1)];x1=X1[encI(d,c,r0,1)]; for(s=0;s<16;s++)Z2[s]*=J0pow[s^x0]*J1pow[s^x1];// s = state of (c,r0,1) // Z2[s] = const*(Z of (c,not r0,*)_all + (c,r0,1)_ext given that (c,r0,1)=b) if(randptr>randlength-16)randptr=randint(randlength-15); x0=X0[encI(d,c,r0,0)];x1=X1[encI(d,c,r0,0)]; for(b=0,max=0;b<16;b++){// b = state of (c,r0,0) for(s=0,Z=0;s<16;s++){// s = state of (c,r0,1) pr[s]=Z2[s]*etab[QBI(d,c,r0,1,0,s,b)]; Z+=pr[s]; } for(z=RANDFLOAT*Z,s=0;s<16;s++){z-=pr[s];if(z<=0)break;} assert(s<16); hc[c][r0][b]=s; Z*=J0pow[b^x0]*J1pow[b^x1]; if(Z>max)max=Z; Z3a[b]=Z; } // Z3a[b] = const*(Z of (c,*,*)_all given that (c,r0,0)=b) if(check)assert(checkbound(Z3a,16*gt->Q0*m1*m1)); ff=m0/max;for(b=0;b<16;b++)Z3a[b]*=ff; if(check)assert(checkbound(Z3a,m0)); for(b=0;b<16;b++)Z4[b]=Z3[b]*Z3a[b];// b = state of (c,r0,0) if(check)assert(checkbound(Z4,m0*m1)); // Z4[b] = const*(Z of (<=c,*,*)_all given that (c,r0,0)=b) TOCK(5); TICK(6); if(randptr>randlength-64)randptr=randint(randlength-63); // (c,r0,0) -> (c+1,r0,0) { long double Zx[16],ZZ0,ZZ1; int q,lh0[16],lh1[16]; q=encI(d,c,r0,0); T1Gstrut(0,Z4,Zx,id,lh1); T1Gstrut(1,Zx,Z4,lh1,lh0); T1Gstrut(2,Z4,Zx,lh0,lh1); T1Gstrut(3,Zx,Z3,lh1,hr[c]); for(b=0;b<16;b++)if(Z3[b]>max)max=Z3[b]; } TOCK(6); if(check)assert(checkbound(Z3,16*gt->Q1*m0*m1)); ff=m1/max;for(b=0;b<16;b++)Z3[b]*=ff; if(check)assert(checkbound(Z3,m1)); // Z3[b] = const*(Z of (<=c,*,*)_all given that (c+1,r0,0)=b) }//c for(c=N-1;c>=0;c--){ f=!((c-ph)&1); XBI(d,c,r0,0)=hr[c][c==N-1?0:XBI(d,c+1,r0,0)]; XBI(d,c,r0,1)=hc[c][r0][XBI(d,c,r0,0)]; for(r=r0+1;r<N;r++){ XBI(d,c,r,1)=hs[c][r][XBI(d,c,r-1,1)]; if(f)XBI(d,c,r,0)=hc[c][r][XBI(d,c,r,1)]; } for(r=r0-1;r>=0;r--){ XBI(d,c,r,1)=hs[c][r][XBI(d,c,r+1,1)]; if(f)XBI(d,c,r,0)=hc[c][r][XBI(d,c,r,1)]; } } TOCK(0); return Z3[0]; } void simplegibbssweep_slow(double beta){ int d,s,x,y; double z,Z,pr[16]; double max,lp[16]; for(d=0;d<2;d++)for(y=0;y<N;y++)for(x=0;x<N;x++){ // Do Gibbs iteration to a single "bigvertex" (4 spins) // If d=0 then v=the bigvertex (x,y,0) // If d=1 then v=the bigvertex (y,x,1) // Replaces bigvertex v (4 spins) with a random value given by the Gibbs // distribution at inverse temperature beta conditioned on the rest of the graph. 
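  // Each candidate state s (4 spins) is weighted by exp(-beta*E(s)), where E(s) sums the three
  // incident big-edge energies QBI(d,x,y,0,{0,1,2},s,.) against the current neighbouring states;
  // the largest log-weight is subtracted before exponentiating so the weights stay in range.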
for(s=0,Z=0,max=-1e9;s<16;s++){ lp[s]=-beta*(QBI(d,x,y,0,0,s,XBI(d,x,y,1))+ QBI(d,x,y,0,1,s,XBI(d,x-1,y,0))+ QBI(d,x,y,0,2,s,XBI(d,x+1,y,0))); if(lp[s]>max)max=lp[s]; } for(s=0,Z=0;s<16;s++){pr[s]=exp(lp[s]-max);Z+=pr[s];} for(z=randfloat()*Z,s=0;s<16;s++){z-=pr[s];if(z<=0)break;} assert(s<16); XBI(d,x,y,0)=s; } } void shuf2(int*a,int n){ int i,j,t; for(i=0;i<n-1;i++){ j=i+randtab[randptr++]%(n-i);t=a[i];a[i]=a[j];a[j]=t; } } #define RANDSTART 0 void simplegibbssweep(gibbstables*gt){ int d,i,s,x,y; unsigned char *p0; unsigned int *p1; signed char *p1a; unsigned char (*septab0)[16][4]=gt->septab0; unsigned int (*septab1)[16][16]=gt->septab1; signed char (*septab1a)[16][16]=gt->septab1a; unsigned int *ftab=gt->ftab; randptr=randint(randlength-2*N*N*4-2*N-2); // Do Gibbs iteration to each "bigvertex" (d,x,y) (4 spins) // If d=0 then v=the bigvertex (x,y,0) // If d=1 then v=the bigvertex (y,x,1) // Replaces bigvertex v (4 spins) with a random value given by the Gibbs // distribution at inverse temperature beta conditioned on the rest of the graph. int d0[2],x0[N],y0[N],d1,x1,y1; switch(RANDSTART){ case 0: for(d=0;d<2;d++)d0[d]=d; for(x=0;x<N;x++)x0[x]=x; for(y=0;y<N;y++)y0[y]=y; break; case 1: d1=randtab[randptr++]&1; x1=randtab[randptr++]%N; y1=randtab[randptr++]%N; for(d=0;d<2;d++)d0[d]=(d1+d)&1; for(x=0;x<N;x++)x0[x]=(x1+x)%N; for(y=0;y<N;y++)y0[y]=(y1+y)%N; break; case 2: for(d=0;d<2;d++)d0[d]=d;shuf2(d0,2); for(x=0;x<N;x++)x0[x]=x;shuf2(x0,N); for(y=0;y<N;y++)y0[y]=y;shuf2(y0,N); break; } if(septab1a_compact){ for(d1=0;d1<2;d1++){ d=d0[d1]; for(y1=0;y1<N;y1++){ y=y0[y1]; for(x1=0;x1<N;x1++){ x=x0[x1]; p0=septab0[XBI(d,x-1,y,0)][XBI(d,x+1,y,0)]; // p0[i] = aabc (bits), aa=i, b=XBI(d,x-1,y,0) bit i, c=XBI(d,x+1,y,0) bit i p1a=septab1a[encI(d,x,y,0)][XBI(d,x,y,1)]; for(i=0,s=0;i<4;i++)if(randtab[randptr++]>=ftab[p1a[p0[i]]])s|=1<<i; XBI(d,x,y,0)=s; } } } }else{ for(d1=0;d1<2;d1++){ d=d0[d1]; for(y1=0;y1<N;y1++){ y=y0[y1]; for(x1=0;x1<N;x1++){ x=x0[x1]; p0=septab0[XBI(d,x-1,y,0)][XBI(d,x+1,y,0)]; // p0[i] = aabc (bits), aa=i, b=XBI(d,x-1,y,0) bit i, c=XBI(d,x+1,y,0) bit i p1=septab1[encI(d,x,y,0)][XBI(d,x,y,1)]; for(i=0,s=0;i<4;i++)if(randtab[randptr++]>=p1[p0[i]])s|=1<<i; XBI(d,x,y,0)=s; } } } } } typedef int treestriptype;// Use int if range of values exceeds 16 bits, or use short to save memory on a very wide exhaust. int treestripexhaust(int d,int w,int ph,int upd,int fixedrow){ // w=width, ph=phase (0,...,w) // If d=0 exhaust the (induced) treewidth w subgraph consisting of: // all columns, c, s.t. c+ph is congruent to 0,1,...,w-1 mod w+1 ("full"), // the o=1 (vertically connected) verts of columns where c+ph is congruent to w mod w+1 ("spike"), // and horizontal joiners of randomly chosen height either side of each c+ph=w mod w+1 column. // If d=1 then same with rows <-> columns. // Comments and variable names are as if in the column case (d=0) // upd=1 <-> the optimum is written back into the global state // E.g., w=2, N=8: // ph=0: FFSFFSFF \ F=full, S=spike (o=1) // ph=1: FSFFSFFS | // ph=2: SFFSFFSF / int b,c,f,i,r,s,x,v,nf,b1,s0,s1,bc,lw,dir,inc,mul,phl,smin,vmin,pre2[16][16],ps[N][16]; int64 bi,br,bm,size0,size1; int jr0,jr1,jv0[16],jv1[16];// join row, value. 
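  // v0, v1 and v2 are DP tables indexed by a 4*w-bit boundary state (one hex nibble per strip
  // column): size0 = 16^w entries for v0/v2, and size1 = 16 + 16^2 + ... + 16^w entries for v1,
  // which holds the chain of progressively wider comb buffers (vold/vnew) built below.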
treestriptype*v0,*v1,*v2,*vold,*vnew; double t0,t1,t2; size0=1LL<<4*w; size1=16*(size0-1)/15; v0=(treestriptype*)malloc(size0*sizeof(treestriptype)); v1=(treestriptype*)malloc(size1*sizeof(treestriptype)); v2=(treestriptype*)malloc(size0*sizeof(treestriptype)); nf=(N-1+ph)/(w+1)-(ph+1)/(w+1)+1;// Number of full (non-spike) exhausts (the end ones could be narrow, but still called full) bm=1LL<<4*(w-1); // Spike labels are f = 0...nf-1 or 0...nf; Full labels are f = 0...nf-1 int jr[N];// jr[c] = join row associated to column c UC hs0[nf+1][N][16],hs1[nf+1][16],(*hf0)[N][w][bm*16],(*hf1)[N][w][bm][16],(*hf2)[w][bm][16],(*hf3)[w][bm][16]; hf0=0;hf1=0;hf2=hf3=0; if(upd){ hf0=(UC(*)[N][w][bm*16])malloc(nf*N*w*bm*16); hf1=(UC(*)[N][w][bm][16])malloc(nf*N*w*bm*16); hf2=(UC(*)[w][bm][16])malloc(nf*w*bm*16); hf3=(UC(*)[w][bm][16])malloc(nf*w*bm*16); } if(!(v0&&v1&&v2&&(upd==0||(hf0&&hf1&&hf2&&hf3)))){ fprintf(stderr,"Couldn't allocate %gGiB in treestripexhaust()\n", (double)((size0*2+size1)*sizeof(treestriptype)+!!upd*(nf*(2*N+2)*w*bm*16))/(1<<30));return 1;} t0=t1=t2=0; for(c=0;c<N;c++){for(s=0;s<16;s++)ps[c][s]=s;if(upd)shuf(ps[c],16);} jr0=randint(N);for(i=0;i<16;i++)jv0[i]=0; if(fixedrow>=0)jr0=fixedrow; for(c=f=0;c<N;){// f=full exhaust no. // jv0[s] = value of stuff to the left given that (c-1,jr0,0)=s phl=(c+ph)%(w+1); jr[c]=jr0; if(phl==w){// Spike t0-=cpu(); jr1=jr0; for(s=0;s<16;s++)v2[s]=0; for(dir=0;dir<2;dir++){ for(s=0;s<16;s++)v0[s]=0; for(r=dir*(N-1);r!=jr0;r+=1-2*dir){ // Here v0[b] = value of (c,previous,*) given that (c,r,1)=b for(b=0;b<16;b++)v1[b]=v0[b]+QBI(d,c,r,0,0,XBI(d,c,r,0),b);// b = state of (c,r,1) for(b=0;b<16;b++){// b = state of (c,r+1-2*dir,1) vmin=1000000000;smin=0; for(s=0;s<16;s++){// s = state of (c,r,1) v=(v1[s]+QBI(d,c,r,1,2-dir,s,b))<<4|ps[c][s]; if(v<vmin){vmin=v;smin=s;} } v0[b]=vmin>>4; if(upd)hs0[f][r][b]=smin; } }//r for(s=0;s<16;s++)v2[s]+=v0[s]; }//dir for(b=0;b<16;b++){// b = state of (c,jr0,0) vmin=1000000000;smin=0; for(s=0;s<16;s++){// s = state of (c,jr0,1) v=(v2[s]+QBI(d,c,jr0,1,0,s,b))<<4|ps[c][s]; if(v<vmin){vmin=v;smin=s;} } v0[b]=jv0[b]+(vmin>>4); if(upd)hs0[f][jr0][b]=smin; } for(b=0;b<16;b++){// b = state of (c+1,jr0,0) vmin=1000000000;smin=0; for(s=0;s<16;s++){// s = state of (c,jr0,0) v=(v0[s]+QBI(d,c,jr0,0,2,s,b))<<4|ps[c][s]; if(v<vmin){vmin=v;smin=s;} } jv1[b]=vmin>>4; if(upd)hs1[f][b]=smin; } c++; t0+=cpu(); }else{// Full assert(f<nf); lw=MIN(w-phl,N-c);assert(lw>=1&&lw<=w);// Local width jr1=randint(N); if(fixedrow>=0)jr1=fixedrow; jr[c+lw-1]=jr1; // Width lw exhaust, incoming jv0[] at row jr0, outgoing jv1[] at row jr1 memset(v2,0,size0*sizeof(treestriptype)); for(dir=0;dir<2;dir++){ memset(v0,0,size0*sizeof(treestriptype)); for(r=dir*(N-1);r!=jr1;r+=1-2*dir){ // Comb exhaust // At this point: v0 maps (*,r,1) to value of (*,r,1), (*,<r,*) // // *b0 *b1 *b2 *b3 // / / / / // / / / / // ------*---------*---------*---------*------ // s0 s1 s2 s3 // // vc3[s3] = Qext(s3,X4) // vc2[s2,b3] = min_{s3} vc3[s3]+Q(s3,b3)+Q(s3,s2) // vc1[s1,b2,b3] = min_{s2} vc2[s2,b3]+Q(s2,b2)+Q(s2,s1) // vc0[s0,b1,b2,b3] = min_{s1} vc1[s1,b2,b3]+Q(s1,b1)+Q(s1,s0) (variable names s0,s1,b1 correspond to this x=1 case) // v0[b0,b1,b2,b3] += min_{s0} vc0[s0,b1,b2,b3]+Q(s0,b0)+{Qext(X_{-1},s0), or jv0[s0] if r=jr0} t1-=cpu(); vold=v1; for(s=0;s<16;s++)vold[s]=QBI(d,c+lw-1,r,0,2,s,XBI(d,c+lw,r,0));// right boundary interaction for(x=lw-1,bm=1;x>=0;bm*=16,x--){ // Loop over br = (b_{x+1},...,b_{lw-1}) { // the irrelevant parameters // Loop over s_{x-1} { // Loop 
over b_x { // vc[x-1][s_{x-1},b_x,br] = min over s_x of vc[x][s_x,br]+Q(s_x,b_x)+Q(s_x,s_{x-1}) // } // } // } int ql[16];// left boundary interaction if(r==jr0)memcpy(ql,jv0,16*sizeof(int)); else for(s=0;s<16;s++)ql[s]=QBI(d,c,r,0,1,s,XBI(d,c-1,r,0)); vnew=vold+16*bm; mul=(x>0?16:1); for(br=0;br<bm;br++){// br is state of (c+x+1,r,1),...,(c+lw-1,r,1) for(s0=0;s0<mul;s0++){// s0 is state of (c+x-1,r,0) (doesn't exist if x=0) for(b1=0;b1<16;b1++){// b1 is state of (c+x,r,1) vmin=1000000000;smin=0; for(s1=0;s1<16;s1++){// s1 is state of (c+x,r,0) v=(vold[s1+16*br]+QBI(d,c+x,r,0,0,s1,b1)+(x>0?QBI(d,c+x,r,0,1,s1,s0):ql[s1]))<<4|ps[c][s1]; if(v<vmin){vmin=v;smin=s1;} } bi=s0+mul*(b1+16*br); if(x>0)vnew[bi]=vmin>>4; else v0[bi]+=vmin>>4; //printf("comb c=%d f=%d r=%d x=%d br=%lld s0=%d b1=%d bi=%lld smin=%d vmin=%d\n",c,f,r,x,br,s0,b1,bi,smin,vmin>>4); if(upd)hf0[f][r][x][bi]=smin; } } } vold=vnew; }//x assert(vnew-v1<=size1); if(lw==w)assert(vnew-v1==size1); // At this point v0 maps (*,r,1) to value of (*,<=r,*) t1+=cpu(); // Strut exhaust // // *b0 *bc * * // | | | | // | ^ | | // | | | | // * *s *b2 *b3 // // (c=1 dir=0 picture) t2-=cpu(); bm=1LL<<4*(lw-1); for(x=0;x<lw;x++){ if(x&1){vold=v1;vnew=v0;} else {vold=v0;vnew=v1;} // At this point vold maps (>=c+x,r,1), (<c+x,r+1,1) to the value below these vertices // (r+1 corresponds to dir=0; r-1 and mutatis mutandis for dir=1) for(bc=0;bc<16;bc++){// bc = state of (c+x,r+1,1) for(s=0;s<16;s++){// s = state of (c+x,r,1) pre2[bc][s]=QBI(d,c+x,r,1,2-dir,s,bc); } } for(br=0;br<bm;br++){// br = state of non-(c+x) columns in cyclic order x+1,...,lw-1,0,...,x-1 // i.e., cols c+x+1,...,c+lw-1 at row r // then cols c,...,c+x-1 at row r+1 for(bc=0;bc<16;bc++){// bc = state of (c+x,r+1,1) vmin=1000000000;smin=0; for(s=0;s<16;s++){// s = state of (c+x,r,1) v=(vold[s+16*br]+pre2[bc][s])<<4|ps[c][s]; if(v<vmin){vmin=v;smin=s;} } vnew[br+bm*bc]=vmin>>4; if(upd)hf1[f][r][x][br][bc]=smin; } } }//x if(lw&1)memcpy(v0,v1,size0*sizeof(treestriptype)); // Now v0 maps (*,r+1,1) to value of (*,r+1,1),(*,<=r,*) t2+=cpu(); }//r for(br=0;br<size0;br++)v2[br]+=v0[br]; }//dir // Now v2 maps (*,jr1,1) to value of (*,r!=jr1,*) // v2[b0,b1,b2,b3]=val(above and below) // Think of this as v2[s0,b0,b1,b2,b3] but not depending on s0 // // v2 // .------------+-------------. // . . . . 
// *b0 *b1 *b2 *b3 // / / / / // / / / / // *--------*---------*---------*---------*--------* // ext or s0 s1 s2 s3 jv1[s4] // jv0[s0] for(x=0;x<lw;x++){ // v0[s_x,b_{x+1},..,b_{lw-1}] = min_{b_x} v2[s_x,b_x,...,b_{lw-1}] // + Q(s_x,b_x) + (if x=0) Qext(X_{-1},s_x) or jv0[s_x] if jr1=jr0 // s_x is (c+x,jr1,0) // b_x is (c+x,jr1,1) // bm=1LL<<4*(lw-1-x); for(br=0;br<bm;br++){// br = state of b_{x+1},...,b_{lw-1} for(s=0;s<16;s++){// s = state of s_x vmin=1000000000;smin=0; for(bc=0;bc<16;bc++){// bc = state of b_x if(x==0)v=v2[bc+16*br]; else v=v2[s+16*(bc+16*br)]; v+=QBI(d,c+x,jr1,0,0,s,bc); v=(v<<4)|ps[c][bc]; if(v<vmin){vmin=v;smin=bc;} } vmin>>=4; if(x==0){ if(jr1==jr0)vmin+=jv0[s]; else vmin+=QBI(d,c,jr1,0,1,s,XBI(d,c-1,jr1,0)); } v0[s+16*br]=vmin; if(upd)hf2[f][x][br][s]=smin; }//s }//br // v2[s_{x+1},b_{x+1},...,b_{lw-1}] = min_{s_x} v0[s_x,b_{x+1},...,b_{lw-1}] + Q(s_x,s_{x+1}) for(br=0;br<bm;br++){// br = state of b_{x+1},...,b_{lw-1} for(s1=0;s1<16;s1++){// s = state of s_{x+1} vmin=1000000000;smin=0; for(s0=0;s0<16;s0++){// s = state of s_x v=(v0[s0+16*br]+QBI(d,c+x,jr1,0,2,s0,s1))<<4|ps[c][s0]; if(v<vmin){vmin=v;smin=s0;} } v2[s1+16*br]=vmin>>4; if(upd)hf3[f][x][br][s1]=smin; }//s1 }//br }//x for(s=0;s<16;s++)jv1[s]=v2[s]; c+=lw;f++; } for(s=0;s<16;s++)jv0[s]=jv1[s]; jr0=jr1; }//c assert(f==nf&&c==N); if(upd){ for(c=N;c>0;){ // Incoming info is state of (c,jr[c-1],0) phl=(c+w+ph)%(w+1); jr1=jr[c-1]; if(phl==w){// Came from spike c--; XBI(d,c,jr1,0)=hs1[f][c<N?XBI(d,c+1,jr1,0):0]; XBI(d,c,jr1,1)=hs0[f][jr1][XBI(d,c,jr1,0)]; for(dir=0;dir<2;dir++){ inc=1-2*dir; for(r=jr1-inc;r>=0&&r<N;r-=inc)XBI(d,c,r,1)=hs0[f][r][XBI(d,c,r+inc,1)]; } }else{ f--;lw=MIN(phl+1,c);c-=lw;jr0=jr[c]; br=0; for(x=lw-1;x>=0;x--){ XBI(d,c+x,jr1,0)=hf3[f][x][br][XBI(d,c+x+1,jr1,0)]; XBI(d,c+x,jr1,1)=hf2[f][x][br][XBI(d,c+x,jr1,0)]; br=(br<<4)|XBI(d,c+x,jr1,1); } // Info is (c...c+lw-1,jr1,*) for(dir=0;dir<2;dir++){ inc=1-2*dir; for(r=jr1-inc;r>=0&&r<N;r-=inc){ // Info is (c...c+lw-1,r+inc,1) // De-strut for(x=lw-2,br=0;x>=0;x--)br=(br<<4)|XBI(d,c+x,r+inc,1); for(x=lw-1;x>=0;x--){ XBI(d,c+x,r,1)=hf1[f][r][x][br][XBI(d,c+x,r+inc,1)]; br=((br<<4)&~(15LL<<4*(lw-1)))|XBI(d,c+x,r,1); } // Info is (c...c+lw-1,r,1) // De-comb for(x=lw-1,br=0;x>=0;x--)br=(br<<4)|XBI(d,c+x,r,1); for(x=0;x<lw;x++){ if(x==0)bi=br; else bi=(br<<4)|XBI(d,c+x-1,r,0); XBI(d,c+x,r,0)=hf0[f][r][x][bi]; //printf("de-comb c=%d f=%d r=%d x=%d br=%lld bi=%lld smin=%d\n",c,f,r,x,br,bi,XBI(d,c+x,r,0)); br>>=4; } }//r }//dir } }//c assert(f==0&&c==0); free(hf3);free(hf2);free(hf1);free(hf0); } free(v2);free(v1);free(v0); //printf("Times %.2fs %.2fs %.2fs\n",t0,t1,t2); return jv0[0]; } int stablek44exhaust(int cv){// A round of exhausts on each K44 (big vertex) int i,r,v,x,y,ord[N*N]; r=0; for(i=0;i<N*N;i++)ord[i]=i;shuf(ord,N*N); while(1){ for(i=0;i<N*N;i++){ x=ord[i]%N;y=ord[i]/N;k44exhaust(x,y); v=val();assert(v<=cv); if(v<cv){cv=v;r=0;}else{r+=1;if(r==N*N)return cv;} } } } int stablestripexhaust(int cv,int wid){// Repeated strip exhausts until no more improvement likely int c,i,o,r,v,nc,ord[2*(N-wid+1)]; nc=N-wid+1;r=0; while(1){ for(i=0;i<2*nc;i++)ord[i]=i; //shuf(ord,2*nc); shuf(ord,nc);shuf(ord+nc,nc); for(i=0;i<2*nc;i++){ c=ord[i]%nc;o=ord[i]/nc; stripexhaust(o,c,c+wid,1); v=val();assert(v<=cv); if(v<cv){cv=v;r=0;}else{r+=1;if(r==2*nc)return cv;} } } } int stabletreeexhaust(int cv,int wid,int64*ntr){// Repeated tree exhausts until no more improvement likely int d,n,ph,ph0,r,v; n=0;d=randint(2);ph=ph0=randint(wid+1);r=randint(N); while(1){ 
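  // Each pass performs one tree exhaust at the current (d,ph,r): tree1exhaust() when wid==1,
  // otherwise treestripexhaust(). An improvement resets the stall counter n; otherwise the loop
  // returns after (wid+1)*2 consecutive non-improving exhausts. r advances cyclically each pass,
  // ph advances mod wid+1, and the direction d is flipped each time ph returns to ph0.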
if(ntr)(*ntr)++; if(wid==1)v=tree1exhaust(d,ph,r,1);//{tree1gibbs_slow(d,ph,r,genp[0]);v=val();} else v=treestripexhaust(d,wid,ph,1,r); if(v<cv){cv=v;n=0;}else{n+=1;if(n==(wid+1)*2)return cv;} r=(r+1)%N; ph=(ph+1)%(wid+1);if(ph==ph0)d=1-d; } } void resizecumsumleft(int64*cst,int size){// insert cst[0..size-1] into right of cst[0..2*size-1] and clear left int s; for(s=size/2;s>=1;s>>=1){ memcpy(cst+3*s,cst+s,s*sizeof(int64)); memset(cst+2*s,0,s*sizeof(int64)); } cst[1]=cst[0]; } void resizecumsumright(int64*cst,int size){// insert cst[0..size-1] into left of cst[0..2*size-1] and clear right int s; for(s=size/2;s>=1;s>>=1){ memcpy(cst+2*s,cst+s,s*sizeof(int64)); memset(cst+3*s,0,s*sizeof(int64)); } cst[1]=0; } void inccumsum(int64*cst,int size,int v){// effectively increment all of [0,v) for(v+=size;v>0;v/=2)if(v&1)cst[v/2]++; } int64 querycumsumgt(int64*cst,int size,int v){// query how many increments were greater than v int64 t; if(v<0)return cst[0]; if(v>=size)return 0; for(v+=size,t=0;v>0;v/=2)if(!(v&1))t+=cst[v/2]; return t; } int64 querycumsumle(int64*cst,int size,int v){return cst[0]-querycumsumgt(cst,size,v);}// query how many increments were less than or equal to v int64 querycumsumlt(int64*cst,int size,int v){return querycumsumle(cst,size,v-1);}// ditto, less than v int64 querycumsumeq(int64*cst,int size,int v){return querycumsumle(cst,size,v)-querycumsumlt(cst,size,v);}// ditto, equal to v #define MAXST (1<<18) // For stats. int opt1(double mint,double maxt,int pr,int tns,double *findtts,int strat,int bv,int targenergy){ // // Heuristic optimisation, writing back best value found. Can be used to find TTS, the // expected time to find an optimum solution, using the strategy labelled by 'strat'. // 'strat' is assumed to be a fixed strategy running forever which does not know what // the optimum value is. I.e., it is not allowed to make decisions as to how to search // based on outside knowledge of the optimum value. In "findtts" mode, the aim is to get // an accurate estimate of the expected time for 'strat' to find its first optimum // state. // // opt1() returns a pair (presumed optimum, estimate of TTS), the "presumed optimum" // being the smallest value found in its searching. If the presumed optimum is wrong // (not actually the optimum) then the estimate of TTS is allowed to be anything. If the // presumed optimum is correct then the estimate of TTS must be unbiased. So for the // purposes of reasoning about whether or not opt1 is behaving correctly, we only care about // the case when the presumed optimum is the actual optimum. Of course, we also want to // make it very likely that the presumed optimum is the actual optimum, but we don't // seek to quantify what constitutes very likely here. // // Strategies maintain some "state" in addition to the spin configuration. They use // information from previous iterations to guide the present iteration. This means that // to get unbiased samples of "time to solve", you need to stop the strategy when it // hits an optimum and then restart it cleanly, clearing all state. Only that way can // you be sure that you are averaging unbiased runs when taking the average TTS. // // Notionally it runs as if it is presided over by an oracle that resets its state // whenever it hits the optimal value. The wrinkle is that the strategy itself is the // thing that is deciding the optimal value.
Since the strategy doesn't actually know // the optimum value for certain, and is not allowed to use it to change its behaviour, // it has to restart itself every time it hits a "presumed optimum", i.e., a // (equal-)lowest value found so far. For external convenience, a record of this optimum // is carried over from independently restarted runs, but this value is "unauthorised // information" - the strategy is not allowed to use it to make a decision. I.e., you // have to imagine the run carrying on forever, but the presumed optimum just dictates // when to take the time reading (at the point the run finds an equally good value). // // If the presumed optimum is bettered, so making a new presumed optimum, then all // previous statistics have to be discarded, including the current run which found the // new presumed optimum. This is because the new presumed optimum was not found under // "infinity run" conditions: the early stopping at the old presumed optimum might have // biased it. So you actually need to find n+1 optima to generate n samples. // // S0: Randomise configuration; stablek44exhaust; repeat // S1: Randomise configuration; stablelineexhaust; repeat // S2: No longer used // S3: Randomise configuration; stabletree1exhaust // S4: Randomise configuration; stabletree2exhaust // S(10+n): Do Sn but randomly perturb configuration instead of randomise it entirely int v,nis,cmin,cv,nv,ns,new,last,reset,ssize,copied,Xbest[NBV]; int64 nn,rep,stt,ntr,*stats; double t0,t1,t2,t3,tt,now; double parms[6][2]={{0.5,0.3},{0.25,0.25},{0.5,0.25},{0.5,0.35},{0.25,0.2},{0.25,0.2}}; if(pr){ printf("Target number of presumed optima: %d\n",tns); printf("Min time to run: %gs\nMax time to run: %gs\n",mint,maxt); printf("Solutions are %sdependent\n",findtts?"in":""); printf(" Nodes bv nsol t_bv t_all t_bv/nsol nts/nis nis/nsol\n"); } nn=0; t0=cpu();// Initial time t1=0;// Elapsed time threshold for printing update // "presumed solution" means "minimum value found so far" ns=0;// Number of presumed solutions ntr=0;// Number of treeexhausts (which is nearly number of sweeps) t2=t0;// t2 = Time of last clean start (new minimum value in "independent" mode) stats=(int64*)malloc(MAXST*sizeof(int64));assert(stats); memset(Xbest,0,NBV*sizeof(int));copied=0; reset=1; rep=cv=nis=0; if(targenergy>-1000000000)bv=targenergy; do{ if(reset){// Forcibly reset all state after a (presumed) solution, so that runs are independent init_state();nis++;cv=val(); cmin=1000000000;ssize=1024;assert(ssize<=MAXST);memset(stats,0,ssize*sizeof(int64)); stt=0;// Total count of values found rep=0; reset=0; t3=cpu(); } //printf("%10d %10d %10lld %10lld\n",cv,bv,rep,stt); if(rep>=stt*parms[strat%10][0]){ if(strat<10){init_state();nis++;} else pertstate(parms[strat%10][1]); cv=val();rep=0; } switch(strat%10){ case 0: nv=stablek44exhaust(cv);// Simple "local" strategy break; case 1: nv=stablestripexhaust(cv,1); break; case 3: nv=stabletreeexhaust(cv,1,&ntr); break; case 4: nv=stabletreeexhaust(cv,2,&ntr); break; case 5: nv=stabletreeexhaust(cv,3,&ntr); break; default: fprintf(stderr,"Unknown strategy %d\n",strat);exit(1); } nv=MAX(nv,targenergy); if(nv<cv)rep=0; cv=nv; if(cmin==1000000000)cmin=cv-ssize/2; // [cmin,cmin+ssize) corresponds to [0,ssize) in stats[], as encoded by cumsum tree while(cv<cmin){assert(ssize*2<=MAXST);resizecumsumleft(stats,ssize);cmin-=ssize;ssize*=2;} while(cv>=cmin+ssize){assert(ssize*2<=MAXST);resizecumsumright(stats,ssize);ssize*=2;} if(rep==0){inccumsum(stats,ssize,cv-cmin);stt++;}// possibly change rep=0 
condition rep+=querycumsumle(stats,ssize,cv-cmin); // stt=number of new minima since last reset // qqq=number of new minima since last reset that were <= current cv // rep=sum of qqq since last new minimum // idea is that by putting a limit on rep/stt, the time spent at energy cv is // inversely proportional to the probability that a new minimum is <= cv. if((pr>=3&&cv<=bv)||pr>=5){printf("\nSTATE cv=%d bv=%d\n",cv,bv);prstate(stdout,0,0);printf("DIFF\n");prstate(stdout,1,Xbest);} nn++; now=cpu(); new=(cv<bv); if(new){bv=cv;ns=0;ntr=0;} if(new||(cv==bv&&!copied)){memcpy(Xbest,XBa,NBV*sizeof(int));copied=1;}// this logic ensures a copy if initial bv is optimum if(cv==bv){ if(new&&findtts)t2=now; else ns++; if(0){if(new&&findtts){t2=now;printf("NEW BEST\n");} else {ns++;printf("%12g Time to find\n",now-t3);}} if(findtts)reset=1; } tt=now-t0; last=(now-t2>=mint&&ns>=tns)||tt>=maxt; if(new||tt>=t1||last){ t1=MAX(tt*1.1,tt+5); if(pr>=1){ if(findtts)printf("%12lld %10d %10d %8.2f %8.2f %10.3g %10.3g %10.3g\n",nn,bv,ns,now-t2,tt,(now-t2)/ns,ntr/(double)nis,nis/(double)ns); else printf("%12lld %10d %8.2f\n",nn,bv,tt); if(pr>=2){ printf("Tot stat %lld\n",stt); for(v=cmin+ssize-1;v>=cmin;v--){ int64 n=querycumsumeq(stats,ssize,v-cmin); if(n)printf("%6d %12lld\n",v,n); } if(pr>=4){printf("STATE cv=%d bv=%d\n",cv,bv);prstate(stdout,0,0);printf("DIFF\n");prstate(stdout,1,Xbest);printf("\n");} } fflush(stdout); } } }while(!last); if(findtts)*findtts=(now-t2)/ns; memcpy(XBa,Xbest,NBV*sizeof(int)); free(stats); return bv; } int cmpint(const void*p,const void*q){return *(int*)p-*(int*)q;} int cmpd(const void*p,const void*q){double z=*(double*)p-*(double*)q;return (z>0)-(z<0);} int okinv(int c,int r,int o,int s){// aborts if s isn't on the OK list int i; if(c<0||c>=N||r<0||r>=N){assert(s==0);return 0;} for(i=0;i<nok[enc(c,r,o)];i++)if(ok[enc(c,r,o)][i]==s)return i; assert(0); } int ok2inv(int c,int r,int s){// returns -1 if s isn't on the OK2 list int i; if(c<0||c>=N||r<0||r>=N)return s==0?0:-1; for(i=0;i<nok2[enc2(c,r)];i++)if(ok2[enc2(c,r)][i]==s)return i; return -1; } void getrestrictedsets(void){ int i,j,o,s,v,x,y,bb,s0,s1,s0b,s1b,tt,tt0,v0,x0,x1,y0,y1,max,vmin,tv[16],meet[16][16],ll0,ll[65536]; UC (*ok0)[16][16]; ok0=(UC(*)[16][16])malloc(65536*16*16);assert(ok0); for(v=0;v<NBV;v++)for(s=0;s<16;s++)ok[v][s]=1; tt0=1000000000; for(x=0;x<N;x++)for(y=0;y<N;y++){for(i=0;i<256;i++)ok2[enc2(x,y)][i]=i;nok2[enc2(x,y)]=256;} while(1){ tt=0; for(x=0;x<N;x++)for(y=0;y<N;y++)for(o=0;o<2;o++){for(s=0;s<16;s++)tt+=ok[enc(x,y,o)][s];} if(deb>=1)printf("Total %4d / %4d\n",tt,N*N*2*16); if(tt>=tt0)break; tt0=tt; for(x=0;x<N;x++)for(y=0;y<N;y++){ bb=0; for(x0=0;x0<16;x0++)if((x==0&&x0==0)||(x>0&&ok[enc(x-1,y,0)][x0])){ for(x1=0;x1<16;x1++)if((x==N-1&&x1==0)||(x<N-1&&ok[enc(x+1,y,0)][x1])){ for(y0=0;y0<16;y0++)if((y==0&&y0==0)||(y>0&&ok[enc(x,y-1,1)][y0])){ for(y1=0;y1<16;y1++)if((y==N-1&&y1==0)||(y<N-1&&ok[enc(x,y+1,1)][y1])){ for(s1=0;s1<16;s1++)tv[s1]=QB(x,y,1,1,s1,y0)+QB(x,y,1,2,s1,y1); vmin=1000000000; for(s0=0;s0<16;s0++){ //if(ok[enc(x,y,0)][s0]==0)continue;//possible v0=QB(x,y,0,1,s0,x0)+QB(x,y,0,2,s0,x1); for(s1=0;s1<16;s1++){ //if(ok[enc(x,y,1)][s1]==0)continue;//possible v=QB(x,y,0,0,s0,s1)+v0+tv[s1]; if(v<vmin){memset(ok0[bb],0,16*16);vmin=v;} if(v==vmin)ok0[bb][s0][s1]=1; }//s1 }//s0 bb++; }//y1 }//y0 }//x1 }//x0 //printf("bb=%d\n",bb); for(o=0;o<2;o++)for(s=0;s<16;s++)ok[enc(x,y,o)][s]=0; for(i=0;i<bb-1;i++)ll[i]=i+1;ll0=0;ll[bb-1]=-1; nok2[enc2(x,y)]=0; while(1){ memset(meet,0,sizeof(meet)); 
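  // meet[s0][s1] counts how many of the boundary patterns bb still uncovered (those remaining on
  // the linked list ll0/ll[]) have (s0,s1) among their minimising pairs. The most frequently
  // occurring pair is added to the restricted sets ok[]/ok2[], the patterns it covers are spliced
  // out of the list, and the process repeats until every pattern is covered.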
for(i=ll0;i>=0;i=ll[i])for(s0=0;s0<16;s0++)for(s1=0;s1<16;s1++)meet[s0][s1]+=ok0[i][s0][s1]; //for(s0=0;s0<16;s0++){for(s1=0;s1<16;s1++)printf("%10d ",meet[s0][s1]);printf("\n");} s0b=s1b=-1;// To shut compiler up for(s0=0,max=0;s0<16;s0++)for(s1=0;s1<16;s1++)if(meet[s0][s1]>max){max=meet[s0][s1];s0b=s0;s1b=s1;} // ^ Can use better method. Should include cartesian product of projections first. if(max==0)break; ok[enc(x,y,0)][s0b]=ok[enc(x,y,1)][s1b]=1; ok2[enc2(x,y)][nok2[enc2(x,y)]++]=s0b+(s1b<<4); for(i=ll0;i>=0&&ok0[i][s0b][s1b];i=ll[i]); if(i<0)break; ll0=i; while(i>=0){ for(j=ll[i];j>=0&&ok0[j][s0b][s1b];j=ll[j]); ll[i]=j;i=j; } }// subset-choosing loop //printf("%2d %2d:",x,y);for(o=0;o<2;o++){printf(" ");for(s=0;s<16;s++)printf("%d ",ok[enc(x,y,o)][s]);}printf("\n"); }//x,y } free(ok0); // Convert indicator map to list for(v=0;v<NBV;v++){for(s=0,i=0;s<16;s++)if(ok[v][s])ok[v][i++]=s;nok[v]=i;} if(deb>=2)for(x=0;x<N;x++)for(y=0;y<N;y++)for(o=0;o<2;o++){ printf("%d %d %d :",x,y,o); for(i=0;i<nok[enc(x,y,o)];i++)printf(" %2d",ok[enc(x,y,o)][i]); printf("\n"); } // Sort ok2[] to facilitate fullexhaust() for(x=0;x<N;x++)for(y=0;y<N;y++)qsort(ok2[enc2(x,y)],nok2[enc2(x,y)],sizeof(int),cmpint); ok[NBV][0]=0;nok[NBV]=1; // Special entries at the end to cater for off-grid cells ok2[N*N][0]=0;nok2[N*N]=1; // if(deb>=1){ for(y=N-1;y>=0;y--){ for(x=0;x<N;x++){ for(o=0;o<2;o++){ v=nok[enc(x,y,o)]; if(v<16)printf("%x",v); else printf("g"); } printf(" "); } printf("\n"); } for(y=N-1;y>=0;y--){ for(x=0;x<N;x++)printf("%3d ",nok2[enc2(x,y)]); printf("\n"); } } } void applyam(int a,int*XBa0,intqba(*QBa0)[3][16][16],int(*ok0)[16],int*nok0,int(*ok20)[256],int*nok20){ // Apply automorphism a=0,1,...,7 int d,i,o,t,v,x,y,o1,x1,y1,dx,dy,d1,v1,s0,s1; for(x=0;x<N;x++)for(y=0;y<N;y++){ x1=x;y1=y; if(a&1){x1=y;y1=x;} if(a&2)x1=N-1-x1; if(a&4)y1=N-1-y1; for(o=0;o<2;o++){ v=enc(x,y,o); o1=o^(a&1); v1=enc(x1,y1,o1); XBa[v]=XBa0[v1]; for(d=0;d<3;d++){ d1=d; if(d){ if(o==0){dx=2*d-3;dy=0;}else{dx=0;dy=2*d-3;} if(a&1){t=dx;dx=dy;dy=t;} if(a&2){dx=-dx;} if(a&4){dy=-dy;} if(o1==0){assert(dy==0);d1=(dx+3)/2;}else{assert(dx==0);d1=(dy+3)/2;} } for(s0=0;s0<16;s0++)for(s1=0;s1<16;s1++)QBa[v][d][s0][s1]=QBa0[v1][d1][s0][s1]; } nok[v]=nok0[v1]; for(i=0;i<nok[v];i++)ok[v][i]=ok0[v1][i]; }//o v=enc2(x,y);v1=enc2(x1,y1); nok2[v]=nok20[v1]; for(i=0;i<nok2[v];i++)if(a&1){t=ok20[v1][i];ok2[v][i]=(t>>4)|((t&15)<<4);} else ok2[v][i]=ok20[v1][i]; qsort(ok2[v],nok2[v],sizeof(int),cmpint); }//x,y } int fullexhaust(){ // Uses restricted sets to cut down possibilities // and full automorphism group to choose best orientation int a,c,r,s,v,x,bc,bc0,A,s0,mul0,mul1,offset, XBa0[NBV],ok0[NBV][16],nok0[NBV],ok20[N*N][256],nok20[N*N], pre[4096][4],pre2[16][16]; intqba QBa0[NBV][3][16][16]; int64 b,br,bm,nc,ns,nc0,tnc,maxc,maxs,maxt,size0,size1; double t0,t1,t2,tns,ctns,cost,mincost; short*v0,*v1,*vold,*vnew; t0=-cpu(); getrestrictedsets(); memcpy(XBa0,XBa,sizeof(XBa0));memcpy(QBa0,QBa,sizeof(QBa0)); memcpy(ok0,ok,sizeof(ok0));memcpy(nok0,nok,sizeof(nok0)); memcpy(ok20,ok2,sizeof(ok20));memcpy(nok20,nok2,sizeof(nok20)); mincost=1e100;A=-1;size0=size1=1LL<<60; if(deb>=1)printf(" Memory/GiB Time(a.u.) 
Memory*Time\n"); for(a=0;a<8;a++){// Loop over automorphisms of C_N to choose best representation to exhaust applyam(a,XBa0,QBa0,ok0,nok0,ok20,nok20); maxc=maxs=0;tns=0; for(r=0;r<N;r++){ nc0=1;tnc=0; for(c=N-1;c>=0;c--){ tns+=nok[encp(c-1,r,0)]*nok2[enc2(c,r)]*nc0; nc0*=nok[encp(c+1,r,1)]; nc=nc0*nok[enc(c,r,0)]; tnc+=nc; } if(tnc>maxc)maxc=tnc; for(c=0;c<N;c++){ ns=1; for(x=0;x<c;x++)ns*=nok[encp(x,r+1,1)]; for(x=c;x<N;x++)ns*=nok[enc(x,r,1)]; tns+=ns*nok[encp(c,r+1,1)]; if(ns>maxs)maxs=ns; } }//r maxt=maxs+MAX(maxs,maxc); cost=tns*maxt;// Using cost = time * memory if(deb>=1){double z=(double)maxt*sizeof(short)/(1<<30);printf("Automorphism %d: %12g %12g %12g\n",a,z,tns,z*tns);} if(cost<mincost){mincost=cost;size0=maxs;size1=MAX(maxs,maxc);ctns=tns;A=a;} }//a applyam(A,XBa0,QBa0,ok0,nok0,ok20,nok20); if(deb>=1)printf("Choosing automorphism %d\n",A); printf("Size %.1fGiB\n",(double)(size0+size1)*sizeof(short)/(1<<30)); printf("Time units %g\n",ctns); fflush(stdout); v0=(short*)malloc(size0*sizeof(short)); v1=(short*)malloc(size1*sizeof(short)); if(!(v0&&v1)){fprintf(stderr,"Couldn't allocate %gGiB in fullexhaust()\n", (double)(size0+size1)*sizeof(short)/(1<<30));return 1;} t0+=cpu(); offset=32768/(N*(N+1)); t1=t2=0; memset(v0,0,size0*sizeof(short)); for(r=0;r<N;r++){ // Comb exhaust // At this point: v0 maps (*,r,1) to value of (*,r,1), (*,<r,*) // // *b0 *b1 *b2 *b3 // / / / / // / / / / // *---------*---------*---------* // s0 s1 s2 s3 // // vc3[s3] = 0 // vc2[s2,b3] = min_{s3} vc3[s3]+Q(s3,b3)+Q(s3,s2) // vc1[s1,b2,b3] = min_{s2} vc2[s2,b3]+Q(s2,b2)+Q(s2,s1) // vc0[s0,b1,b2,b3] = min_{s1} vc1[s1,b2,b3]+Q(s1,b1)+Q(s1,s0) // v0[b0,b1,b2,b3] += min_{s0} vc0[s0,b1,b2,b3]+Q(s0,b0) t1-=cpu(); vold=v1; for(c=N-1,bm=1;c>=0;bm*=nok[enc(c,r,1)],c--){ int np,s0i,s1i,b1i,sb1,sb1i,psb1; // Loop over br = (b_{c+1},...,b_{N-1}) { // the irrelevant parameters // Loop over s_{c-1} { // vmin=32767 // Loop over s_c+b_c { // s_c being the fast-changing half of s_c,b_c // v=vc[c][s_c,br]+Q(s_c,b_c)+Q(s_c,s_{c-1}) // if(v<vmin)vmin=v // if(s_c==<last one given b_c>){vc[c-1][s_{c-1},b_c,br]=vmin;vmin=32767;} // } // } // } vnew=vold+nok[enc(c,r,0)]*bm; if(c==N-1)for(b=0;b<vnew-vold;b++)vold[b]=0; np=0; for(s0i=0;s0i<nok[encp(c-1,r,0)];s0i++){ s0=ok[encp(c-1,r,0)][s0i]; psb1=1000; for(sb1i=0;sb1i<nok2[enc2(c,r)];sb1i++){ sb1=ok2[enc2(c,r)][sb1i]; s1i=okinv(c,r,0,sb1&15); b1i=okinv(c,r,1,sb1>>4); pre[np][0]=s1i; pre[np][1]=QB(c,r,0,0,sb1&15,sb1>>4)+QB(c,r,0,1,sb1&15,s0); pre[np][2]=0; if(sb1i>0){assert(np>0);pre[np-1][2]=((sb1>>4)>(psb1>>4));} pre[np][3]=s0i+nok[encp(c-1,r,0)]*b1i; psb1=sb1; np++; } assert(np>0);pre[np-1][2]=1; } mul0=nok[enc(c,r,0)]; mul1=nok[encp(c-1,r,0)]*nok[enc(c,r,1)]; #ifdef PARALLEL #pragma omp parallel for #endif for(br=0;br<bm;br++){ int p,v,vmin; vmin=32767; for(p=0;p<np;p++){ v=vold[pre[p][0]+mul0*br]+pre[p][1]; if(v<vmin)vmin=v; if(pre[p][2]){ if(c>0)vnew[pre[p][3]+mul1*br]=vmin; else v0[pre[p][3]+mul1*br]+=vmin; vmin=32767; } } } vold=vnew; }//c // At this point v0 maps (*,r,1) to value of (*,<=r,*) t1+=cpu(); if(0){ int i,t,v,maxd,nsb[256]; int64 b,p,b0,b1,b2,np,me,stats[4*N+1]; double t0,t1; for(i=1,nsb[0]=0;i<256;i++)nsb[i]=nsb[i>>1]+(i&1); maxd=MIN(4*N,2); for(i=0,np=0,b=1;i<=maxd;i++){np+=b;b=(b*(4*N-i))/(i+1);} int64 sb[np]; for(i=0,p=0;i<=maxd;i++)for(b=0;b<size0;b++){ for(b0=b,t=0;b0;b0>>=8)t+=nsb[b0&255]; if(t==i)sb[p++]=b; } assert(p==np); for(t=0;t<=maxd;t++)stats[t]=0; printf("Row %d\n",r); me=0;t0=t1=0; for(b0=0;b0<size0;b0++){ for(b1=1;b1<p;b1++){ b2=sb[b1]; 
v=v0[b0]-v0[b0^b2]; if(v>0){ for(b=b2,t=0;b;b>>=8)t+=nsb[b&255]; if(v>=2*t){ stats[t]++;t0+=1;t1+=t; //printf("%08llx dominated by %08llx. Exor %08llx. valdif %d. Distance %d\n",b0,b0^b2,b2,v,t); break; } } }//b1 if(b1==p){me++;if(0){printf(" %0*llX maximal, value %d\n",N,b0,v0[b0]);fflush(stdout);}} }//b0 for(t=0;t<=maxd;t++)if(stats[t])printf("Num %2d = %lld\n",t,stats[t]); printf("%lld maximal element%s\n",me,me==1?"":"s"); printf("Average distance of dominator: %g\n",t1/t0); printf("\n"); fflush(stdout); } // Strut exhaust // // *b0 *bc * * // | | | | // | ^ | | // | | | | // * *s *b2 *b3 // // (c=1 picture) t2-=cpu(); for(c=0;c<N;c++){ if(c&1){vold=v1;vnew=v0;} else {vold=v0;vnew=v1;} // At this point vold maps (>=c,r,1), (<c,r+1,1) to the value below these vertices for(x=c+1,bm=1;x<N;x++)bm*=nok[enc(x,r,1)]; for(x=0;x<c;x++)bm*=nok[encp(x,r+1,1)]; mul0=nok[enc(c,r,1)]; mul1=nok[encp(c,r+1,1)]; assert(bm*MAX(mul0,mul1)<=size0); for(bc0=0;bc0<mul1;bc0++){// bc = state of (c,r+1,1) bc=ok[encp(c,r+1,1)][bc0]; for(s0=0;s0<mul0;s0++){// s = state of (c,r,1) s=ok[enc(c,r,1)][s0]; pre2[bc0][s0]=QB(c,r,1,2,s,bc); } } #ifdef PARALLEL #pragma omp parallel for #endif for(br=0;br<bm;br++){// br = state of non-c columns int v,vmin,bc0,s0; for(bc0=0;bc0<mul1;bc0++){// bc = state of (c,r+1,1) vmin=1000000000; for(s0=0;s0<mul0;s0++){// s = state of (c,r,1) v=vold[s0+mul0*br]+pre2[bc0][s0]; if(v<vmin)vmin=v; } vnew[br+bm*bc0]=vmin+offset;// offset keeps the intermediate numbers smaller, allowing bigger range } } }//c if(N&1)memcpy(v0,v1,size0*sizeof(short)); // Now v0 maps (*,r+1,1) to value of (*,r+1,1),(*,<=r,*) t2+=cpu(); }//r v=v0[0]; free(v1);free(v0); applyam(0,XBa0,QBa0,ok0,nok0,ok20,nok20); printf("Setup time %8.2fs\nComb time %8.2fs\nStrut time %8.2fs\n",t0,t1,t2); return v-N*N*offset; } void pr16(int t[16][16]){ int i,j; for(i=0;i<16;i++){ for(j=0;j<16;j++)printf(" %4d",t[i][j]); printf("\n"); } } void combLB2(int r,int (*f)[16][16]){ // f[N-1][16][16] are the approximators, to be returned int c,v,b0,b1,s0,s1,vmin; int ex[16][16];// excess int t[16][16]; for(s1=0;s1<16;s1++){// s1 = state of (N-1,r,0) for(b1=0;b1<16;b1++){// b1 = state of (N-1,r,1) ex[s1][b1]=QB(N-1,r,0,0,s1,b1); } } for(c=N-2;c>=0;c--){// approximating the (c,r,*), (c+1,r,*) part with f[c][][] // // *b0 *b1 // / / // / / // *----------*---------*---- ... 
// s_hint s0 s1 // for(s0=0;s0<16;s0++){// s0 = (c,r,0) for(b1=0;b1<16;b1++){// b1 = (c+1,r,1) vmin=1000000000; for(s1=0;s1<16;s1++){// s1 = (c+1,r,0) v=QB(c,r,0,2,s0,s1)+ex[s1][b1]; if(v<vmin)vmin=v; } t[s0][b1]=vmin; }//b1 }//s0 for(b0=0;b0<16;b0++){// b0 = (c,r,1) for(b1=0;b1<16;b1++){// b1 = (c+1,r,1) vmin=1000000000; for(s0=0;s0<16;s0++){// s0 = (c,r,0) v=QB(c,r,0,1,s0,XB(c-1,r,0))+QB(c,r,0,0,s0,b0)+t[s0][b1]; if(v<vmin)vmin=v; } f[c][b0][b1]=vmin; }//b1 }//b0 for(b0=0;b0<16;b0++){// b0 = (c,r,1) for(s0=0;s0<16;s0++){// s0 = (c,r,0) vmin=1000000000; for(b1=0;b1<16;b1++){// b1 = (c+1,r,1) v=QB(c,r,0,0,s0,b0)+t[s0][b1]-f[c][b0][b1]; if(v<vmin)vmin=v; } ex[s0][b0]=vmin; }//s0 }//b0 }//c for(b0=0;b0<16;b0++){// b0 = (c,r,1) vmin=1000000000; for(s0=0;s0<16;s0++){// s0 = (c,r,0) if(ex[s0][b0]<vmin)vmin=ex[s0][b0]; } assert(vmin==0); } } void reducerankLB(int t[16][16],int t0[16],int t1[16]){ int d,i,j,dd,nd,i1,j1,nmax,dmin,imin,jmin,vmin,n0[16],n1[16]; vmin=1000000000;imin=jmin=-1; for(i=0;i<16;i++)for(j=0;j<16;j++)if(t[i][j]<vmin){vmin=t[i][j];imin=i;jmin=j;} for(i=0;i<16;i++){t0[i]=t[i][jmin];t1[i]=t[imin][i]-vmin;} nd=0;for(i=0;i<16;i++)n0[i]=n1[i]=0; for(i=0;i<16;i++)for(j=0;j<16;j++)if(t0[i]+t1[j]>t[i][j]){n0[i]++;n1[j]++;nd++;} while(nd>0){ if(0){ printf(" ");for(j=0;j<16;j++)printf(" %4d",n1[j]);printf("\n"); printf(" ");for(j=0;j<16;j++)printf(" %4d",t1[j]);printf("\n"); for(i=0;i<16;i++){ printf("%4d %4d : ",n0[i],t0[i]); for(j=0;j<16;j++)printf(" %4d",t0[i]+t1[j]-t[i][j]); printf("\n"); } printf("\n"); } nmax=0;dd=-1; for(i=0;i<16;i++){ if(n0[i]>nmax){nmax=n0[i];dd=i;} if(n1[i]>nmax){nmax=n1[i];dd=16+i;} } if(dd<16){ dmin=1000000000;j1=-1; for(j=0;j<16;j++){ d=(t0[dd]+t1[j])-t[dd][j]; if(d>0&&d<dmin){dmin=d;j1=j;} } assert(j1>=0); t0[dd]-=dmin; for(j=0;j<16;j++)if(t0[dd]+t1[j]==t[dd][j]){n0[dd]--;n1[j]--;nd--;assert(n0[dd]>=0&&n1[j]>=0&&nd>=0);} }else{ dd-=16; dmin=1000000000;i1=-1; for(i=0;i<16;i++){ d=(t0[i]+t1[dd])-t[i][dd]; if(d>0&&d<dmin){dmin=d;i1=i;} } assert(i1>=0); t1[dd]-=dmin; for(i=0;i<16;i++)if(t0[i]+t1[dd]==t[i][dd]){n0[i]--;n1[dd]--;nd--;assert(n0[i]>=0&&n1[dd]>=0&&nd>=0);} } } } void combLB(int r,int w,int *f){ // f thought of as f[N-w+1][16^w] are the approximators, to be returned // So f_0(a_0,...,a_{w-1})+f_1(a_1,...,a_w)+...+f_{N-w}(a_{N-w},...,a_{N-1}) <= comb_r(a_0,...,a_{N-1}) // Multiindices encoded low to high, e.g., (a_0,...,a_{w-1}) <-> a_0+16a_1+16^2a_2+... int c,v,s0,s1,vmin,vtot,tt[16]; int64 b,n; n=1LL<<(4*w); int v0[n],v1[n]; if(w==1)for(b=0;b<16;b++)v0[b]=0; else { // Add in (0,r,0) (0,r,1): for(b=0;b<256;b++){// b = (0,r,0) (0,r,1) v0[b]=QB(0,r,0,0,b&15,b>>4); } for(c=1;c<w-1;c++){ // State: v0[ (c-1,r,0) (0,r,1) (1,r,1) ... (c-1,r,1) ] (low - high) // Add in (c,r,0) and minimise over (c-1,r,0): for(b=0;b<(1LL<<(c+1)*4);b++){// b = (c,r,0) (0,r,1) (1,r,1) ... (c-1,r,1) vmin=1000000000; for(s0=0;s0<16;s0++){// s0=(c-1,r,0) v=v0[(b&~15)|s0]+QB(c-1,r,0,2,s0,b&15); if(v<vmin)vmin=v; } v1[b]=vmin; }//b // Add in (c,r,1): for(b=0;b<(1LL<<(c+2)*4);b++){// b = (c,r,0) (0,r,1) (1,r,1) ... (c,r,1) v0[b]=v1[b&~(15LL<<(c+1)*4)]+QB(c,r,0,0,b&15,b>>(c+1)*4); } }//c } for(c=w-1;c<N;c++){ // State: v0[ (c-1,r,0) (c-w+1,r,1) (c-w+2,r,1) ... (c-1,r,1) ] (low - high) // Add in (c,r,0) and minimise over (c-1,r,0): for(b=0;b<n;b++){// b = (c,r,0) (c-w+1,r,1) (c-w+2,r,1) ... 
(c-1,r,1) vmin=1000000000; for(s0=0;s0<16;s0++){// s0=(c-1,r,0) v=v0[(b&~15)|s0]+QB(c,r,0,1,b&15,s0); if(v<vmin)vmin=v; } v1[b]=vmin; }//b // Add in (c,r,1) and minimise over (c,r,0), using (c+1,r,0)_ave (sidebranch) for(s0=0;s0<16;s0++){ vtot=0; for(s1=0;s1<16;s1++)vtot+=QB(c,r,0,2,s0,s1); tt[s0]=(vtot+8)>>4; } for(b=0;b<n;b++){// b = (c-w+1,r,1) (c-w+2,r,1) ... (c,r,1) vmin=1000000000; for(s0=0;s0<16;s0++){// s0=(c,r,0) v=v1[((b<<4)&~(15LL<<(4*w)))|s0]+QB(c,r,0,0,s0,b>>(4*(w-1)))+tt[s0]; if(v<vmin)vmin=v; } f[(int64)(c-(w-1))<<(4*w)|b]=vmin; }//b if(c==N-1)break; // Add in (c,r,1) again, subtract f[c-(w-1)][] and minimise over (c-w+1,r,1) if(w==1){ for(b=0;b<n;b++){// b = (c,r,0) vmin=1000000000; for(s0=0;s0<16;s0++){// s0=(c,r,1) v=v1[b]+QB(c,r,0,0,b&15,s0)-f[(c<<4)|s0]; if(v<vmin)vmin=v; } v0[b]=vmin; }//b }else{ for(b=0;b<n;b++){// b = (c,r,0) (c-w+2,r,1) (c-w+3,r,1) ... (c,r,1) vmin=1000000000; for(s0=0;s0<16;s0++){// s0=(c-w+1,r,1) v=v1[(((b&~15)<<4)&~(15LL<<4*w))|(b&15)|(s0<<4)]+QB(c,r,0,0,b&15,b>>4*(w-1))-f[(int64)(c-(w-1))<<(4*w)|(b&~15)|s0]; if(v<vmin)vmin=v; } v0[b]=vmin; }//b } }//c } int lin2LB(){ int c,i,j,r,v,vmin,b0,b1,b2,s0,s1; int m0[16],m1[16],t[16][16],t0[16],t1[16],u[16][16],f0[N-1][16][16],f1[N-1][16][16]; for(c=0;c<N-1;c++)for(i=0;i<16;i++)for(j=0;j<16;j++)f0[c][i][j]=0; for(r=0;r<N;r++){ if(0){ combLB2(r,f1); for(c=0;c<N-1;c++)for(i=0;i<16;i++)for(j=0;j<16;j++)f1[c][i][j]+=f0[c][i][j]; }else{ combLB(r,2,(int*)f1); for(c=0;c<N-1;c++)for(i=0;i<16;i++)for(j=0;j<16;j++)f0[c][i][j]=f0[c][i][j]+f1[c][j][i]; for(c=0;c<N-1;c++)for(i=0;i<16;i++)for(j=0;j<16;j++)f1[c][i][j]=f0[c][i][j]; } if(r==N-1)break; // // Struts: build f0 (new row r+1) from f1 (old row r) // // *b0 *b1 *b2 * // | | | | // | | ^ | // | | | | // * * *s2 *b3 // (c=2 picture) // for(b0=0;b0<16;b0++)for(b1=0;b1<16;b1++){ vmin=1000000000; for(s0=0;s0<16;s0++){ v=f1[0][s0][b1]+QB(0,r,1,2,s0,b0); if(v<vmin)vmin=v; } f0[0][b0][b1]=vmin; } for(c=1;c<N-1;c++){ for(b1=0;b1<16;b1++){// b1=(c,r,1) for(b0=0;b0<16;b0++)for(b2=0;b2<16;b2++){// b0=(c-1,r,1), b2=(c+1,r,1) vmin=1000000000; for(s1=0;s1<16;s1++){ v=f0[c-1][b0][s1]+f1[c][s1][b2]+QB(c,r,1,2,s1,b1); if(v<vmin)vmin=v; } t[b0][b2]=vmin; }// b0,b2 reducerankLB(t,t0,t1); for(b0=0;b0<16;b0++)u[b0][b1]=t0[b0]; for(b2=0;b2<16;b2++)f0[c][b1][b2]=t1[b2]; }// b1 for(b0=0;b0<16;b0++)for(b1=0;b1<16;b1++)f0[c-1][b0][b1]=u[b0][b1]; }// c for(b0=0;b0<16;b0++)for(b1=0;b1<16;b1++){ vmin=1000000000; for(s1=0;s1<16;s1++){ v=f0[N-2][b0][s1]+QB(N-1,r,1,2,s1,b1); if(v<vmin)vmin=v; } u[b0][b1]=vmin; } for(b0=0;b0<16;b0++)for(b1=0;b1<16;b1++)f0[N-2][b0][b1]=u[b0][b1]; }// r // Result in f1 for(b0=0;b0<16;b0++)m0[b0]=0; for(c=0;c<N-1;c++){ // m0[] is a map from (c,N-1,1) to minval of (<=c,N-1,1) using the c functions, f1[<c][][] for(b0=0;b0<16;b0++){// b0=(c+1,N-1,1) vmin=1000000000; for(s0=0;s0<16;s0++){// s0=(c,N-1,1) v=m0[s0]+f1[c][s0][b0]; if(v<vmin)vmin=v; } m1[b0]=vmin; }// b0 for(b0=0;b0<16;b0++)m0[b0]=m1[b0]; }// c vmin=1000000000; for(s0=0;s0<16;s0++){// s0=(N-1,N-1,1) v=m0[s0]; if(v<vmin)vmin=v; } return vmin; } int linLB(int w){ int n,r,v,vmin,s0; n=N-w+1;// Using n overlapping width-w functions int64 b,c,d,b0,b1,N1,N2,N3,hit; N1=n*(1LL<<4*w); N2=1LL<<4*(w-1); N3=1LL<<4*2*(w-1); int f0[N1],f1[N1],t0[N3],t1[N3]; int64 hi[N2]; memset(f1,0,N1*sizeof(int)); for(r=0;r<N;r++){ // Here f1[] is the bound on (*,<r,1) combLB(r,w,f0); for(b=0;b<N1;b++)f0[b]+=f1[b]; if(r==N-1)break; // f0 at the bottom (row r) -> construct f1 at the top (row r+1) 
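  // Fold in the vertical (strut) couplings QB(c,r,1,2,.,.) between rows r and r+1 for columns
  // 0..w-2, so that t0[] maps a pair of width-(w-1) boundary states (b0 at row r, b1 at row r+1)
  // to their strut contribution.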
for(b1=0;b1<N2;b1++)for(b0=0;b0<N2;b0++){// b0=(<w-1,r,1) b1=(<w-1,r+1,1) v=0; for(c=0;c<w-1;c++)v+=QB(c,r,1,2,(b0>>4*c)&15,(b1>>4*c)&15); t0[b0+(b1<<4*(w-1))]=v; } for(c=0;c<n;c++){ // t0[] maps (c..c+w-2,r,1),(c..c+w-2,r+1,1) to sum_{i<c}(f_i'-f_i) + struts for(b1=0;b1<N2;b1++)for(b0=0;b0<N2;b0++){// b0=(c+1..c+w-1,r,1) b1=(c..c+w-2,r+1,1) vmin=1000000000; for(s0=0;s0<16;s0++){// s0=(c,r,1) v=t0[s0+((b0<<4)&~(15LL<<4*(w-1)))+(b1<<4*(w-1))]+f0[s0+(b0<<4)+(c<<4*w)]; if(v<vmin)vmin=v; } t1[b0+(b1<<4*(w-1))]=vmin; } // t1[] maps (c+1..c+w-1,r,1),(c..c+w-2,r+1,1) to sum_{i<c}(f_i'-f_i)+f_c' + struts assert(w>=2);//FTM for(b=0;b<N2;b++)hi[b]=0; for(d=1;d<MIN(w,n-c);d++){ for(b0=0;b0<1LL<<(w-d)*4;b0++){// b0=(c+d..c+w-1,r,1) hit=0; for(b1=0;b1<1LL<<d*4;b1++){// b1=(c+w..c+w+d-1,r,1) hit+=f0[b0+(b1<<(w-d)*4)+((c+d)<<4*w)]; } for(b1=0;b1<1LL<<(d-1)*4;b1++){// b1=(c+1..c+d-1,r,1) hi[b1+(b0<<(d-1)*4)]+=hit<<(w-1-d)*4; } } } for(b=0;b<N2;b++)hi[b]=(hi[b]+(1LL<<((w-1)*4-1)))>>(w-1)*4; for(b1=0;b1<1LL<<4*w;b1++){// b1=(c+w-1,r,1),(c..c+w-2,r+1,1) vmin=1000000000; for(b0=0;b0<1LL<<4*(w-2);b0++){// b0=(c+1..c+w-2,r,1) v=t1[b0+(b1<<4*(w-2))]+hi[b0+((b1&15)<<4*(w-2))]; if(v<vmin)vmin=v; } t0[b1]=vmin; } // t0[] maps (c+w-1,r,1),(c..c+w-2,r+1,1) to sum_{i<c}(f_i'-f_i)+f_c' + struts for(b=0;b<1LL<<4*w;b++){// b=(c..c+w-1,r+1,1) vmin=1000000000; for(s0=0;s0<16;s0++){// s0=(c+w-1,r,1) v=t0[s0+((b<<4)&~(15LL<<4*w))]+QB(c+w-1,r,1,2,s0,b>>4*(w-1)); if(v<vmin)vmin=v; } f1[b+(c<<4*w)]=vmin; } for(b1=0;b1<N2;b1++)for(b0=0;b0<N2;b0++){// b0=(c+1..c+w-1,r,1) b1=(c+1..c+w-1,r+1,1) vmin=1000000000; for(s0=0;s0<16;s0++){// s0=(c,r+1,1) v=t1[b0+((s0+((b1<<4)&~(15LL<<4*(w-1))))<<4*(w-1))]-f1[s0+(b1<<4)+(c<<4*w)]; if(v<vmin)vmin=v; } t0[b0+(b1<<4*(w-1))]=vmin+QB(c+w-1,r,1,2,b0>>4*(w-2),b1>>4*(w-2)); } }//c }//r // Result in f0 for(b=0;b<N2;b++)t0[b]=0; for(c=0;c<n;c++){ // t0[] is a map from (c..c+w-2,N-1,1) to minval of the sum of the c functions, f0[<c] // Add variable (c+w-1,N-1,1) and min over (c,N-1,1) for(b=0;b<N2;b++){// b=(c+1..c+w-1,N-1,1) vmin=1000000000; for(s0=0;s0<16;s0++){// s0=(c,N-1,1) v=t0[s0+((b<<4)&~(15LL<<4*(w-1)))]+f0[s0+(b<<4)+(c<<4*w)]; if(v<vmin)vmin=v; } t1[b]=vmin; } for(b=0;b<N2;b++)t0[b]=t1[b]; } vmin=1000000000; for(b=0;b<N2;b++)if(t0[b]<vmin)vmin=t0[b]; return vmin; } void timingtests(int strat,double mint,double maxt){ int d,n,r,c0,c1,ph,wid,v0,upd; double t0; opt1(mint,maxt,1,1,0,strat,1000000000,-1000000000); init_state(); printf("val=%d\n",val()); upd=0; wid=5; for(d=0;d<2;d++)for(ph=0;ph<=wid;ph++)for(r=0;r<N;r++){ for(n=0,t0=cpu();(n&(n-1))||cpu()-t0<.5;n++)v0=treestripexhaust(d,wid,ph,upd,r); printf("treestripexh %d %d %d %2d %6d %gs\n",d,wid,ph,r,v0,(cpu()-t0)/n); fflush(stdout); } for(d=0;d<2;d++)for(ph=0;ph<2;ph++)for(r=0;r<N;r++){ v0=1000000000; for(n=0,t0=cpu();(n&(n-1))||cpu()-t0<.5;n++)v0=tree1exhaust(d,ph,r,0); printf("tree1 %d %d %2d %6d %gs\n",d,ph,r,v0,(cpu()-t0)/n); fflush(stdout); } for(d=0;d<2;d++)for(c0=0;c0<N-wid+1;c0++){ c1=c0+wid;v0=0; for(n=0,t0=cpu();(n&(n-1))||cpu()-t0<.5;n++)v0=stripexhaust(d,c0,c1,upd); v0+=stripval(d,0,c0)+stripval(d,c1,N); printf("Strip %d %2d %2d %6d %gs\n",d,c0,c1,v0,(cpu()-t0)/n); if(upd)assert(v0==val()); fflush(stdout); } } void consistencychecks2(int weightmode,int centreflag,int strat,double mint,double maxt){ int c,d,o,w,lw,ph,phl,r,v0,v1; //opt1(mint,maxt,1,1,0,strat,1000000000); printf("val=%d\n",val()); if(0){ writeweights("prob"); v0=treestripexhaust(0,1,0,1,0); v1=val(); printf("%6d %6d\n",v0,v1); assert(v0==v1); exit(0); } 
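  // The endless loop below draws fresh weights and a fresh state each time round and checks that:
  //  - a width-1 treestripexhaust agrees exactly with tree1exhaust (tree1);
  //  - a width-w treestripexhaust reaches a value <= the value left by the corresponding sequence
  //    of plain stripexhausts over the non-spike segments (stripexhcomp);
  //  - scrambling every spin except the o=0 entries in the spike columns does not prevent a
  //    width-w treestripexhaust from reaching a value <= the previously optimised value
  //    (stripexhspiketest);
  //  - with upd=1 the value returned by treestripexhaust matches val() of the written-back state
  //    (updcomp).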
while(1){ initweights(weightmode,centreflag); init_state(); for(d=0;d<2;d++)for(ph=0;ph<2;ph++)for(r=0;r<N;r++){ v0=treestripexhaust(d,1,ph,0,r); v1=tree1exhaust(d,ph,r,0); printf("tree1 %d %d %2d %6d %6d\n",d,ph,r,v0,v1); assert(v0==v1); } for(w=1;w<=3;w++){ for(d=0;d<2;d++)for(ph=0;ph<=w;ph++)for(r=-1;r<N;r++){ init_state(); opt1(0,maxt,0,1,0,strat,1000000000,-1000000000); v0=treestripexhaust(d,w,ph,0,r); for(c=0;c<N;){ phl=(c+ph)%(w+1); if(phl==w){c++;continue;} lw=MIN(w-phl,N-c); stripexhaust(d,c,c+lw,1); c+=lw; } v1=val(); printf("stripexhcomp %d %d %d %2d %6d %6d\n",w,d,ph,r,v0,v1); assert(v0<=v1); } } for(w=1;w<=3;w++){ for(d=0;d<2;d++)for(ph=0;ph<=w;ph++){ init_state(); opt1(mint,maxt,0,1,0,strat,1000000000,-1000000000); v0=val(); for(c=0;c<N;c++){ phl=(c+ph)%(w+1); for(o=0;o<2;o++){ if(!(phl==w&&o==0))for(r=0;r<N;r++)XBI(d,c,r,o)=randnib(); } } v1=treestripexhaust(d,w,ph,0,-1); printf("stripexhspiketest %d %d %d %2d %6d %6d\n",w,d,ph,r,v0,v1); assert(v1<=v0); } } for(w=1;w<=3;w++){ for(d=0;d<2;d++)for(ph=0;ph<=w;ph++)for(r=0;r<N;r++){ init_state(); v0=treestripexhaust(d,w,ph,1,r); v1=val(); printf("updcomp %d %d %d %2d %6d %6d\n",w,d,ph,r,v0,v1); assert(v0==v1); } } } } void getqbounds(int qb[7]){ // Return bounds rr[] such that accesses etab_centred[i] satisfy rr[0]<=i<=rr[1], // and bounds mm[0], mm[1] controlling the maximum variation in Q(d,b,s) over s. // mm[0] corresponds to d=0 (intra-K44) and mm[1] to d=1,2 (inter-K44). // so qb[0] = rr[0] = min_{n,b} sum_d min (min_s Q(n,d,b,s), 0) // qb[1] = rr[1] = max_{n,b} sum_d max (max_s Q(n,d,b,s), 0) // qb[2] = mm[0] = max_{n,b}(max_s Q(n,0,b,s) - min_s Q(n,0,b,s)) // qb[3] = mm[1] = max_{n,b,d=1,2}(max_s Q(n,d,b,s) - min_s Q(n,d,b,s)) // qb[4] = qq[0] = max_{n,b,s} |Q(n,0,b,s)| // qb[5] = qq[1] = max_{n,d=1,2,b,s} |Q(n,d,b,s)| // qb[6] = qq[2] = max_{n,b} MAX(sum_d max_s Q(n,d,b,s), sum_d max_s -Q(n,d,b,s)) = MAX(-rr[0],rr[1]) // mm[i]/2 <= qq[i], i=0,1 int d,i,n,q,v,s0,s1,min1,max1,min2,max2; for(i=0;i<7;i++)qb[i]=0; for(n=0;n<NBV;n++){ for(s0=0;s0<16;s0++){ min1=max1=0; for(d=0;d<3;d++){ min2=max2=0;// These zeros mean that etab[Q0] and etab[Q1] type accesses will be in bounds // though in general we're ensuring that etab[Q0+Q1+Q2] type accesses are OK. for(s1=0;s1<16;s1++){ q=QBa[n][d][s0][s1]; if(q>max2)max2=q; if(q<min2)min2=q; if(abs(q)>qb[4+MIN(d,1)])qb[4+MIN(d,1)]=abs(q); } v=max2-min2;if(v>qb[2+MIN(d,1)])qb[2+MIN(d,1)]=v; min1+=min2;max1+=max2; } if(min1<qb[0])qb[0]=min1; if(max1>qb[1])qb[1]=max1; } } qb[6]=MAX(-qb[0],qb[1]); if(deb>=2)printf("qbounds rr: %d %d mm: %d %d qq: %d %d %d\n",qb[0],qb[1],qb[2],qb[3],qb[4],qb[5],qb[6]); } double getmaxbeta(int qb[7]){ // Determine whether the floating point type used in tree1gibbs has enough range to // support the fast method. int i; double x,y; long double tx; x=qb[6]; y=qb[5]+(qb[2]+qb[3])/2.;if(y>x)x=y; y=qb[4]+qb[3];if(y>x)x=y; // x = MAX(q2,q1*m0*m1,q0*m1*m1) for(i=0,tx=1;isfinite(tx);i++,tx*=2); // tx = exponent of floating point type used in tree1gibbs return (i-64)*log(2)/x; } gibbstables*initgibbstables(int nt,double *be,int tree){ // Allocate and initialise gibbs tables to be used by simplegibbssweep() and tree1gibbs(). // Use beta values be[0],...,be[nt-1]. // If tree=1 then the extra tables necessary for tree1gibbs() will be initialised. 
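  // Illustrative usage (cf. findexchangemontecarlotemperatureset() below); the beta schedule here
  // is a placeholder:
  //   double be[3]={0.5,1.0,2.0};
  //   gibbstables*gt=initgibbstables(3,be,1);              // tree=1: also build tree1gibbs() tables
  //   tree1gibbs(randint(2),randint(2),randint(N),&gt[i]); // one sweep at temperature i
  //   simplegibbssweep(&gt[i]);                            // or a simple Gibbs sweep
  //   freegibbstables(3,gt);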
int b,d,i,j,k,l,m,n,o,p,q,s,t,v,w,x,y,z,x0,x1,qb[7],e0[2][2][6],e[16][4][2]; unsigned char (*septab0)[16][4]; signed char (*septab1a)[16][16]=0; signed char (*septab2a)[16][16][2]=0; signed char (*septab3a)[4][2][2]=0; long double Z[2]; gibbstables*gt; getqbounds(qb); septab1a_compact=septab2a_compact=septab3a_compact=1; if(tree){ double maxbeta; maxbeta=getmaxbeta(qb); if(deb>=2)printf("Maximum beta: %g\n",maxbeta); for(t=0;t<nt;t++)if(be[t]>maxbeta){fprintf(stderr,"Beta = %g exceeds maximum beta of %g for tree1gibbs()\n",be[t],maxbeta);exit(1);} } gt=(gibbstables*)malloc(nt*sizeof(gibbstables));assert(gt); if(nt>0){ septab0=(unsigned char(*)[16][4])malloc(16*16*4);assert(septab0); for(i=0;i<16;i++)for(j=0;j<16;j++)for(k=0;k<4;k++)septab0[i][j][k]=(k<<2)|(((i>>k)&1)<<1)|((j>>k)&1); septab1a=(signed char(*)[16][16])malloc(NBV*16ULL*16);assert(septab1a); septab2a=(signed char(*)[16][16][2])malloc(NBV*16ULL*16*2);assert(septab2a); septab3a=(signed char(*)[4][2][2])malloc(NBV*4*2*2);assert(septab3a); } for(t=0;t<nt;t++){ int emin,emax; emin=MIN(qb[0],-128); emax=MAX(qb[1],127); gt[t].emin=emin;gt[t].emax=emax; gt[t].etab0=(long double*)malloc((emax-emin+1)*sizeof(long double)); gt[t].etab=gt[t].etab0-emin; gt[t].ftab0=(unsigned int*)malloc(256*sizeof(unsigned int));assert(gt[t].ftab0); gt[t].ftab=gt[t].ftab0+128; for(n=emin;n<=emax;n++)gt[t].etab[n]=expl(-be[t]*n); for(n=-128;n<128;n++){ double x=n>0?1/(1+exp(-be[t]*n)):exp(be[t]*n)/(exp(be[t]*n)+1); gt[t].ftab[n]=(unsigned int)floor(x*(RAND_MAX+1.)+.5); } gt[t].m0=expl(be[t]*qb[2]/2.); gt[t].m1=expl(be[t]*qb[3]/2.); gt[t].Q0=expl(be[t]*qb[4]); gt[t].Q1=expl(be[t]*qb[5]); gt[t].Q2=expl(be[t]*qb[6]); gt[t].septab0=septab0; gt[t].septab1=(unsigned int(*)[16][16])malloc(NBV*16ULL*16*sizeof(unsigned int));assert(gt[t].septab1); gt[t].septab1a=septab1a; gt[t].septab2a=septab2a; gt[t].septab3a=septab3a; if(tree){ gt[t].septab2=(long double (*)[16][16])malloc(NBV*16ULL*16*sizeof(long double)); gt[t].septab3=(long double (*)[4][2][2])malloc(NBV*4ULL*2*2*sizeof(long double)); assert(gt[t].septab2&&gt[t].septab3); }else {gt[t].septab2=0;gt[t].septab3=0;} } for(x=0;x<N;x++)for(y=0;y<N;y++)for(o=0;o<2;o++)for(i=0;i<4;i++){ p=enc(x,y,o); z=o?y:x; for(l=0;l<2;l++){// (x,y,o,i) is in state l x0=statemap[l]; for(m=0;m<2;m++){// adjacent vertex is in state m x1=statemap[m]; q=enc(x,y,1-o); for(d=0;d<4;d++){ v=(Q[p][i][d]+Q[q][d][i])*x0*x1; if(d==i)v+=Q[p][i][6]*x0*x0+Q[q][i][6]*x1*x1; e0[l][m][d]=v; } if(z>0){q=enc(x-1+o,y-o,o);x1=statemap[m];e0[l][m][4]=(Q[p][i][4]+Q[q][i][5])*x0*x1;} else e0[l][m][4]=0; if(z<N-1){q=enc(x+1-o,y+o,o);x1=statemap[m];e0[l][m][5]=(Q[p][i][5]+Q[q][i][4])*x0*x1;} else e0[l][m][5]=0; } } for(b=0;b<16;b++)for(j=0;j<4;j++){// Do this extra loop to improve sequential memory accesses s=(i<<2)|j; for(l=0;l<2;l++){ v=0; for(d=0;d<4;d++)v+=e0[l][(b>>d)&1][d]; v+=e0[l][j>>1][4]+e0[l][j&1][5]; e[b][j][l]=v; } } for(t=0;t<nt;t++){ for(b=0;b<16;b++)for(j=0;j<4;j++){ s=(i<<2)|j; for(l=0;l<2;l++)Z[l]=gt[t].etab[e[b][j][l]]; gt[t].septab1[p][b][s]=(unsigned int)floor(Z[0]/(Z[0]+Z[1])*(RAND_MAX+1.)+.5); if(tree)gt[t].septab2[p][b][s]=Z[0]+Z[1]; if(t==0){ int del=e[b][j][1]-e[b][j][0]; if(del<-128||del>127)septab1a_compact=0; septab1a[p][b][s]=del; for(l=0;l<2;l++){ v=e[b][j][l]; if(v<-128||v>127)septab2a_compact=0; septab2a[p][b][s][l]=v; } } } } if(tree){ q=enc(x+1-o,y+o,o); for(t=0;t<nt;t++){ w=z<N-1?Q[p][i][5]+Q[q][i][4]:0; for(l=0;l<2;l++){// (x,y,o,i) is in state l x0=statemap[l]; for(m=0;m<2;m++){// adjacent vertex is in state m 
x1=statemap[m]; v=x0*x1*w; gt[t].septab3[p][i][l][m]=gt[t].etab[v]; if(t==0){ if(v<-128||v>127)septab3a_compact=0; septab3a[p][i][l][m]=v; } } } } } }// x,y,o,i if(!septab1a_compact)printf("Note: cannot use septab1a due to overflow\n"); if(!septab2a_compact)printf("Note: cannot use septab2a due to overflow\n"); if(!septab3a_compact)printf("Note: cannot use septab3a due to overflow\n"); return gt; } void freegibbstables(int nt,gibbstables*gt){ int t; if(nt>0){free(gt[0].septab0);free(gt[0].septab1a);free(gt[0].septab2a);free(gt[0].septab3a);} for(t=0;t<nt;t++){free(gt[t].etab0);free(gt[t].ftab0);free(gt[t].septab1);free(gt[t].septab2);free(gt[t].septab3);} free(gt); } void gibbstests(int weightmode){ if(1){// Burn-in test int i,n,v,nn=20; double mu,va,beta,s0[nn],s1[nn],s2[nn]; beta=3.0; for(i=0;i<nn;i++)s0[i]=s1[i]=s2[i]=0; for(n=0;n<10000;n++){ init_state(); for(i=0;i<nn;i++){ tree1gibbs_slow(randint(2),randint(2),randint(N),beta); v=val();assert(isfinite(v)); s0[i]+=1;s1[i]+=v;s2[i]+=v*v; } } for(i=0;i<nn;i++){ mu=s1[i]/s0[i]; va=(s2[i]-s1[i]*s1[i]/s0[i])/(s0[i]-1); printf("%5d %12g %12g\n",i,mu,sqrt(va/s0[i])); } } if(0){// Autocorrelation test; assumes weightmode 0, statemap[0]=-1 (Ising form, uniform +/-1, no fields) int i,j,k,it,bp,nb=20,rep; int sbuf[nb][NBV],btab[16]; double t,mu,va,beta,s0[nb],s1[nb],s2[nb]; if(weightmode!=0||statemap[0]!=-1)fprintf(stderr,"Warning: expect weightmode=0, statemap[0]=-1\n"); beta=.8;rep=10000; init_state(); for(i=0;i<(beta+1)*20;i++)tree1gibbs_slow(randint(2),randint(2),randint(N),beta);// burn-in guess for(i=0;i<nb;i++)s0[i]=s1[i]=s2[i]=0; bp=0;it=-nb; for(i=1,btab[0]=4;i<16;i++)btab[i]=btab[i>>1]-2*(i&1);// (# 0 bits) - (# 1 bits) while(1){ for(i=0;i<rep;i++)tree1gibbs_slow(randint(2),randint(2),randint(N),beta); memcpy(sbuf[bp],XBa,NBV*sizeof(int)); if(it>=0){ for(i=0;i<nb;i++){// Correlate current with "i" ago j=bp-i;if(j<0)j+=nb; for(k=0,t=0;k<NBV;k++)t+=btab[XBa[k]^sbuf[j][k]]; s0[i]+=1;s1[i]+=t;s2[i]+=t*t; } if(it%100==0){ printf("it=%d\n",it); for(i=0;i<nb;i++){ mu=s1[i]/s0[i]; va=(s2[i]-s1[i]*s1[i]/s0[i])/(s0[i]-1); printf("%6d %12g %12g\n",i*rep,mu,sqrt(va/s0[i])); } printf("\n"); } } it++;bp++;if(bp==nb)bp=0; } } } void binderparamestimate(int weightmode,int centreflag){ int i,j,k,n,nd,nb=6,burnin; int sbuf[nb][NBV],btab[16]; double q,x,t0,t1,beta,sp[9]; for(i=1,btab[0]=4;i<16;i++)btab[i]=btab[i>>1]-2*(i&1);// (# 0 bits) - (# 1 bits) beta=0.1; burnin=0;//(beta+1)*50;// burn-in guess if(weightmode!=0||statemap[0]!=-1)fprintf(stderr,"Warning: expect weightmode=0, statemap[0]=-1\n"); printf("beta = %g\n",beta); printf("burn-in = %d\n",burnin); nd=-1;//5964; for(i=0;i<=8;i++)sp[i]=0;// sp[i] = sum of i^th powers of q t0=cpu(); for(n=0;n<nd||nd<0;){// Disorder samples initweights(weightmode,centreflag); for(i=0;i<nb;i++){// State samples init_state(); for(j=0;j<burnin;j++)tree1gibbs_slow(randint(2),randint(2),randint(N),beta); memcpy(sbuf[i],XBa,NBV*sizeof(int)); } for(i=0;i<nb-1;i++)for(j=i+1;j<nb;j++){ for(k=0,q=0;k<NBV;k++)q+=btab[sbuf[i][k]^sbuf[j][k]];q/=NV; for(k=0,x=1;k<=8;k++){sp[k]+=x;x*=q;} } n++; t1=cpu(); if(t1-t0>5||n==nd){ t0=t1; printf("n=%d\n",n); printf("beta %g\n",beta); printf("burn-in %d\n",burnin); printf("nb %d\n",nb); for(j=1;j<=8;j++)printf("%3d %12g\n",j,sp[j]/sp[0]); printf("Binder %12g\n",.5*(3-sp[0]*sp[4]/(sp[2]*sp[2]))); printf("\n"); fflush(stdout); } } } void findexchangemontecarlotemperatureset(void){ int i,j,n,v,prch,pr=0; int maxn; int nt=ngp>1?genp[1]:500;// Number of temperatures (fine grid for evaluation 
purposes) double tp,del,tim0,be0,be1,be[nt],s0[nt],s1[nt],s2[nt],(*vhist)[nt]; int en[nt],ex[nt-1],sbuf[nt][NBV]; gibbstables*gt; be0=ngp>2?genp[2]:0.5;be1=ngp>3?genp[3]:5;// low and high beta for(i=0;i<nt;i++)be[i]=be0*pow(be1/be0,i/(nt-1.));// Interpolate geometrically for first guess printf("nt=%d\n",nt); printf("be0=%g\n",be0); printf("be1=%g\n",be1); printf("%s mode\n",genp[0]?"Single bigvertex":"Tree"); tp=ngp>4?genp[4]:0.25; printf("Going for transition probability %g\n",tp); prch=100*(genp[0]?16:1);// Print chunksize for(i=0;i<nt;i++){init_state();memcpy(sbuf[i],XBa,NBV*sizeof(int));} for(i=0;i<nt-1;i++)ex[i]=0; for(i=0;i<nt;i++)s0[i]=s1[i]=s2[i]=0; maxn=ngp>5?genp[5]:250000; vhist=(double(*)[nt])malloc(maxn*nt*sizeof(double));assert(vhist); initrandtab(100000); gt=initgibbstables(nt,be,(int)(genp[0])==0); tim0=cpu(); for(n=0;n<maxn;){ for(i=0;i<nt;i++){ memcpy(XBa,sbuf[i],NBV*sizeof(int)); switch((int)(genp[0])){ case 0: tree1gibbs(randint(2),randint(2),randint(N),&gt[i]); break; case 1: simplegibbssweep(&gt[i]); break; } v=val();en[i]=v;vhist[n][i]=v; memcpy(sbuf[i],XBa,NBV*sizeof(int)); if(pr>=2)printf("%5d ",en[i]); s0[i]+=1;s1[i]+=v;s2[i]+=v*v; if(n&1){v=vhist[n>>1][i];s0[i]-=1;s1[i]-=v;s2[i]-=v*v;} } if(pr>=2)printf("\n"); for(i=0;i<nt-1;i++){ if(pr>=3)printf(" "); del=(be[i+1]-be[i])*(en[i]-en[i+1]); if(del<0||randfloat()<exp(-del)){ memcpy(XBa,sbuf[i],NBV*sizeof(int)); memcpy(sbuf[i],sbuf[i+1],NBV*sizeof(int)); memcpy(sbuf[i+1],XBa,NBV*sizeof(int)); v=en[i];en[i]=en[i+1];en[i+1]=v; if(pr>=3)printf("X"); ex[i]++; } else if(pr>=3)printf(" "); } if(pr>=3)printf("\n"); n++; if(n%prch==0){ int i0,nb; double p,err,minerr,mu1,sd1,mu[nt],sd[nt],ben[nt]; for(i=0;i<nt;i++){mu[i]=s1[i]/s0[i];sd[i]=sqrt((s2[i]-s1[i]*s1[i]/s0[i])/(s0[i]-1));} printf("\n"); if(pr>=1){ printf(" ");for(i=0;i<nt-1;i++)printf(" %5.3f",ex[i]/(double)n);printf("\n"); //for(i=0;i<nt;i++)printf("%5.3f ",s1[i]/s0[i]);printf("\n"); //for(i=0;i<nt;i++)printf("%5.3f ",mu[i]);printf(" mu[]\n"); //for(i=0;i<nt;i++)printf("%5.3f ",sd[i]);printf(" sd[]\n"); printf(" "); for(i=0;i<nt-1;i++){ mu1=-(be[i+1]-be[i])*(mu[i+1]-mu[i]); sd1=(be[i+1]-be[i])*sqrt(sd[i]*sd[i]+sd[i+1]*sd[i+1]); if(sd1>1e-6)p=Phi(-mu1/sd1)+exp(sd1*sd1/2-mu1)*Phi(mu1/sd1-sd1); else p=exp(-mu1); printf(" %5.3f",p); } printf("\n"); } j=nt-1;nb=0; while(j>0){ ben[nb++]=be[j]; minerr=1e9;i0=-1; for(i=j-1;i>=0;i--){ mu1=-(be[j]-be[i])*(mu[j]-mu[i]); sd1=(be[j]-be[i])*sqrt(sd[i]*sd[i]+sd[j]*sd[j]); // Stable version of Phi(-mu1/sd1)+exp(sd1*sd1/2-mu1)*Phi(mu1/sd1-sd1): if(sd1<1e-6)p=exp(-mu1); else p=Phi(-mu1/sd1)+phi(mu1/sd1)/Rphi(mu1/sd1-sd1); err=log(p/tp); if(fabs(err)<minerr){minerr=fabs(err);i0=i;} if(err<0)break; } j=i0; } printf("%d steps. 
CPU=%.2f\n",n,cpu()-tim0); printf("p=%.3f choice of be[]:",tp); for(i=nb-1;i>=0;i--)printf(" %5.3f",ben[i]);printf("\n"); fflush(stdout); } }// while(1) } double*loadbetaset(int weightmode,double betaskip,int*nt){ double be_single[2]={betaskip}; double bew7[][50]={// Weightmode 7, be[] {0}, {0}, {0.202,0.485,0.911,1.549,3.042,50.000},// 2, 0.25 {0}, {0.202,0.325,0.508,0.690,0.887,1.131,1.409,1.782,2.382,3.666,50.000},// 4, 0.25 {0}, {0.202,0.318,0.435,0.554,0.679,0.807,0.944,1.096,1.272,1.477,1.741,2.101,2.596,3.363,4.675,50.000},// 6, 0.25 {0}, {0.267,0.352,0.438,0.520,0.604,0.690,0.782,0.880,0.982,1.096,1.214,1.355,1.524,1.741,2.020,2.382, 2.920,3.783,5.511,50.000},// 8, 0.25 {0}, {0.245,0.315,0.383,0.452,0.525,0.595,0.669,0.741,0.820,0.901,0.982,1.071,1.167,1.272,1.398,1.536, 1.687,1.868,2.101,2.401,2.786,3.337,4.255,6.248,50.000},// 10, 0.25 {0}, {0.329,0.388,0.447,0.506,0.566,0.628,0.688,0.747,0.811,0.880,0.946,1.016,1.092,1.173,1.260,1.354, 1.470,1.595,1.732,1.899,2.083,2.308,2.583,2.952,3.442,4.183,5.631,50.000},// 12, 0.25 {0}, {0.322,0.372,0.421,0.471,0.522,0.572,0.621,0.674,0.725,0.778,0.836,0.889,0.946,1.006,1.070,1.138,1.210, 1.286,1.368,1.470,1.579,1.697,1.842,1.999,2.192,2.404,2.664,2.982,3.407,3.974,4.828,6.368,50.000},// 14, 0.25 {0}, {0.492,0.537,0.581,0.626,0.672,0.722,0.771,0.814,0.865,0.918,0.976,1.029,1.086,1.146,1.210,1.278,1.350, 1.426,1.515,1.610,1.719,1.837,1.982,2.139,2.332,2.544,2.804,3.140,3.640,4.274,5.400,8.100,50.000}// 16, 0.25, Hand-adjusted using s3 }; double bew11[][50]={// Weightmode 11, be[] {0}, {0}, {0.084,0.179,0.296,0.484,1.043,20.000},// 2, 0.25 {0}, {0.056,0.094,0.134,0.174,0.222,0.278,0.350,0.461,0.669,1.148,2.446,20.000},// 4, 0.25 {0}, {0.056,0.081,0.107,0.132,0.160,0.190,0.222,0.256,0.296,0.346,0.414,0.507,0.645,0.882,1.375,2.598,20.000},// 6, 0.25 {0}, {0.061,0.080,0.099,0.119,0.139,0.160,0.183,0.206,0.230,0.256,0.285,0.322,0.363,0.414,0.478,0.559,0.669, 0.840,1.121,1.646,2.894,20.000},// 8, 0.25 {0}, {0.052,0.068,0.083,0.098,0.113,0.129,0.146,0.162,0.179,0.197,0.216,0.235,0.256,0.282,0.310,0.341,0.376, 0.419,0.472,0.539,0.622,0.736,0.892,1.134,1.531,2.222,3.507,6.166,20.000},// 10, 0.25 {0}, {0.052,0.064,0.077,0.090,0.103,0.116,0.129,0.142,0.156,0.170,0.185,0.201,0.216,0.233,0.250,0.269,0.289,0.310, 0.337,0.367,0.399,0.439,0.489,0.552,0.630,0.727,0.850,1.018,1.264,1.626,2.169,3.110,4.850,20.000},// 12, 0.25 {0}, {0.054,0.064,0.075,0.086,0.097,0.108,0.119,0.131,0.142,0.155,0.166,0.179,0.192,0.204,0.216,0.230,0.244,0.259,0.275,0.292, 0.310,0.333,0.358,0.385,0.419,0.455,0.495,0.545,0.608,0.677,0.763,0.871,1.006,1.190,1.442,1.812,2.417,3.724,20.000},// 14, 0.25 {0}, {0.150,0.159,0.169,0.179,0.189,0.200,0.211,0.223,0.235,0.248,0.262,0.278,0.293,0.310,0.329,0.350,0.373,0.399,0.428,0.460,0.499, 0.540,0.590,0.650,0.727,0.817,0.933,1.071,1.266,1.500,1.802,2.316,2.917,4.100,20.000}// 16, 0.25-0.30, Hand-adjusted using s5 }; int i,n,skip; double *be0,*be; if(betaskip>0)be0=be_single; else { skip=(int)betaskip; switch(weightmode){ case 7: be0=bew7[N]-skip;break; default: fprintf(stderr,"Warning: no temperature set available for weightmode %d. 
Using weightmode 11's set.\n",weightmode); case 11: be0=bew11[N]-skip;break; } } for(n=0;be0[n]>0;n++);assert(n>0); be=(double*)malloc(n*sizeof(double)); for(i=0;i<n;i++)be[i]=be0[i]; *nt=n; return be; } double*loadspecbetaset(int weightmode,int qu,int*nt){ // Placeholder values double bew7[][50]={// Weightmode 7, be[] {0}, {0}, {0.202,0.485,0.911,1.549,3.042,50.000},// 2, 0.25 {0}, {0.202,0.325,0.508,0.690,0.887,1.131,1.409,1.782,2.382,3.666,50.000},// 4, 0.25 {0}, {0.202,0.318,0.435,0.554,0.679,0.807,0.944,1.096,1.272,1.477,1.741,2.101,2.596,3.363,4.675,50.000},// 6, 0.25 {0}, //{0.000,0.014,0.078,0.142,0.205,0.267,0.329,0.396,0.464,0.529,0.603,0.670,0.744,0.805,0.871,0.943,1.020,1.104, // 1.195,1.293,1.399,1.514,1.638,1.819,2.021,2.305,2.629,2.999,3.605,4.449,5.787,8.363,13.426,50.000},// 8, 0.4 //{0.000,0.055,0.118,0.180,0.240,0.304,0.366,0.428,0.489,0.557,0.619,0.688,0.764,0.827,0.895,0.968,1.047, // 1.133,1.226,1.327,1.436,1.554,1.726,1.918,2.130,2.430,2.771,3.245,3.800,4.689,6.776,10.0,50.000},// 8, 0.4 {0.000,0.100,0.180,0.267,0.352,0.438,0.520,0.604,0.690,0.782,0.880,0.982,1.096,1.214,1.355,1.524,1.741,2.020,2.382, 2.920,3.783,5.511,9.5,50.000},// 8, 0.25 {0}, {0.000,0.060,0.120,0.180,0.245,0.315,0.383,0.452,0.525,0.595,0.669,0.741,0.820,0.901,0.982,1.071,1.167,1.272,1.398,1.536, 1.687,1.868,2.101,2.401,2.786,3.337,4.255,6.248,50.000}// 10, 0.25 }; double bew11[][50]={// Weightmode 11, be[] {0}, {0}, {0.084,0.179,0.296,0.484,1.043,20.000},// 2, 0.25 {0}, {0.056,0.094,0.134,0.174,0.222,0.278,0.350,0.461,0.669,1.148,2.446,20.000},// 4, 0.25 {0}, {0.056,0.081,0.107,0.132,0.160,0.190,0.222,0.256,0.296,0.346,0.414,0.507,0.645,0.882,1.375,2.598,20.000},// 6, 0.25 {0}, {0.000,0.020,0.040,0.061,0.080,0.099,0.119,0.139,0.160,0.183,0.206,0.230,0.256,0.285,0.322,0.363,0.414,0.478,0.559,0.669, 0.840,1.121,1.646,2.894,20.000},// 8, 0.25 {0}, {0.052,0.068,0.083,0.098,0.113,0.129,0.146,0.162,0.179,0.197,0.216,0.235,0.256,0.282,0.310,0.341,0.376, 0.419,0.472,0.539,0.622,0.736,0.892,1.134,1.531,2.222,3.507,6.166,20.000},// 10, 0.25 {0}, {0.052,0.064,0.077,0.090,0.103,0.116,0.129,0.142,0.156,0.170,0.185,0.201,0.216,0.233,0.250,0.269,0.289,0.310, 0.337,0.367,0.399,0.439,0.489,0.552,0.630,0.727,0.850,1.018,1.264,1.626,2.169,3.110,4.850,20.000},// 12, 0.25 {0}, {0.054,0.064,0.075,0.086,0.097,0.108,0.119,0.131,0.142,0.155,0.166,0.179,0.192,0.204,0.216,0.230,0.244,0.259,0.275,0.292, 0.310,0.333,0.358,0.385,0.419,0.455,0.495,0.545,0.608,0.677,0.763,0.871,1.006,1.190,1.442,1.812,2.417,3.724,20.000}// 14, 0.25 }; int i,n; double *be0,*be; switch(weightmode){ case 7: be0=bew7[N];break; default: fprintf(stderr,"Warning: no temperature set available for weightmode %d. 
Using weightmode 11's set.\n",weightmode); case 11: be0=bew11[N];break; } for(n=1;be0[n]>0;n++);assert(n>0); be=(double*)malloc(n*sizeof(double)); for(i=0;i<n;i++)be[i]=be0[i]/qu; *nt=n; return be; } void calcbinderratio(int weightmode,int centreflag){ int h,i,j,k,m,n,r,v,eqb,nd,btab[16]; double be0[]={0.108,0.137,0.166,0.196,0.226,0.258,0.291,0.326,0.364,0.405,0.451,0.500,0.557,0.624,0.704,0.808,0.944,1.131,1.438,2.000}; // ^ N=8 -w0 -x-1 p=0.3 //double be2[]={0.133,0.170,0.209,0.248,0.288,0.329,0.370,0.413,0.458,0.507,0.557,0.612,0.672,0.744,0.830,0.941,1.084,1.268,1.543,1.967,2.821,5.000}; // ^ N=8 -w2 -x-1 p=0.3 double be2[]={0.502,0.528,0.556,0.585,0.613,0.644,0.678,0.713,0.754,0.797,0.842,0.894,0.954,1.022,1.101,1.190,1.294,1.419,1.570,1.762,2.015,2.357,2.821,3.505,5.000}; // ^ N=8 -w2 -x-1 p=0.6 double be2_15[]={0.530,0.561,0.595,0.630,0.668,0.699,0.732,0.767,0.804,0.842,0.881,0.923,0.967,1.013,1.061,1.111,1.164,1.219,1.276,1.337,1.400,1.467,1.536,1.609,1.685,1.785,1.892,2.004,2.124,2.276,2.440,2.616,2.836,3.111,3.453,3.832,4.352,5.000}; //double be2_15[]={2};//check // ^ N=15 -w7 p=0.4 int nt;// Number of temperatures int nhist,maxhist=500;// Keep samples for the purposes of error-estimating double *be; if((weightmode!=0&&weightmode!=2)||statemap[0]!=-1)fprintf(stderr,"Warning: expect weightmode=0 or 2, statemap[0]=-1\n"); if(weightmode==0){be=be0;nt=sizeof(be0)/sizeof(double);} else {be=be2;nt=sizeof(be2)/sizeof(double);} if(weightmode==7&&N==15){be=be2_15;nt=sizeof(be2_15)/sizeof(double);} double q,x,del,nex,maxerr,ex[nt-1]; typedef struct {double n,qq[nt][2];} qest; qest lsp,sp,hist[maxhist]; int en[nt],sbuf[2][nt][NBV]; printf("nt=%d\n",nt); printf("beta_low=%g\n",be[0]); printf("beta_high=%g\n",be[nt-1]); printf("be[] =");for(i=0;i<nt;i++)printf(" %5.3f",be[i]);printf("\n"); for(i=1,btab[0]=4;i<16;i++)btab[i]=btab[i>>1]-2*(i&1);// (# 0 bits) - (# 1 bits) sp.n=0;for(i=0;i<nt;i++)for(j=0;j<2;j++)sp.qq[i][j]=0; nhist=0; for(i=0,nex=0;i<nt-1;i++)ex[i]=0;// Count of exchanges nd=0;// Number of disorder samples eqb=(ngp>1?genp[1]:100); printf("Equilibration time %d\n",eqb); fflush(stdout); if(ngp>0&&genp[0]==-1){// optimise for(r=0;r<1;r++)for(i=0;i<nt;i++){init_state();memcpy(sbuf[r][i],XBa,NBV*sizeof(int));} int vmin=1000000000; while(1){// Thermal loop r=0; for(i=0;i<nt;i++){ memcpy(XBa,sbuf[r][i],NBV*sizeof(int)); for(j=0;j<1;j++)tree1gibbs_slow(randint(2),randint(2),randint(N),be[i]); v=val();en[i]=v; if(v<vmin){vmin=v;printf("min = %d\n",vmin);} memcpy(sbuf[r][i],XBa,NBV*sizeof(int)); } for(i=0;i<nt-1;i++){ del=(be[i+1]-be[i])*(en[i]-en[i+1]); if(del<0||randfloat()<exp(-del)){ memcpy(XBa,sbuf[r][i],NBV*sizeof(int)); memcpy(sbuf[r][i],sbuf[r][i+1],NBV*sizeof(int)); memcpy(sbuf[r][i+1],XBa,NBV*sizeof(int)); v=en[i];en[i]=en[i+1];en[i+1]=v; ex[i]++; } } nex++; if((int)nex%100==0){ for(i=0;i<nt;i++)printf("%6.3f ",be[i]);printf(" be[]\n"); printf(" "); for(i=0;i<nt-1;i++)printf(" %6.3f",ex[i]/nex);printf(" exch[]\n"); fflush(stdout); } } } while(1){// Loop over disorders initweights(weightmode,centreflag);// Disorder (J_ij) sample for(r=0;r<2;r++)for(i=0;i<nt;i++){init_state();memcpy(sbuf[r][i],XBa,NBV*sizeof(int));} lsp.n=0;for(i=0;i<nt;i++)for(k=0;k<2;k++)lsp.qq[i][k]=0; n=-eqb; while(n<eqb){// Thermal loop for(r=0;r<2;r++){// Replica loop for(i=0;i<nt;i++){ memcpy(XBa,sbuf[r][i],NBV*sizeof(int)); for(j=0;j<1;j++)tree1gibbs_slow(randint(2),randint(2),randint(N),be[i]); v=val();en[i]=v; memcpy(sbuf[r][i],XBa,NBV*sizeof(int)); } for(i=0;i<nt-1;i++){ 
del=(be[i+1]-be[i])*(en[i]-en[i+1]); if(del<0||randfloat()<exp(-del)){ memcpy(XBa,sbuf[r][i],NBV*sizeof(int)); memcpy(sbuf[r][i],sbuf[r][i+1],NBV*sizeof(int)); memcpy(sbuf[r][i+1],XBa,NBV*sizeof(int)); v=en[i];en[i]=en[i+1];en[i+1]=v; ex[i]++; } } nex++; }//r n++; if(n>=0){ for(i=0;i<nt;i++){ for(k=0,q=0;k<NBV;k++)q+=btab[sbuf[0][i][k]^sbuf[1][i][k]];q/=NV; x=q*q;lsp.qq[i][0]+=x;lsp.qq[i][1]+=x*x; } lsp.n+=1;// Keep this as a variable in case decide to vary the equilibrium point for different disorders } } nd++; if(nhist<maxhist)hist[nhist++]=lsp; else { nhist++; i=randint(nhist); if(i<maxhist)hist[i]=lsp; } int nsubsamp,nsamp=200; double p0=0.16;// Error percentile p0 to 1-p0, roughly corresponding to +/-1sd of a normal. double q0,q2,q4,samp[nt][nsamp]; double est[nt],err[nt]; sp.n+=lsp.n; for(i=0;i<nt;i++)for(j=0;j<2;j++)sp.qq[i][j]+=lsp.qq[i][j];// lsp.qq[i][j] = <q_i^(2(j+1))> <.> = thermal sum for(i=0;i<nt;i++){ q0=sp.n; q2=sp.qq[i][0]; q4=sp.qq[i][1]; est[i]=.5*(3-q0*q4/(q2*q2)); } n=MIN(nhist,maxhist); nsubsamp=n;// say for(k=0;k<nsamp;k++){ lsp.n=0;for(i=0;i<nt;i++)for(j=0;j<2;j++)lsp.qq[i][j]=0; for(m=0;m<nsubsamp;m++){ h=randint(n); lsp.n+=hist[h].n; for(i=0;i<nt;i++)for(j=0;j<2;j++)lsp.qq[i][j]+=hist[h].qq[i][j]; } for(i=0;i<nt;i++){ q0=lsp.n; q2=lsp.qq[i][0]; q4=lsp.qq[i][1]; samp[i][k]=.5*(3-q0*q4/(q2*q2)); } } for(i=0;i<nt;i++){ double e0,e1; qsort(samp[i],nsamp,sizeof(double),cmpd); e0=samp[i][(int)floor(p0*nsamp)]; e1=samp[i][(int)floor((1-p0)*nsamp)]; err[i]=MAX(fabs(est[i]-e0),fabs(est[i]-e1))*sqrt(nsubsamp/(double)nhist); } printf("\n"); printf("Number of disorders: %d\n",nd); for(i=0;i<nt;i++)printf("%6.3f ",be[i]);printf(" be[]\n"); for(i=0;i<nt;i++)printf("%6.3f ",est[i]);printf(" est[]\n"); for(i=0,maxerr=0;i<nt;i++){ printf("%6.3f ",err[i]); if(err[i]>maxerr)maxerr=err[i]; } printf(" err[]\n"); printf(" "); for(i=0;i<nt-1;i++)printf(" %6.3f",ex[i]/nex);printf(" exch[]\n"); fflush(stdout); if(nd>=2&&maxerr<1e-3)break; } } int findeqbmusingchisq(int weightmode){ // Compare equilibration times of exchange Monte-Carlo by measuring <E>. Use chi^2 method on all temps to determine eqbn. // Currently configured to use only a particular disorder (specified by the input seed). 
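// In outline: for each candidate equilibration time eqb, one run of 2*eqb sweeps (discarding the
// first eqb) and one of 4*eqb sweeps (discarding the first 2*eqb) give per-temperature mean
// energies een[0][i], een[1][i] and variance estimates ven[e][i], accumulated over nd repeated runs.
// The discrepancy is scored as
//   chi^2 = sum_i max(|een[0][i]-een[1][i]| - eps, 0)^2 / (ven[0][i]+ven[1][i]);
// once nd>=15 and chi^2 >= nt+4*sqrt(nt), eqb is judged too short and doubled; if the run cap is
// reached without that happening, eqb is accepted as sufficient for the target accuracy eps.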
//if(weightmode!=2||statemap[0]!=-1)fprintf(stderr,"Warning: expect weightmode=2, statemap[0]=-1\n"); double *be; int nt;// Number of temperatures int nd;// Number of disorders sampled int pr=2; be=loadbetaset(weightmode,genp[3],&nt); int en[nt],sbuf[nt][NBV]; double lem[2][nt];// Total energies for a given disorder double em[2][nt][3];// Energy moments over all disorders double een[2][nt],ven[2][nt];// Derived energy estimates and std errs double x,del,nex,ex[nt-1]; int eqb,leqb;// Equilibration time double eps=ngp>2?genp[2]:0.1;// Target absolute error in energy double chi; int e,i,n,v; double nit,nsol; printf("Number of temperatures %d\n",nt); for(i=0;i<nt;i++)printf("%8.3f ",be[i]);printf(" be[]\n"); printf("Monte Carlo mode %g (%g)\n",genp[0],genp[1]); for(i=0,nex=0;i<nt-1;i++)ex[i]=0;// Count of exchanges eqb=1; while(1){// Loop over equilibration times printf("\nEquilibration times %d and %d\n",eqb,eqb*2); for(e=0;e<2;e++)for(i=0;i<nt;i++)em[e][i][0]=em[e][i][1]=em[e][i][2]=0; nd=0; while(1){// Loop over disorders and runs //if(genp[1]==0)initweights(weightmode,centreflag);// Disorder (J_ij) sample for(e=0;e<2;e++){// Loop over two equilibrations being compared leqb=eqb<<e; nit=nsol=0; lp0: for(i=0;i<nt;i++){init_state();memcpy(sbuf[i],XBa,NBV*sizeof(int));} for(i=0;i<nt;i++)lem[e][i]=0; for(n=0;n<2*leqb||genp[1]<0;n++){// Thermal loop for(i=0;i<nt;i++){ memcpy(XBa,sbuf[i],NBV*sizeof(int)); switch((int)(genp[0])){ case 0: tree1gibbs_slow(randint(2),randint(2),randint(N),be[i]); nit+=1; break; case 1: simplegibbssweep_slow(be[i]); nit+=1; break; } v=val();en[i]=v; memcpy(sbuf[i],XBa,NBV*sizeof(int)); if(genp[1]<0&&v==genp[1]){nsol+=1;printf("IT %g SOL %g IT/SOL %g\n",nit,nsol,nit/nsol);goto lp0;} } for(i=0;i<nt-1;i++){ del=(be[i+1]-be[i])*(en[i]-en[i+1]); if(del<0||randfloat()<exp(-del)){ memcpy(XBa,sbuf[i],NBV*sizeof(int)); memcpy(sbuf[i],sbuf[i+1],NBV*sizeof(int)); memcpy(sbuf[i+1],XBa,NBV*sizeof(int)); v=en[i];en[i]=en[i+1];en[i+1]=v; ex[i]++; } } nex++; if(n>=leqb)for(i=0;i<nt;i++)lem[e][i]+=en[i];// Add energies deemed to have been equilibrated if(pr>=3&&n>=leqb){for(i=0;i<nt;i++)printf("%8d ",en[i]);printf(" e=%d\n",e);} }// Thermal for(i=0;i<nt;i++)lem[e][i]/=leqb; }// e nd++; if(pr>=1){ printf("\n"); for(i=0;i<nt;i++)printf("%8.3f ",be[i]);printf(" be[]\n"); printf(" "); for(i=0;i<nt-1;i++)printf(" %8.3f",ex[i]/nex);printf(" exch[]\n"); } for(e=0;e<2;e++){ for(i=0;i<nt;i++){ x=lem[e][i];if(pr>=2)printf("%8.2f ",x); em[e][i][0]+=1;em[e][i][1]+=x;em[e][i][2]+=x*x; een[e][i]=em[e][i][1]/em[e][i][0]; ven[e][i]=(em[e][i][2]-em[e][i][1]*em[e][i][1]/em[e][i][0])/(em[e][i][0]-1)/em[e][i][0]; } if(pr>=2)printf(" sample_%d\n",eqb<<e); } if(pr>=1){ for(e=0;e<2;e++){ for(i=0;i<nt;i++)printf("%8.2f ",een[e][i]);printf(" een[%d][]\n",eqb<<e); for(i=0;i<nt;i++)printf("%8.4f ",sqrt(ven[e][i]));printf(" err[%d][]\n",eqb<<e); } } for(i=0;i<nt;i++){ x=een[0][i]-een[1][i]; x=x*x/(ven[0][i]+ven[1][i]);if(pr>=1)printf("%8.2f ",x); } if(pr>=1)printf(" chi^2 (raw)\n"); chi=0; for(i=0;i<nt;i++){ x=fabs(een[0][i]-een[1][i])-eps;x=MAX(x,0); x=x*x/(ven[0][i]+ven[1][i]);if(pr>=1)printf("%8.2f ",x); chi+=x; } if(pr>=1){ printf(" chi^2 (reduced)\n"); printf("Error %g cf chi^2_%d, N=%d, nd=%d, genp[]=",chi,nt,N,nd); for(i=0;i<ngp;i++)printf("%g%s",genp[i],i<ngp-1?",":""); printf(", CPU=%.2fs\n",cpu()); fflush(stdout); } if(nd>=15&&chi>=nt+4*sqrt(nt))break; if(nd>=(genp[5]==0?1000:genp[5]))goto ok0; }// Disorders if(genp[4]>0&&eqb>=genp[4]){printf("Giving up. 
Equilibration time %d deemed insufficient for target error %g at nd=%d, N=%d, method=%g.\n",eqb,eps,nd,N,genp[0]);return -1;} eqb*=2; }// Eqbn times ok0: printf("Equilibration time %d deemed sufficient for target error %g at nd=%d, N=%d, method=%g\n",eqb,eps,nd,N,genp[0]); return eqb; } int findeqbmusingtopbeta(int weightmode){ // Compare equilibration times of exchange Monte-Carlo by measuring <E>. Determine eqbn // by assuming top beta is enough to essentially force groundstate. Currently // configured to use only a particular disorder (specified by the input seed). double *be;// Set of betas int nt;// Number of temperatures (betas) int nd;// Number of disorders sampled (or number of restarts if the disorder is fixed) int pr=genp[1]; be=loadbetaset(weightmode,genp[3],&nt); typedef struct { int X[NBV];// State int t[nt];// t[i] = Time last visited temperature i (-1 = never) int e;// Energy } tstate; // Tempering state tstate sbuf[nt],ts; int vmin; double em[nt][3];// Energy moments over all disorders double een[nt],ven[nt];// Derived energy estimates and std errs double x,y,del,nex,ex[nt-1],ex2[nt][nt]; int eqb;// Current upper bound on equilibration time double eps=ngp>2?genp[2]:0.1;// Target absolute error in energy int e,i,j,k,n,v,foundsol; double mu,va,se,nit,nsol,tim0,tim1,tim2,tts0,tts1; gibbstables*gt; printf("Number of temperatures: %d\n",nt); for(i=0;i<nt;i++)printf("%8.3f ",be[i]);printf(" be[]\n"); printf("Monte Carlo mode %g\n",genp[0]); int ndmax=5/eps; // 5/eps is rough-and-ready parameter. >=5/eps gives some degree of // protection against rare events int ndgu=0.4*ndmax; // Give-up point const int eqbprec=4096; // Only care about knowing the required eqb to 1/eqbprec accuracy, int eqbblksz; // so consider equilibration steps in blocks of eqbblksz to keep memory compact. 
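 // (Illustrative worked example, not from the original source: with eqbprec=4096,
 //  a requested eqb of 10000 gives eqbblksz=(10000-1)/4096+1=3, eqb is then rounded
 //  down to 9999 and split into eqbnblk=3333 blocks, so the per-run top-beta energy
 //  history ten[] below needs 2*3333 entries rather than 2*10000.)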
int eqbnblk; eqb=ngp>5?genp[5]:1; vmin=ngp>6?genp[6]:1000000000; initrandtab(50000); tts0=tts1=0; gt=initgibbstables(nt,be,(int)(genp[0])==0); while(1){// Loop over equilibration lengths eqbblksz=(eqb-1)/eqbprec+1; eqb-=eqb%eqbblksz; eqbnblk=eqb/eqbblksz; double ten[2*eqbnblk],sten0[eqbnblk+1],sten1[eqbnblk+1],sten2[eqbnblk+1]; double lem[nt]; printf("\nEquilibration length %d\n",eqb);fflush(stdout); for(i=0;i<eqbnblk+1;i++)sten0[i]=sten1[i]=sten2[i]=0; for(i=0;i<nt;i++)em[i][0]=em[i][1]=em[i][2]=0; for(i=0,nex=0;i<nt-1;i++)ex[i]=0;// Count of pair-exchanges nd=0; tim0=cpu(); while(1){// Loop over runs tim2=cpu(); for(i=0;i<nt;i++)for(j=0;j<nt;j++)ex2[i][j]=0;// Count of long-range exchanges for a particular run nit=nsol=0; for(i=0;i<nt;i++){ init_state();memcpy(sbuf[i].X,XBa,NBV*sizeof(int)); for(j=0;j<nt;j++)sbuf[i].t[j]=-(j!=i); } for(i=0;i<nt;i++)lem[i]=0; for(i=0;i<2*eqbnblk;i++)ten[i]=0; foundsol=0; for(n=0;n<2*eqb;n++){// Thermal loop for(i=0;i<nt;i++){ memcpy(XBa,sbuf[i].X,NBV*sizeof(int)); switch((int)(genp[0])){ case 0: //tree1gibbs_slow(randint(2),randint(2),randint(N),be[i]); tree1gibbs(randint(2),randint(2),randint(N),&gt[i]); nit+=1; break; case 1: simplegibbssweep(&gt[i]); nit+=1; break; } v=val();if(v<vmin){vmin=v;tts0=tts1=0;} if(v==vmin&&foundsol==0){tts0+=1;tts1+=n;foundsol=1;} sbuf[i].e=v; memcpy(sbuf[i].X,XBa,NBV*sizeof(int)); } for(i=0;i<nt-1;i++){ del=(be[i+1]-be[i])*(sbuf[i].e-sbuf[i+1].e); if(del<0||randfloat()<exp(-del)){ ts=sbuf[i]; sbuf[i]=sbuf[i+1]; sbuf[i+1]=ts; ex[i]++; if(n>=eqb)for(k=i;k<=i+1;k++)for(j=0;j<nt;j++){ if(sbuf[k].t[j]>sbuf[k].t[k])ex2[j][k]+=1;// add j->k flux unit if more recently in j than in k } } for(j=0;j<nt;j++)sbuf[j].t[j]=nt*n+i; } nex++; ten[n/eqbblksz]+=sbuf[nt-1].e;// Record top beta's energy (for equilibration detection) if(n>=eqb)for(i=0;i<nt;i++)lem[i]+=sbuf[i].e;// Store total energies at each temperature (for interest) if(pr>=4)printf("Top beta energy %g\n",ten[n]); }// Thermal for(i=0;i<nt;i++)lem[i]/=eqb; nd++; if(pr>=1){ printf("\n"); for(i=0;i<nt;i++)printf("%8.3f ",be[i]);printf(" be[]\n"); printf(" "); for(i=0;i<nt-1;i++)printf(" %8.3f",ex[i]/nex);printf(" exch[]\n"); } if(pr>=3){ for(i=0;i<nt;i++){ for(j=0;j<nt;j++)printf("%8.3f ",ex2[i][j]/eqb); printf("\n"); } } for(i=0;i<nt;i++){ x=lem[i];if(pr>=2)printf("%8.2f ",x); em[i][0]+=1;em[i][1]+=x;em[i][2]+=x*x; een[i]=em[i][1]/em[i][0]; ven[i]=(em[i][2]-em[i][1]*em[i][1]/em[i][0])/(em[i][0]-1)/em[i][0]; } if(pr>=2)printf(" sample_%d\n",eqb); if(pr>=1){ for(i=0;i<nt;i++)printf("%8.2f ",een[i]);printf(" een[%d][]\n",eqb); for(i=0;i<nt;i++)printf("%8.4f ",sqrt(ven[i]));printf(" err[%d][]\n",eqb); } for(n=1,x=0;n<=eqbnblk;n++){ x+=ten[2*n-2]+ten[2*n-1]-ten[n-1]; // x = sum of ten[n],...,ten[2n-1] = the top-energy terms that would be used at eqb=n*eqbblksz y=x/(n*eqbblksz); sten0[n]+=1;sten1[n]+=y;sten2[n]+=y*y; } mu=sten1[eqbnblk]/sten0[eqbnblk]; va=(sten2[eqbnblk]-sten1[eqbnblk]*sten1[eqbnblk]/sten0[eqbnblk])/(sten0[eqbnblk]-1); se=sqrt(va/sten0[eqbnblk]); assert(mu>=vmin); if(pr>=1){ printf("Error %.3g (std err %.3g), vmin=%d, N=%d, nd=%d, nt=%d, eqb=%d, tts=%g, genp[]=",mu-vmin,se,vmin,N,nd,nt,eqb,tts1/tts0); for(i=0;i<ngp;i++)printf("%g%s",genp[i],i<ngp-1?",":""); tim1=cpu();printf(", CPU=%.2fs, CPU_this=%.2fs, CPU_lastrun=%.2fs, CPU/run=%.3fs\n",tim1,tim1-tim0,tim1-tim2,(tim1-tim0)/nd); prtimes(); fflush(stdout); } // Of course N(mu,se^2) is a very poor approximation to the posterior distribution of the energy of the top beta (NCU anyway) 
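 // (Added explanatory note, not in the original: the break below declares the current eqb
 //  insufficient when the excess of the mean top-beta energy mu over the best energy found,
 //  vmin, exceeds eps; for the first ndgu runs that excess is damped by the factor nd/ndgu,
 //  so a noisy early estimate of mu cannot trigger the doubling of eqb prematurely.)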
if((mu-vmin)*MIN(nd,ndgu)/(double)ndgu>eps)break; if(nd>=ndmax){ if(pr>=3)for(n=1;n<=eqbnblk;n++)printf("%6d %12.6f %12g\n",n*eqbblksz,sten1[n]/sten0[n],sten1[n]/sten0[n]-vmin); for(n=1,e=1;n<=eqbnblk;n++)if(sten1[n]/sten0[n]-vmin>eps)e++; eqb=e*eqbblksz; printf("Equilibration time %d deemed sufficient for target error %g at nd=%d, eqb=%d, N=%d, vmin=%d, method=%g, workproduct=%d\n", eqb,eps,nd,eqb,N,vmin,genp[0],eqb*nt); goto ok1; } }// Runs (nd) if(genp[4]>0&&eqb>=genp[4]){printf("Giving up. Equilibration time %d deemed insufficient for target error %g at nd=%d, N=%d, method=%g.\n",eqb,eps,nd,N,genp[0]);return -1;} eqb*=2;// This scale-up ratio should perhaps be chosen to minimise (r-1+ndgu/ndmax)/log(r) }// Eqbn times ok1:; freegibbstables(nt,gt); return eqb; } // Find a state of energy <=bv, from a clean start #define MAXERANGE (1<<16)// Maximum energy range int pertandgibbs(int tree,double beta,double pert,int bv){ int e,v,nit,mine,maxe,stats[MAXERANGE]; double t0,t1,tt,now; gibbstables*gt; printf("Monte Carlo mode: %s\n",tree?"tree":"single-vertex"); printf("Beta: %g\n",beta); printf("Perturbation: %g\n",pert); printf("Target energy %d\n",bv); initrandtab(50000); gt=initgibbstables(1,&beta,tree); memset(stats,0,sizeof(stats)); init_state();nit=0; mine=1000000000;maxe=-mine; t0=cpu();// Initial time t1=0;// Elapsed time threshold for printing update while(1){// Loop over runs switch(tree){ case 0: pertstate(pert); simplegibbssweep(gt); nit+=1; break; case 1: pertstate(pert); tree1gibbs(randint(2),randint(2),randint(N),gt); nit+=1; break; } v=val();if(v<bv)goto frexit; if(v<mine)mine=v;if(v>maxe)maxe=v; if(v>=bv){assert(v-bv<MAXERANGE);stats[v-bv]++;} now=cpu(); tt=now-t0; if(v<=bv||tt>=t1){ t1=MAX(tt*1.1,tt+5); printf("%10.2fs %10d iterations\n",tt,nit); for(e=maxe;e>=mine;e--)if(stats[e-bv])printf("%6d: %10d\n",e,stats[e-bv]); printf("\n"); } if(v<=bv)goto frexit; } frexit:; freegibbstables(1,gt); return v; } // Find a state of energy <=bv, from a clean start; simple version of pertandgibbs() int pertandgibbs_simple(int tree,double beta,double pert,int bv,gibbstables*gt,int64*nit){ int v; init_state(); while(1){// Loop over runs pertstate(pert); if(randfloat()<genp[4])init_state(); switch(tree){ case 0: simplegibbssweep(gt); break; case 1: tree1gibbs(randint(2),randint(2),randint(N),gt); break; } if(nit)(*nit)++; v=val();if(v<=bv)return v; } } // Perturbation + Gibbs sampling at fixed (pert,beta) void opt3(int weightmode,int tree,double beta,double pert,int bv,int tns){ int ns,cv; int64 nit; double tim0,tim1,now; gibbstables*gt; printf("Monte Carlo mode: %s\n",tree?"tree":"single-vertex"); printf("Beta: %g\n",beta); printf("Perturbation: %g\n",pert); printf("Target energy %d\n",bv); initrandtab(50000); gt=initgibbstables(1,&beta,tree); ns=0;tim0=tim1=cpu();nit=0; printf(" Iterations R T beta pert bv ns t(bv) t(all) t(bv)/ns its/ns\n");fflush(stdout); while(ns<tns){ cv=pertandgibbs_simple(tree,beta,pert,bv,gt,&nit); now=cpu(); if(cv<bv){ns=0;tim1=now;nit=0;bv=cv;} else ns++; printf("%12lld %d %d %8.3g %8.3g %10d %6d %8.2f %8.2f %8.3g %8.3g\n",nit,RANDSTART,tree,beta,pert,bv,ns,now-tim1,now-tim0,(now-tim1)/ns,nit/(double)ns);fflush(stdout); } freegibbstables(1,gt); } double addlog(double x,double y){// log(e^x+e^y) double d=x-y; if(d>0){d=-d;y=x;} if(d<-44)return y; return y+log(1+exp(d)); } void findspectrum(int weightmode,int tree,const char*outprobfn,int pr){ double *be;// Set of betas int nt;// Number of temperatures (betas) int qu;// energy quantum qu=energyquantum(); 
be=loadspecbetaset(weightmode,qu,&nt); typedef struct { int X[NBV];// State int e;// Energy int me;// min energy this state has visited int ne;// min energy this state has visited since visiting lowest beta (normally beta=0) } tstate; // Tempering state tstate sbuf[nt],ts; double een[nt],ven[nt],veo[nt];// Derived energy estimates, variance and std errs double ovl[nt-1];// Overlap probabilities for iterative Z-finding int d,e,h,i,j,n,r,v,dc,lc,h0,lqc,margin,printed; int nis;// number of independent solutions (minima since hitting lowest beta) int base,mine,maxe;// base energy (0-pt for ndj array), min, max energies double x,y,z,del,nit,tim0,tim1,tim2; gibbstables*gt; FILE*fp; margin=100;// safety margin for lowest energy lqc=centreconst(); if(!checkbisym()){fprintf(stderr,"Error: findspectrum() uses symmetry and assumes that the model has no external fields\n");exit(1);} init_state();v=stabletreeexhaust(val(),1,0);base=v-margin;mine=v;maxe=lqc>>1; //if(ngp>1)mine=genp[1]; const int maxdoublings=50; const int linlen=20; const int nhist=maxdoublings*linlen; const int erange=maxe+1-base; typedef struct { int64 ndj[erange];// ndj[e] = number of samples of energy base+e at history <=h int64 nid[nt];// nid[i] = number of samples at beta[i] at history <=h (currently simple constant) double ten1[nt],ten2[nt];// ten_r[i] = total energy^r at beta[i] at history <=h double ex0[nt],ex1[nt];// ex1[i] = number of exchanges, ex0[i] = number of possible exchanges i<->i+1 at history <=h } histdata; // Group sample values in multiples // 1 1 ... 1 2 2 ... 2 4 4 ... 4 8 8 ... 8 ... // linlen linlen linlen linlen ... // so that having sampled 2n values, can look at the last n samples // and can do that every size increase of roughly a factor of 1+1/linlen histdata hist[nhist];// switch to malloc to cope with lame stack sizes double lp[erange],lZ[nt]; printf("Number of temperatures: %d\n",nt); for(i=0;i<nt;i++)printf("%8.3f ",be[i]);printf(" be[]\n"); printf("Monte Carlo mode: %s\n",tree?"tree":"single-vertex"); printf("Randstart: %d\n",RANDSTART); printf("Centre constant: %d\n",lqc); printf("Energy quantum: %d\n",qu); printf("Tree mode: %d\n",tree); initrandtab(50000); gt=initgibbstables(nt,be,tree); nis=0; tim0=cpu();tim1=tim2=0; for(i=0;i<nt;i++){ init_state();memcpy(sbuf[i].X,XBa,NBV*sizeof(int)); sbuf[i].e=sbuf[i].me=sbuf[i].ne=val(); lZ[i]=0; } dc=0;// doubling counter: do linlen lots of 2^dc lc=0;// linear counter 0<=lc<linlen nit=0;printed=0; memset(&hist[0],0,sizeof(hist[0])); for(e=mine;e<=maxe;e++)lp[e-base]=0; while(mine!=genp[1]||!printed||((ngp<=2||nis<genp[2])&&(ngp<=3||nis==0||cpu()-tim0<genp[3]))){ tim1-=cpu(); lc+=1;if(lc==linlen){lc=0;dc+=1;assert(dc<maxdoublings);} h=dc*linlen+lc;// position in history memcpy(&hist[h],&hist[h-1],sizeof(hist[h])); for(d=0;d<(1<<dc);d++){ for(i=0;i<nt;i++){ memcpy(XBa,sbuf[i].X,NBV*sizeof(int)); switch(tree){ case 0: simplegibbssweep(&gt[i]); break; case 1: tree1gibbs(randint(2),randint(2),randint(N),&gt[i]); break; } v=val();if(v<mine){mine=v;nis=0;} sbuf[i].e=v; if(v<sbuf[i].me)sbuf[i].me=v; if(v==mine&&sbuf[i].ne>mine)nis++; if(i==0||v<sbuf[i].ne)sbuf[i].ne=v; memcpy(sbuf[i].X,XBa,NBV*sizeof(int)); hist[h].nid[i]++; hist[h].ten1[i]+=v; hist[h].ten2[i]+=v*(double)v; if(v<base)fprintf(stderr,"Error: energy lower than expected. 
Try increasing margin.\n"); if(v>maxe)v=lqc-v;// apply symmetry assert(v>=base&&v-base<erange); hist[h].ndj[v-base]++; } for(i=0;i<nt-1;i++){ del=(be[i+1]-be[i])*(sbuf[i].e-sbuf[i+1].e); if(del<0||randfloat()<exp(-del)){ ts=sbuf[i]; sbuf[i]=sbuf[i+1]; sbuf[i+1]=ts; hist[h].ex1[i]++; } hist[h].ex0[i]++; } nit+=1; }//d h0=MAX(h-linlen,0);// subtract off from this point in history for(i=0;i<nt;i++){ double e0,e1,e2; e0=hist[h].nid[i]-hist[h0].nid[i]; e1=hist[h].ten1[i]-hist[h0].ten1[i]; e2=hist[h].ten2[i]-hist[h0].ten2[i]; een[i]=e1/e0; ven[i]=(e2-e1*e1/e0)/(e0-1); veo[i]=(e2-e1*e1/e0)/(e0-1)/e0; } tim1+=cpu(); if(nit>=1000*(tree?1:10)){ tim2-=cpu(); // Maximise \prod_{ij}(p_j e^{-\beta_i E_j}/Z_i)^{n_{ij}} over p_j // where Z_i = sum_j p_j e^{-\beta_i E_j} // Argmax only depends on \sum_i n_{ij} (called ndj) // and \sum_j n_{ij} (called nid) // the equation being ndj/p_j = \sum_i nid e^{-\beta_i E_j}/Z_i while(1){ // Infer Z_i double lZ0[nt]; memcpy(lZ0,lZ,sizeof(lZ0)); if(pr>=3){for(e=maxe;e>=mine;e--)printf("%6d %12g\n",e,lp[e-base]);printf("\n");} for(i=0;i<nt;i++){ lZ[i]=-1e30; for(e=mine;e<=lqc-mine;e++){// Can optimise, of course if(e<=maxe)j=e-base; else j=lqc-e-base; lZ[i]=addlog(lZ[i],lp[j]-be[i]*e); } }//i if(pr>=2){for(i=0;i<nt;i++)printf("%3d %9.3g %12g\n",i,be[i],lZ[i]);printf("\n");} // Infer p_j z=-1e30; for(e=mine;e<=maxe;e++){ j=e-base; r=hist[h].ndj[j]-hist[h0].ndj[j]; if(r==0){lp[j]=-1e30;continue;} x=-1e30; for(i=0;i<nt;i++){ y=-be[i]*e-lZ[i]; if(e<lqc-e)y=addlog(y,-be[i]*(lqc-e)-lZ[i]); x=addlog(x,log(hist[h].nid[i]-hist[h0].nid[i])+y); } lp[j]=log(r)-x; z=addlog(z,lp[j]+log(2)*(2*e<lqc));// weight by symmetry factor }//e z-=NV*log(2);// 2^NV states altogether for(e=mine;e<=maxe;e++)lp[e-base]-=z; for(i=0,x=0;i<nt;i++)x=MAX(x,fabs(lZ[i]-lZ0[i])); if(x<1e-3)break; }//while for(i=0;i<nt-1;i++){ double x0,x1,x2; x0=x1=x2=-1e30; for(e=mine;e<=lqc-mine;e++){ if(e<=maxe)j=e-base; else j=lqc-e-base; x0=addlog(x0,lp[j]*2-(be[i]+be[i+1])*e); x1=addlog(x1,lp[j]*2-be[i]*2*e); x2=addlog(x2,lp[j]*2-be[i+1]*2*e); } ovl[i]=exp(x0-(x1+x2)/2); } tim2+=cpu(); printf("\n"); for(i=0;i<nt;i++)printf("%8.3f ",be[i]);printf(" be[]\n"); printf(" ");for(i=0;i<nt-1;i++)printf(" %8.3f",(hist[h].ex1[i]-hist[h0].ex1[i])/(hist[h].ex0[i]-hist[h0].ex0[i]));printf(" exch[]\n"); for(i=0;i<nt;i++)printf("%8.2f ",een[i]);printf(" Mean energy\n"); for(i=0;i<nt;i++)printf("%8.4f ",sqrt(ven[i]));printf(" Std dev en\n"); if(0){for(i=0;i<nt;i++)printf("%8.4f ",sqrt(veo[i]));printf(" Std error (uncorrected for eqbn)\n");} for(i=0;i<nt;i++)printf("%8g ",lZ[i]);printf(" log(Z)\n"); printf(" ");for(i=0;i<nt-1;i++)printf(" %8.3f",ovl[i]);printf(" Overlap\n"); for(e=MIN(mine+nt-1,maxe);e>=mine;e--)printf("%8d ",e);printf(" Energy\n"); for(e=MIN(mine+nt-1,maxe);e>=mine;e--)if(lp[e-base]>-1e10)printf("%8.2f ",lp[e-base]); else printf(" zilch "); printf(" log(occupancy)\n"); for(e=MIN(mine+nt-1,maxe);e>=mine;e--)if(lp[e-base]>-1e10)printf("%8.2f ",lp[e-base]-lp[mine-base]); else printf(" zilch "); printf(" same rel gr st\n"); for(e=MIN(mine+nt-1,maxe);e>=mine;e--)printf("%8.3g ",(double)(hist[h].ndj[e-base]-hist[h0].ndj[e-base]));printf(" ndj\n"); for(e=mine,n=0;e<=maxe;e++)n+=hist[h].ndj[e-base]-hist[h0].ndj[e-base]>0; printf("min_en=%d, max_en=%d, nnz_en=%d, N=%d, nt=%d, genp[]=",mine,maxe,n,N,nt); for(i=0;i<ngp;i++)printf("%g%s",genp[i],i<ngp-1?",":""); for(i=j=0;i<nt;i++)j+=(sbuf[i].me==mine); printf(", CPU=%.2fs, CPU_EMC=%.2fs, CPU_Z=%.2fs, CPU/EMCit=%.3gs, its=%.3g, centre_energy=%g, nummine=%d, nind=%d\n", 
cpu()-tim0,tim1,tim2,tim1/nit,(double)hist[h].nid[0],lqc/2.,j,nis); prtimes(); if(outprobfn){ fp=fopen(outprobfn,"w");assert(fp); for(e=maxe;e>=mine;e--)if(lp[e-base]>-1e10)fprintf(fp,"%6d %12.3f\n",e,lp[e-base]); fclose(fp); } fflush(stdout); printed=1; } } freegibbstables(nt,gt); } void wanglandau(int weightmode){ int e,i,o,v,x,y,e0,e1,e2,lqc,margin; int base,mine,maxe;// base energy (0-pt for ndj array), min, max energies int64 it; double s,ff,del; margin=100;// safety margin for lowest energy lqc=centreconst(); if(!checkbisym()){fprintf(stderr,"Error: wanglandau() uses symmetry and assumes that the model has no external fields\n");exit(1);} init_state();v=stabletreeexhaust(val(),1,0);base=v-margin;mine=v;maxe=lqc>>1; const int erange=maxe+1-base; int64 hist[erange]; double lp[erange]; for(i=0;i<erange;i++){lp[i]=0;hist[i]=0;} it=0;e0=val();ff=.1; while(1){ hist[e0-base]++; x=randint(N);y=randint(N);o=randbit();i=randint(4); XB(x,y,o)^=1<<i; e1=val();// inefficient - but just a test routine anyway it++; if(e1<=maxe)e2=e1; else e2=lqc-e1; if(e2<mine)mine=e2; del=lp[e0-base]-lp[e2-base]; if(del>0||randfloat()<exp(del)){// accept if(e1>maxe)for(x=0;x<N;x++)for(y=0;y<N;y++)XB(x,y,(x+y)&1)^=15; lp[e2-base]+=ff/(1+(e2!=lqc-e2)); e0=e2; }else XB(x,y,o)^=1<<i; if(it%10000000==0){ printf("it=%lld\n",it); for(e=mine,s=-1e30;e<=maxe;e++)s=addlog(s,lp[e-base]+log(2)*(2*e<lqc)); s-=NV*log(2);// 2^NV states altogether for(e=mine;e<=maxe;e++)lp[e-base]-=s; for(e=maxe;e>=mine;e--)printf("%6d %12lld %12g\n",e,hist[e-base],lp[e-base]); printf("\n"); fflush(stdout); } } } uint64 hash(){// Symmetry-invariant hash, suitable if checksym()==1 int o,v,x,y,xor; uint64 h; xor=(XB(0,0,0)&1)*15; h=0; for(x=0;x<N;x++)for(y=0;y<N;y++)for(o=0;o<2;o++){ v=XB(x,y,o)^xor; h=h*h+h*9123091820398247ULL+(h>>20)+v; } return h; } void countgroundstates(int weightmode,int tns,int strat){ int bv,cv,ns; double x; assert(checksym()); if(ngp>0)bv=genp[0]; else bv=1000000000; restart: printf("Using bv=%d\n",bv); for(ns=0;ns<tns;ns++){ cv=opt1(0,1e9,0,1,&x,strat,bv,-1000000000); if(cv<bv){bv=cv;printf("RESTART\n");goto restart;} //prstate(stdout,1,0); printf("HASH %016llx\n",hash());fflush(stdout); } } void findspectrum_ds(int weightmode,int tree,const char*outprobfn,int pr){ double *be;// Set of betas int bi;// beta index (part of state) int nt;// Number of temperatures (betas) int qu;// energy quantum int visited0;// have visited lowest beta since found a minimum qu=energyquantum(); be=loadspecbetaset(weightmode,qu,&nt); double een[nt],ven[nt],veo[nt];// Derived energy estimates, variance and std errs double ovl[nt-1];// Overlap probabilities for iterative Z-finding int d,e,h,i,j,n,v,dc,lc,h0,h1,lqc,margin,printed; int nis;// number of independent solutions (minima since hitting lowest beta) int base,mine,maxe;// base energy (0-pt for ndj array), min, max energies double x,y,z,del,nit,tim0,tim1,tim2; gibbstables*gt; FILE*fp; margin=100;// safety margin for lowest energy lqc=centreconst(); if(!checkbisym()){fprintf(stderr,"Error: findspectrum() uses symmetry and assumes that the model has no external fields\n");exit(1);} init_state();v=stabletreeexhaust(val(),1,0);base=v-margin;mine=v;maxe=lqc>>1; //if(ngp>1)mine=genp[1]; const int maxdoublings=50; const int linlen=20; const int nhist=maxdoublings*linlen; const int erange=maxe+1-base; typedef struct { int64 ndj[erange];// ndj[e] = number of samples of energy base+e at history <=h int64 nid[nt];// nid[i] = number of samples at beta[i] at history <=h (currently simple constant) int64 
ndd;// number of samples at history <=h double ten1[nt],ten2[nt];// ten_r[i] = total energy^r at beta[i] at history <=h double ex0[nt],ex1[nt];// ex1[i] = number of exchanges, ex0[i] = number of possible exchanges i<->i+1 at history <=h double lZ[nt];// log(Z_i) for this group of exchanges } histdata; // Group sample values in multiples // 1 1 ... 1 2 2 ... 2 4 4 ... 4 8 8 ... 8 ... // linlen linlen linlen linlen ... // so that having sampled 2n values, can look at the last n samples // and can do that every size increase of roughly a factor of 1+1/linlen histdata hist[nhist];// switch to malloc to cope with lame stack sizes double lp[erange],lZ[nt]; printf("Number of temperatures: %d\n",nt); for(i=0;i<nt;i++)printf("%8.3f ",be[i]);printf(" be[]\n"); printf("Monte Carlo mode: %s\n",tree?"tree":"single-vertex"); printf("Randstart: %d\n",RANDSTART); printf("Centre constant: %d\n",lqc); printf("Energy quantum: %d\n",qu); printf("Tree mode: %d\n",tree); initrandtab(50000); gt=initgibbstables(nt,be,tree); nis=0; visited0=1; tim0=cpu();tim1=tim2=0; bi=0;init_state(); dc=0;// doubling counter: do linlen lots of 2^dc lc=0;// linear counter 0<=lc<linlen nit=0;printed=0; memset(&hist[0],0,sizeof(hist[0])); for(e=mine;e<=maxe;e++)lp[e-base]=0; if(0){ double cheat[24]={354.891, 358.017, 363.142, 371.352, 382.006, 395.402, 410.602, 428.556, 449.365, 474.283, 503.631, 537.024, 577.334, 621.79, 677.746, 747.765, 840.861, 963.706, 1126.02, 1370.36, 1765.61, 2560.82, 4399.47, 23070}; memcpy(lZ,cheat,nt*sizeof(double)); } while(mine!=genp[1]||ngp<2||nis<genp[2]||!printed){ tim1-=cpu(); lc+=1;if(lc==linlen){lc=0;dc+=1;assert(dc<maxdoublings);} h=dc*linlen+lc;// position in history memcpy(&hist[h],&hist[h-1],sizeof(hist[h])); memcpy(hist[h].lZ,lZ,sizeof(lZ)); for(d=0;d<(1<<dc);d++){ switch(tree){ case 0: simplegibbssweep(&gt[bi]); break; case 1: tree1gibbs(randint(2),randint(2),randint(N),&gt[bi]); break; } v=val();if(v<mine){mine=v;nis=0;visited0=1;} if(bi==0)visited0=1; if(v==mine&&visited0){nis++;visited0=0;} hist[h].ndd++; hist[h].nid[bi]++; hist[h].ten1[bi]+=v; hist[h].ten2[bi]+=v*(double)v; if(v<base)fprintf(stderr,"Error: energy lower than expected. 
Try increasing margin.\n"); if(v>maxe)v=lqc-v;// apply symmetry assert(v>=base&&v-base<erange); hist[h].ndj[v-base]++; i=bi+randsign();// try moving to neighbouring beta (careful to make this procedure self-dual) if(i>=0&&i<nt){ del=lZ[bi]-lZ[i]-(be[i]-be[bi])*v; hist[h].ex0[MIN(i,bi)]++; if(del>0||randfloat()<exp(del)){hist[h].ex1[MIN(i,bi)]++;bi=i;} } nit+=1; }//d tim1+=cpu(); h0=MAX(h-linlen,0);// subtract off from this point in history //h0=0; if(nit>=100000*(tree?1:10)){ tim2-=cpu(); // Maximise \prod_{ij}(p_j e^{-\beta_i E_j}/Z_i)^{n_{ij}} over p_j // where Z_i = sum_j p_j e^{-\beta_i E_j} // Argmax only depends on \sum_i n_{ij} (called ndj) // and \sum_j n_{ij} (called nid) // the equation being ndj/p_j = \sum_i nid e^{-\beta_i E_j}/Z_i {//check int64 n0,n1,n2; for(i=0,n0=0;i<nt;i++)n0+=hist[h].nid[i]-hist[h0].nid[i]; for(e=mine,n1=0;e<=maxe;e++)n1+=hist[h].ndj[e-base]-hist[h0].ndj[e-base]; n2=hist[h].ndd-hist[h0].ndd; printf("\n\n\n*********** %10lld %10lld %10lld (%d - %d) *********\n",n0,n1,n2,h0+1,h); assert(n0==n1&&n1==n2); } int it=0; double lZ1[nt]; memcpy(lZ1,lZ,sizeof(lZ1)); while(1){ // Get Z_i double w,eps,lZ0[nt]; double zz[h-h0];// h0+1, ..., h eps=0;//1e-100;// for stability memcpy(lZ0,lZ,sizeof(lZ0)); if(pr>=3){for(e=maxe;e>=mine;e--)printf("%6d %12g\n",e,lp[e-base]);printf("\n");} for(i=0;i<nt;i++){ lZ[i]=-1e30; for(e=mine;e<=lqc-mine;e++){// Can optimise, of course if(e<=maxe)j=e-base; else j=lqc-e-base; lZ[i]=addlog(lZ[i],lp[j]-be[i]*e); } }//i if(pr>=2){for(i=0;i<nt;i++)printf("%3d %9.3g %12g\n",i,be[i],lZ[i]);printf("\n");} // Infer p_j for(h1=h0+1;h1<=h;h1++){ for(i=0,z=-1e30;i<nt;i++)z=addlog(z,lZ[i]-hist[h1].lZ[i]); zz[h1-h0-1]=z; } z=-1e30; for(e=mine;e<=maxe;e++){ double r; j=e-base; r=hist[h].ndj[j]-hist[h0].ndj[j]+eps*(h-h0); if(r==0){lp[j]=-1e30;continue;} for(h1=h0+1,w=-1e30;h1<=h;h1++){ for(i=0,x=-1e30;i<nt;i++){ y=-be[i]*e; if(e<lqc-e)y=addlog(y,-be[i]*(lqc-e)); x=addlog(x,y-hist[h1].lZ[i]); } w=addlog(w,log(hist[h1].ndd-hist[h1-1].ndd+eps*erange)-zz[h1-h0-1]+x); } lp[j]=log(r)-w; z=addlog(z,lp[j]+log(2)*(2*e<lqc));// weight by symmetry factor }//e z-=NV*log(2);// 2^NV states altogether for(e=mine;e<=maxe;e++)lp[e-base]-=z; for(i=0,x=0;i<nt;i++)x=MAX(x,fabs(lZ[i]-lZ0[i])); it++; if(it>=5&&x<1e-3)break; }//while //for(i=0;i<nt;i++)lZ[i]=0.5*lZ[i]+0.5*lZ1[i]; for(h1=h;h1<=h;h1++){ for(i=0,z=-1e30;i<nt;i++)z=addlog(z,lZ[i]-hist[h1].lZ[i]); for(i=0;i<nt;i++)printf("%8.3g ",lZ[i]-hist[h1].lZ[i]-z);printf(" logprob(beta)\n"); } if(0)for(e=maxe;e>=mine;e-=10){ int e1,e2,ok; e2=MAX(e-9,mine); for(e1=e,ok=0;e1>=e2;e1--)if(lp[e1-base])ok=1; if(ok){ printf("%6d ... 
%6d: ",e,e2); for(e1=e,ok=0;e1>=e2;e1--)printf("%9.3g ",lp[e1-base]);printf("\n"); } } for(i=0;i<nt-1;i++){ double x0,x1,x2; x0=x1=x2=-1e30; for(e=mine;e<=lqc-mine;e++){ if(e<=maxe)j=e-base; else j=lqc-e-base; x0=addlog(x0,lp[j]*2-(be[i]+be[i+1])*e); x1=addlog(x1,lp[j]*2-be[i]*2*e); x2=addlog(x2,lp[j]*2-be[i+1]*2*e); } ovl[i]=exp(x0-(x1+x2)/2); } tim2+=cpu(); } if(1){ for(i=0;i<nt;i++){ double e0,e1,e2; e0=hist[h].nid[i]-hist[h0].nid[i]; e1=hist[h].ten1[i]-hist[h0].ten1[i]; e2=hist[h].ten2[i]-hist[h0].ten2[i]; een[i]=e1/e0; ven[i]=(e2-e1*e1/e0)/(e0-1); veo[i]=(e2-e1*e1/e0)/(e0-1)/e0; } printf("\n"); for(i=0;i<nt;i++)printf("%8.3f ",be[i]);printf(" be[]\n"); printf(" ");for(i=0;i<nt-1;i++)printf(" %8.3f",(hist[h].ex1[i]-hist[h0].ex1[i])/(hist[h].ex0[i]-hist[h0].ex0[i]));printf(" exch[]\n"); for(i=0;i<nt;i++)printf("%8.2f ",een[i]);printf(" Mean energy\n"); for(i=0;i<nt;i++)printf("%8.4f ",sqrt(ven[i]));printf(" Std dev en\n"); if(0){for(i=0;i<nt;i++)printf("%8.4f ",sqrt(veo[i]));printf(" Std error (uncorrected for eqbn)\n");} for(i=0;i<nt;i++)printf("%8.3g ",(double)(hist[h].nid[i]-hist[h0].nid[i]));printf(" nid\n"); for(i=0;i<nt;i++)printf("%8g ",lZ[i]);printf(" log(Z)\n"); printf(" ");for(i=0;i<nt-1;i++)printf(" %8.3f",ovl[i]);printf(" Overlap\n"); for(e=MIN(mine+nt-1,maxe);e>=mine;e--)printf("%8d ",e);printf(" Energy\n"); for(e=MIN(mine+nt-1,maxe);e>=mine;e--)if(lp[e-base]>-1e10)printf("%8.2f ",lp[e-base]); else printf(" zilch "); printf(" log(occupancy)\n"); for(e=MIN(mine+nt-1,maxe);e>=mine;e--)if(lp[e-base]>-1e10)printf("%8.2f ",lp[e-base]-lp[mine-base]); else printf(" zilch "); printf(" same rel gr st\n"); for(e=MIN(mine+nt-1,maxe);e>=mine;e--)printf("%8.3g ",(double)(hist[h].ndj[e-base]-hist[h0].ndj[e-base]));printf(" ndj\n"); for(e=mine,n=0;e<=maxe;e++)n+=hist[h].ndj[e-base]-hist[h0].ndj[e-base]>0; printf("min_en=%d, max_en=%d, nnz_en=%d, N=%d, nt=%d, genp[]=",mine,maxe,n,N,nt); for(i=0;i<ngp;i++)printf("%g%s",genp[i],i<ngp-1?",":""); printf(", CPU=%.2fs, CPU_EMC=%.2fs, CPU_Z=%.2fs, CPU/EMCit=%.3gs, nit=%g, centre_energy=%g, nind=%d\n", cpu()-tim0,tim1,tim2,tim1/nit,(double)nit,lqc/2.,nis); prtimes(); if(outprobfn){ fp=fopen(outprobfn,"w"); for(e=maxe;e>=mine;e--)if(lp[e-base]>-1e10)fprintf(fp,"%6d %12.3f\n",e,lp[e-base]); fclose(fp); } fflush(stdout); printed=1; } } printf("CPU %g\n",cpu()-tim0); freegibbstables(nt,gt); } // Use EMC to find a state with energy<=bv from a clean start int opt4a(int weightmode,int tree,int betaskip,int bv,int64*nit,int qu,int pr){ double *be;// Set of betas int nt;// Number of temperatures (betas) be=loadbetaset(weightmode,betaskip,&nt); typedef struct { int X[NBV];// State int e;// Energy } tstate; // Tempering state tstate sbuf[nt],ts; int i,v; double del; gibbstables*gt; for(i=0;i<nt;i++)be[i]/=qu; if(pr>=2){ printf("Number of temperatures: %d\n",nt); for(i=0;i<nt;i++)printf("%8.3f ",be[i]);printf(" be[]\n"); printf("Monte Carlo mode: %s\n",tree?"tree":"single-vertex"); printf("Randstart: %d\n",RANDSTART); printf("Tree mode: %d\n",tree); } gt=initgibbstables(nt,be,tree); for(i=0;i<nt;i++){ init_state();memcpy(sbuf[i].X,XBa,NBV*sizeof(int)); sbuf[i].e=val(); } while(1){ for(i=0;i<nt;i++){ memcpy(XBa,sbuf[i].X,NBV*sizeof(int)); switch(tree){ case 0: simplegibbssweep(&gt[i]); break; case 1: tree1gibbs(randint(2),randint(2),randint(N),&gt[i]); break; } if(nit)(*nit)++; v=val();if(v<=bv){freegibbstables(nt,gt);return v;} sbuf[i].e=v; memcpy(sbuf[i].X,XBa,NBV*sizeof(int)); } for(i=0;i<nt-1;i++){ del=(be[i+1]-be[i])*(sbuf[i].e-sbuf[i+1].e); 
if(del<0||randfloat()<exp(-del)){ ts=sbuf[i];sbuf[i]=sbuf[i+1];sbuf[i+1]=ts; //ex1[i]++; } } } } // Use EMC to find a state with energy<=bv from a clean start // Use parallel copies to cope with possible pathological running-time distribution: // T*log_2(T) method int opt5a(int weightmode,int tree,int betaskip,int bv,int64*nit,int qu,int pr){ double *be;// Set of betas int nt;// Number of temperatures (betas) be=loadbetaset(weightmode,betaskip,&nt); const int maxbatches=31; typedef struct { int X[NBV];// State int e;// Energy } tstate; // Tempering state tstate ts,*sbuf,(*(batch[maxbatches]))[nt]; int i,r,s,v,r0,r1; int64 it,n; double del; gibbstables*gt; for(i=0;i<nt;i++)be[i]/=qu; if(pr>=2){ printf("Number of temperatures: %d\n",nt); for(i=0;i<nt;i++)printf("%8.3f ",be[i]);printf(" be[]\n"); printf("Monte Carlo mode: %s\n",tree?"tree":"single-vertex"); printf("Randstart: %d\n",RANDSTART); printf("Tree mode: %d\n",tree); } gt=initgibbstables(nt,be,tree); for(r=0;r<maxbatches;r++){ batch[r]=(tstate(*)[nt])malloc((1ULL<<r)*nt*sizeof(tstate));assert(batch[r]); for(s=0;s<(1<<r);s++){ for(i=0;i<nt;i++){ init_state();memcpy(batch[r][s][i].X,XBa,NBV*sizeof(int)); batch[r][s][i].e=val(); } } for(r0=0;r0<=r;r0++){ if(r0<r)n=1<<(r-1-r0); else n=1; n*=(tree?100:10000); for(s=0;s<(1<<r0);s++){ sbuf=batch[r0][s]; for(it=0;it<n;it++){ for(i=0;i<nt;i++){ memcpy(XBa,sbuf[i].X,NBV*sizeof(int)); switch(tree){ case 0: simplegibbssweep(&gt[i]); break; case 1: tree1gibbs(randint(2),randint(2),randint(N),&gt[i]); break; } if(nit)(*nit)++; v=val();if(v<=bv){ freegibbstables(nt,gt); for(r1=0;r1<=r;r1++)free(batch[r1]); return v; } sbuf[i].e=v; memcpy(sbuf[i].X,XBa,NBV*sizeof(int)); }//i for(i=0;i<nt-1;i++){ del=(be[i+1]-be[i])*(sbuf[i].e-sbuf[i+1].e); if(del<0||randfloat()<exp(-del)){ ts=sbuf[i];sbuf[i]=sbuf[i+1];sbuf[i+1]=ts; //ex1[i]++; } }//i }//it }//s }//r0 } assert(0); } // Find TTS using EMC void opt4(int weightmode,int pr,int tns,int tree,int betaskip,int bv,double maxt,int tlogt){ int ns,cv,pri,last,qu; int64 nit; double tim0,tim1,t1,tt,now; qu=energyquantum(); printf("Monte Carlo mode: %s\n",tree?"tree":"single-vertex"); printf("Randstart: %d\n",RANDSTART); printf("Betaskip: %d\n",betaskip); printf("Target number of presumed optima: %d\n",tns); printf("Initial best value: %d\n",bv); printf("Treemode: %s\n",tree?"yes":"no"); printf("Energy quantum: %d\n",qu); printf("Max time: %gs\n",maxt); printf("TlogT mode: %d\n",tlogt); initrandtab(50000); ns=0;tim0=tim1=cpu();t1=0;nit=0; printf(" Iterations T bv ns t(bv) t(all) t(bv)/ns its/ns\n");fflush(stdout); while(ns<tns){ if(tlogt)cv=opt5a(weightmode,tree,betaskip,bv,&nit,qu,pr); else cv=opt4a(weightmode,tree,betaskip,bv,&nit,qu,pr); now=cpu();tt=now-tim0; last=(ngp>=4&&tt>maxt); pri=(cv<bv||tt>=t1||last); if(cv<bv){ns=0;tim1=now;nit=0;bv=cv;} else ns++; if(pri||ns==tns){ printf("%12lld %d %10d %6d %8.2f %8.2f %8.3g %8.3g\n", nit,tree,bv,ns,now-tim1,tt,(now-tim1)/ns,nit/(double)ns); t1=MAX(tt*1.1,tt+0.5); } fflush(stdout); if(last)break; } printf("Time to solution %gs, assuming true minimum is %d. 
Iterations/soln = %g\n",(cpu()-tim1)/ns,bv,nit/(double)ns); } void SQA(int weightmode,int tree,double beta,int P){ int k,k0,k1,o,v,x,y,qu,mine,pri; int64 nit; double beta_red;// intra-slice (reduced) beta double JP,EJ,Gamma; double tim0,t1,tt; gibbstables*gt; typedef struct { int Xplus[(N+2)*N*2]; int *X;// State int e;// Energy } istate; // Imaginary time slice state istate sl[P]; qu=energyquantum(); printf("Intra-slice Monte Carlo mode: %s\n",tree?"tree":"single-vertex"); printf("Overall beta: %g\n",beta); beta_red=beta/P; printf("Intra-slice beta: %g\n",beta_red); //printf("Initial best value: %d\n",bv); printf("Treemode: %s\n",tree?"yes":"no"); printf("Energy quantum: %d\n",qu); //printf("Max time: %gs\n",maxt); printf("Imaginary time slices: %d\n",P); initrandtab(50000); init_state();mine=val(); // Consider pre-annealing at this point for(k=0;k<P;k++){// Initialise all slices to the same state memset(sl[k].Xplus,0,sizeof(sl[k].Xplus)); sl[k].X=sl[k].Xplus+N*2; memcpy(sl[k].X,XBa,NBV*sizeof(int)); sl[k].e=val(); } gt=initgibbstables(1,&beta_red,tree); long double *etab=gt->etab; unsigned char (*septab0)[16][4]=gt->septab0; signed char (*septab2a)[16][16][2]=gt->septab2a; nit=0; tim0=cpu();t1=0; for(Gamma=3;Gamma>1e-3;Gamma/=1.001){ JP=-(1/2.)*log(tanh(Gamma*beta_red)); EJ=exp(JP); tt=cpu()-tim0; if(tt>=t1){pri=1;t1=MAX(tt*1.1,tt+1);}else pri=0; for(k=0;k<P;k++){memcpy(XBa,sl[k].X,NBV*sizeof(int));v=sl[k].e=val();mine=MIN(mine,v);} if(pri){ printf("\n"); printf("Gamma = %g\n",Gamma); printf("J_perp/PT = %g\n",JP); printf("Min energy seen = %d\n",mine); printf("Iterations: %lld\n",nit); for(k=0;k<P;k++)printf("%5d ",sl[k].e);printf("\n"); } // Intra-slice sweep k0=randint(P); for(k1=0;k1<P;k1++){ k=(k0+k1)%P; memcpy(XBa,sl[k].X,NBV*sizeof(int)); switch(tree){ case 0: assert(0);// not yet done simplegibbssweep_sqa(gt); break; case 1: tree1gibbs_sqa(randint(2),randint(2),randint(N),gt,EJ,EJ,sl[(k+P-1)%P].X,sl[(k+1)%P].X); break; } sl[k].e=val(); memcpy(sl[k].X,XBa,NBV*sizeof(int)); } if(pri){for(k=0;k<P;k++)printf("%5d ",sl[k].e);printf("\n");} // Inter-slice sweep for(x=0;x<N;x++)for(y=0;y<N;y++)for(o=0;o<2;o++){ int a,b,c,i,s,en,enl,enr,eno,hs[P][2][2]; long double z,Z,ZZ0[2][2],ZZ1[2][2]; en=enc(x,y,o);eno=enc(x,y,1-o); enl=enc(x-(o==0),y-(o==1),o); enr=enc(x+(o==0),y+(o==1),o); // hs[k][a][b] = choice of b_{k-1} given b_0=a, b_k=b (k>0) if(randptr>randlength-16*P)randptr=randint(randlength-16*P+1); for(i=0;i<4;i++){ // Couple b_k = (bit i of sl[k].en) for k=0,...,P-1 with coupling constant EJ // Let e_k denote the external vertices on slice k for(k=0;k<P;k++){ int *X=sl[k].X; s=septab0[X[enl]][X[enr]][i]; if(k==0){ // Initialise ZZ0[a][b]=delta_{ab} Z( b_0=a---e_0 ) ZZ0[0][1]=ZZ0[1][0]=0; for(a=0;a<2;a++)ZZ0[a][a]=etab[septab2a[en][X[eno]][s][a]]; }else{ // ZZ0[a][b] = Z( b_0---...---b_{k-1} + b_0---e_0,...,b_{k-1}---e_{k-1} given b_0=a and b_{k-1}=b ) // a=b_0, b=b_{k-1}, c=b_k for(a=0;a<2;a++)for(c=0;c<2;c++){ ZZ1[a][c]=0; for(b=0;b<2;b++)ZZ1[a][c]+=ZZ0[a][b]*(c==b?EJ:1/EJ); z=RANDFLOAT*ZZ1[a][c]; hs[k][a][c]=(z>=ZZ0[a][0]*(c==0?EJ:1/EJ)); } // ZZ1[a][b] = Z( b_0---...---b_k + b_0---e_0,...,b_{k-1}---e_{k-1} given b_0=a and b_k=b ) for(c=0;c<2;c++){ Z=etab[septab2a[en][X[eno]][s][c]]; for(a=0;a<2;a++)ZZ1[a][c]*=Z; } // ZZ1[a][b] = Z( b_0---...---b_k + b_0---e_0,...,b_k---e_k given b_0=a and b_k=b ) for(a=0;a<2;a++)for(b=0;b<2;b++)ZZ0[a][b]=ZZ1[a][b]; } }//k Z=(ZZ0[0][0]+ZZ0[1][1])*EJ+(ZZ0[0][1]+ZZ0[1][0])/EJ; z=RANDFLOAT*Z; for(a=0;a<2;a++)for(b=0;b<2;b++){// Get choice of b_0, 
b_{P-1} z-=ZZ0[a][b]*(a==b?EJ:1/EJ); if(z<0||(a==1&&b==1))goto el0; } el0:; for(k=P-1;k>=0;k--){ // a=b_0, b=b_k sl[k].X[en]=(sl[k].X[en]&~(1<<i))|(b<<i); if(k==0)break; b=hs[k][a][b];assert(b==0||b==1); } }//i }//x,y,o nit++; }//Gamma freegibbstables(1,gt); } // Pseudo parallel tempering // Assumes geometric pp[i], uses ex[0] (time-weighted average) int pptb(int weightmode,int strat,int bv,int qu,int64*nit){ int f,i,nt,it,upd; const int maxnt=100; typedef struct { int X[NBV];// State int e;// Energy } tstate; // Tempering state tstate sbuf[maxnt],ts; double ex[maxnt-1],ex0[maxnt-1]; double ff,ttp,pp0,decay,pp[maxnt]; for(i=0;i<maxnt-1;i++){ex[i]=0.5;ex0[i]=1;} nt=3;ff=0.5; pp0=ngp>4?genp[4]:0.5; for(i=0;i<nt;i++){ init_state();memcpy(sbuf[i].X,XBa,NBV*sizeof(int));sbuf[i].e=val();pp[i]=pp0*pow(ff,i); } it=0;// Total iterations ttp=ngp>3?genp[3]:0.25;// target transition prb upd=ngp>6?genp[6]:(strat%10==3?100:10);// Update every upd steps decay=ngp>5?genp[5]:0.; printf("ttp=%g pp0=%g upd=%d decay=%g\n",ttp,pp0,upd,decay); while(1){ it++; for(i=0;i<nt;i++){ memcpy(XBa,sbuf[i].X,NBV*sizeof(int)); pertstate(pp[i]); sbuf[i].e=stabletreeexhaust(val(),strat%10-2,0); if(nit)(*nit)++; if(sbuf[i].e<=bv)return sbuf[i].e; memcpy(sbuf[i].X,XBa,NBV*sizeof(int)); } for(i=0;i<nt-1;i++){ ex0[i]++; if((f=(sbuf[i].e<=sbuf[i+1].e))){ts=sbuf[i];sbuf[i]=sbuf[i+1];sbuf[i+1]=ts;ex[i]++;} } if(it%upd==0){// adjust all pp[] for(i=0;i<nt;i++)printf(" %8d",sbuf[i].e);printf("\n"); for(i=0;i<nt;i++)printf(" %8.5f",pp[i]);printf("\n"); printf(" ");for(i=0;i<nt-1;i++)printf(" %8.3f",ex[i]/ex0[i]);printf("\n"); printf("\n"); fflush(stdout); double hm,fadj,maxadj=1.1; switch(2){ case 0:// HM for(i=0,hm=0;i<nt-1;i++){ if(ex[i]==0){hm=0;fadj=maxadj;goto adj;} hm+=(ex0[i]+0.0)/(ex[i]+0.0); } hm=(nt-1)/hm; break; case 1:// AM for(i=0,hm=0;i<nt-1;i++)hm+=ex[i]/ex0[i]; hm=hm/(nt-1); break; case 2:// First hm=ex[0]/ex0[0]; break; } fadj=pow(ttp/hm,0.1);fadj=MIN(fadj,maxadj);fadj=MAX(fadj,1/maxadj); adj: printf("hm=%g fadj=%g\n",hm,fadj); ff=MIN(fadj*ff,1); for(i=0;i<nt;i++)pp[i]=0.5*pow(ff,i); for(i=0;i<maxnt-1;i++){ex[i]*=decay;ex0[i]*=decay;} if(nt<maxnt&&pp[nt-1]*NBV>0.5){ printf("Introducing level %d at it=%d\n",nt+1,it); sbuf[nt]=sbuf[nt-1]; pp[nt]=pp[nt-1]*.5; nt++; } if(0&&nt>=3&&pp[nt-1]*NBV<0.1){ printf("Removing level %d at it=%d\n",nt,it); nt--; //memmove(sbuf,sbuf+1,nt*sizeof(tstate)); } } } } typedef struct { double p;// perturbation prob of lower-ranked potential exchangee int n0,n1;// How many resulted in non-exchange, exchange } epdat; int cmpep(const void*p,const void*q){ double z=((epdat*)p)->p-((epdat*)q)->p; return (z>0)-(z<0); } double dolag(double m,double g0,double pp0,int n,epdat*epl,double ttp,double*rx,int pr){ int i,gt0,gt1; double g,p,x,p1,x1; x=0;p=0;g=g0; gt0=(p>=ttp);// p>=ttp flag if(rx)*rx=-1; for(i=0;i<=n;i++){ if(i<n)x1=epl[i].p; else x1=pp0; p1=p+g*(x1-x);gt1=(p1>=ttp); if(rx&&gt0==0&&gt1==1){assert(p1>p);*rx=(ttp-p)/(p1-p)*(x1-x)+x;if(pr)printf("Interp ttp=%g from (%g,%g) to (%g,%g) --> %g\n",ttp,x,p,x1,p1,*rx);} x=x1;p=p1;gt0=gt1; if(pr)printf("%12g ... 
%12g %12g\n",g,x,p); if(p>0.5)p=0.5; if(i==n)break; //if(epl[i].f)g-=1/(m*p); else g+=1/(m*(1-p)); g+=(epl[i].n0/(1-p)-epl[i].n1/p)/m; if(g<0)g=0; } return p; } // Pseudo parallel tempering // Assumes geometric pp[i], uses ex[0] (historical) int pptc(int weightmode,int strat,int bv,int qu,int64*nit){ int f,i,j,n,nh,nt,upd,itnl,nn[2]; int64 t,it,nu; const int maxnt=1000,maxhist=10000,maxrwhist=10000; typedef struct { int X[NBV];// State int e;// Energy double tb;// tiebreaker int i;// original index } tstate; // Tempering state int cmpts(const void*p,const void*q){ tstate *p0=(tstate*)p,*q0=(tstate*)q; double z=q0->e+q0->tb-(p0->e+p0->tb); return (z>0)-(z<0); } int rwhist[maxrwhist][maxnt]; tstate sbuf[maxnt],ts; epdat epl[maxhist]; double ex[maxnt-1],ex0[maxnt-1],ef,e0[maxnt],e1[maxnt]; double ff,ttp,pp0,pp[maxnt]; double g0;// Initial gradient of inferred probability function double mass;// Mass in Lagrangian for ditto double nlthr; double tim0,tim1; for(i=0;i<maxnt-1;i++){ex[i]=ex0[i]=0;} for(i=0;i<maxnt;i++)e0[i]=e1[i]=0;ef=0.05; nt=2;ff=1; pp0=ngp>4?genp[4]:0.5; for(i=0;i<nt;i++){ init_state();memcpy(sbuf[i].X,XBa,NBV*sizeof(int));sbuf[i].e=val();pp[i]=pp0*pow(ff,i); } it=0;// Total iterations nu=0;// Number of updates ttp=ngp>3?genp[3]:0.35;// target transition prb itnl=ngp>5?genp[5]:(strat%10==3?50:5);// Min its to introduce new level upd=ngp>6?genp[6]:(strat%10==3?100:10);// Update density factor nlthr=ngp>7?genp[7]:0.5;// New level energy gap threshold if(deb>=2)printf("ttp=%g pp0=%g upd=%d itnl=%d nlthr=%g\n",ttp,pp0,upd,itnl,nlthr); nh=0; mass=10;g0=1; nn[0]=nn[1]=0;// number of non-exchanges and exchanges between level 0 and 1 since last pp[] adjustment tim0=tim1=0; while(1){ tim0-=cpu(); for(i=0;i<nt;i++){ memcpy(XBa,sbuf[i].X,NBV*sizeof(int)); pertstate(pp[i]); sbuf[i].e=stabletreeexhaust(val(),strat%10-2,0); if(nit)(*nit)++; if(sbuf[i].e<=bv){ if(deb>=2)printf("Final nt=%d its=%lld nu=%lld\n",nt,it,nu); if(deb>=3&&it<=maxrwhist){ j=i;printf("RWHIST"); while(1){ printf(" %d",j); it--;if(it<0)break; j=rwhist[it][j]; } printf("\n"); } return sbuf[i].e; } memcpy(sbuf[i].X,XBa,NBV*sizeof(int)); } for(i=0;i<nt;i++)sbuf[i].i=i; if(1){ for(i=0;i<nt;i++)sbuf[i].tb=randfloat(); for(i=0;i<nt-1;i++){ ex0[i]++; //f=(sbuf[i].e<=sbuf[i+1].e);// different equality case f=(sbuf[i].e<sbuf[i+1].e||(sbuf[i].e==sbuf[i+1].e&&randint(2))); if(f){ts=sbuf[i];sbuf[i]=sbuf[i+1];sbuf[i+1]=ts;ex[i]++;} if(i==0)nn[f]++; //if(f)printf("X"); else printf("."); } //printf("\n"); }else{ for(i=0;i<nt;i++)sbuf[i].tb=randfloat(); qsort(sbuf,nt,sizeof(tstate),cmpts); } if(it<maxrwhist)for(i=0;i<nt;i++)rwhist[it][i]=sbuf[i].i; for(i=0;i<nt;i++){e0[i]=(1-ef)*e0[i]+ef*1;e1[i]=(1-ef)*e1[i]+ef*sbuf[i].e;} it++; tim0+=cpu(); if(upd>0)t=floor(sqrt(upd*it+upd-0.5)); else t=0;// upd=0 is a special case meaning always update if(t*t>=upd*it){// adjust all pp[] based on getting pp[1] right and using geometric sequence double g,p,g1,gf,p0,p1,pp1; if(nh<maxhist)j=nh; else j=randint(maxhist); epl[j].p=pp[1];epl[j].n0=nn[0];epl[j].n1=nn[1];nn[0]=nn[1]=0;nh++; n=MIN(nh,maxhist); if(deb>=4)printf("%12lld %9.2f : %12lld %9.2f + %9.2f : hist %d (max %d)\n",*nit,cpu(),it,tim0,tim1,nh,maxhist); tim1-=cpu(); qsort(epl,n,sizeof(epdat),cmpep); //for(i=0;i<n;i++)printf("EPL %18.14g %6d %6d\n",epl[i].p,epl[i].n0,epl[i].n1); if(genp[2]==2){ gf=1.1; p0=dolag(mass,g0,pp0,n,epl,ttp,0,0); if(p0<0.499999){ while(1){ g1=g0*gf; p1=dolag(mass,g1,pp0,n,epl,ttp,0,0); if(g0>100||p1>0.499999)break; g0=g1;p0=p1; } }else{ while(1){ g1=g0;p1=p0; g0=g1/gf; 
p0=dolag(mass,g0,pp0,n,epl,ttp,0,0); if(g0<1e-3||p0<0.499999)break; } } while(p1-p0>1e-3){ g=(g0+g1)/2; p=dolag(mass,g,pp0,n,epl,ttp,0,0); if(p>0.499999){g1=g;p1=p;}else{g0=g;p0=p;} } // (g1 might be an overshoot, because the dolag() function is eventually constant in g, but g0 should be OK) //printf("Using g0=%g\n",g0); p=dolag(mass,g0,pp0,n,epl,ttp,&pp1,0); //printf("Chose p=%g\n",pp1); ff=pp1/pp0; }else ff=ttp; for(i=1;i<nt;i++)pp[i]=pp[i-1]*ff; if(deb>=4){ for(i=0;i<nt;i++)printf(" %8d",sbuf[i].e);printf("\n"); for(i=0;i<nt;i++)printf(" %8.1f",e1[i]/e0[i]);printf("\n"); for(i=0;i<nt;i++)printf(" %8.5f",pp[i]);printf("\n"); printf(" ");for(i=0;i<nt-1;i++)printf(" %8.3f",ex[i]/(double)ex0[i]);printf("\n"); } if(it>=itnl&&nt<maxnt&&pp[nt-1]*NBV>0.5&&e1[nt-2]/e0[nt-2]-e1[nt-1]/e0[nt-1]>nlthr){ if(deb>=4)printf("Introducing level %d\n",nt+1); if(it<maxrwhist)rwhist[it-1][nt]=rwhist[it-1][nt-1]; sbuf[nt]=sbuf[nt-1]; pp[nt]=pp[nt-1]*.5; nt++; } if(0&&nt>=3&&pp[nt-1]*NBV<0.1){ printf("Removing level %d\n",nt); nt--; //memmove(sbuf,sbuf+1,nt*sizeof(tstate)); } nu++; tim1+=cpu(); if(deb>=4){printf("\n");fflush(stdout);} } } } // Pseudo parallel tempering void ppt(int weightmode,int strat,int tns,int bv,double maxt){ int ns,cv,pri,last,qu; int64 nit; double tim0,tim1,t1,tt,now; qu=energyquantum(); printf("Target number of presumed optima: %d\n",tns); printf("Initial best value: %d\n",bv); printf("Energy quantum: %d\n",qu); printf("Max time: %gs\n",maxt); ns=0;tim0=tim1=cpu();t1=0;nit=0; printf(" Iterations bv ns t(bv) t(all) t(bv)/ns its/ns\n");fflush(stdout); while(ns<tns){ switch((int)(genp[2])){ case 1: cv=pptb(weightmode,strat,bv,qu,&nit); break; case 2: case 3: cv=pptc(weightmode,strat,bv,qu,&nit); break; default: assert(0); } now=cpu();tt=now-tim0; last=(tt>maxt); pri=(cv<bv||tt>=t1||last); if(cv<bv){ns=0;tim1=now;nit=0;bv=cv;} else ns++; if(pri||ns==tns){ printf("%12lld %10d %6d %8.2f %8.2f %8.3g %8.3g RESULTS\n", nit,bv,ns,now-tim1,tt,(now-tim1)/ns,nit/(double)ns); t1=MAX(tt*1.1,tt+0.5); } fflush(stdout); if(last)break; } printf("Time to solution %gs, assuming true minimum is %d. 
Iterations/soln = %g\n",(cpu()-tim1)/ns,bv,nit/(double)ns); } int main(int ac,char**av){ int opt,wn,mode,strat,weightmode,centreflag,numpo,targenergy; double mint,maxt; char *inprobfile,*outprobfile,*outstatefile,*genfile; wn=-1;inprobfile=outprobfile=outstatefile=0;seed=time(0);seed2=0;mint=10;maxt=1e10;statemap[0]=0;statemap[1]=1; weightmode=7;centreflag=0;mode=1;N=8;strat=13;deb=1;ext=1;numpo=500;ngp=0;genfile=0;targenergy=-1000000000; while((opt=getopt(ac,av,"ce:f:m:n:N:o:O:p:P:s:S:t:T:v:w:x:X:"))!=-1){ switch(opt){ case 'c': centreflag=1;break; case 'e': targenergy=atoi(optarg);break; case 'f': genfile=strdup(optarg);break; case 'm': mode=atoi(optarg);break; case 'n': wn=atoi(optarg);break; case 'N': N=atoi(optarg);break; case 'o': outprobfile=strdup(optarg);break; case 'O': outstatefile=strdup(optarg);break; case 'p': numpo=atoi(optarg);break; case 'P': { char *l=optarg; while(1){ assert(ngp<MAXNGP);genp[ngp++]=atof(l); l=strchr(l,',');if(!l)break; l++; } } break; case 's': { char *l=optarg; seed=atoi(l); l=strchr(l,',');if(l)seed2=atoi(l+1); } break; case 'S': strat=atoi(optarg);break; case 't': mint=atof(optarg);break; case 'T': maxt=atof(optarg);break; case 'v': deb=atoi(optarg);break; case 'w': weightmode=atoi(optarg);break; case 'x': statemap[0]=atoi(optarg);break; case 'X': ext=atof(optarg);break; default: fprintf(stderr,"Usage: %s [OPTIONS] [inputproblemfile]\n",av[0]); fprintf(stderr," -c Centre energy (default false). Adds constant to energy so that energies {-1,-2,-3,...}\n"); fprintf(stderr," transform to {1,2,3,...} or {0,1,2,...} (according to parity) when you flip the spins of one\n"); fprintf(stderr," half of the bipartite graph. Used to make QUBO mode give answers comparable to Ising mode.\n"); fprintf(stderr," -e Target energy (default no target). Stop searching when find this energy.\n"); fprintf(stderr," -f general file used in some modes\n"); fprintf(stderr," -m mode of operation:\n"); fprintf(stderr," 0 Try to find minimum value by heuristic search\n"); fprintf(stderr," 1 Try to find rate of solution generation by repeated heuristic search (default)\n"); fprintf(stderr," 2 Try to find expected minimum value by heuristic search\n"); fprintf(stderr," 3 (no longer used)\n"); fprintf(stderr," 4 Consistency checks\n"); fprintf(stderr," 5 Full exhaust (proving)\n"); fprintf(stderr," -n num working nodes (default all)\n"); fprintf(stderr," -N size of Chimera graph (default 8)\n"); fprintf(stderr," -o output problem (weight) file\n"); fprintf(stderr," -O output state file\n"); fprintf(stderr," -p target number of presumed optima for -m1\n"); fprintf(stderr," -P x[,y[,z...]] general parameters, various uses in some modes\n"); fprintf(stderr," -s seed[,seed2]\n"); fprintf(stderr," -S search strategy for heuristic search (0,1,2)\n"); fprintf(stderr," 0 Exhaust K44s repeatedly\n"); fprintf(stderr," 1 Exhaust lines repeatedly\n"); fprintf(stderr," 3 Exhaust maximal treewidth 1 subgraphs\n"); fprintf(stderr," 4 Exhaust maximal treewidth 2 subgraphs\n"); fprintf(stderr," 5 Exhaust maximal treewidth 3 subgraphs\n"); fprintf(stderr," 10+n As strategy n but with partial random state init\n"); fprintf(stderr," (Default 13)\n"); fprintf(stderr," -t min run time for some modes\n"); fprintf(stderr," -T max run time for some modes\n"); fprintf(stderr," -v 0,1,2,... 
verbosity level\n"); fprintf(stderr," -w weight creation convention (use with the default -x0 unless otherwise stated)\n"); fprintf(stderr," 0 All of Q_ij independently +/-1\n"); fprintf(stderr," 1 As 0, but diagonal not allowed\n"); fprintf(stderr," 2 Upper triangular\n"); fprintf(stderr," 3 All of Q_ij allowed, but constrained symmetric\n"); fprintf(stderr," 4 Constrained symmetric, diagonal not allowed - the basic Ising model mode\n"); fprintf(stderr," 5 Start with Ising J_ij (i<j) and h_i IID {-1,1} and transform back to QUBO,\n"); fprintf(stderr," ignoring constant term. (Default - meant to be equivalent to McGeoch instances.)\n"); fprintf(stderr," 6 Test case\n"); fprintf(stderr," 7 Start with Ising J_ij (i<j) IID {-1,1} (aka \"no external field\") and transform\n"); fprintf(stderr," back to QUBO form\n"); fprintf(stderr," 8 Start with Ising J_ij (i<j) IID uniform in {-100,-99,...,100} and\n"); fprintf(stderr," transform back to QUBO.\n"); fprintf(stderr," 10 Start with Ising J_ij (i<j) IID uniform in {-n,-n+1,...,n} where n=100 intra-K_44,\n"); fprintf(stderr," n=220 inter-K_44, then transform back to QUBO.\n"); fprintf(stderr," 11 Start with Ising J_ij {i<j} IID uniform on {-n,...,-1,1,...,n} where n=7, then\n"); fprintf(stderr," transform back to QUBO.\n"); fprintf(stderr," 12 True Ising mode J_ij {i<j} IID uniform on {-n,...,-1,1,...,n} where n=7 (use with -x-1)\n"); fprintf(stderr," -x set the lower state value\n"); fprintf(stderr," Default 0 corresponds to QUBO state values in {0,1}\n"); fprintf(stderr," Other common option is -1, corresponding to Ising model state values in {-1,1}\n"); exit(1); } } if(wn<0)wn=NV; else assert(wn<=NV); if(optind<ac)inprobfile=strdup(av[optind]); printf("N=%d\n",N); printf("Mode: %d\n",mode); printf("Seed: %d\n",seed); if(seed2)printf("Seed2: %d\n",seed2); printf("Search strategy: %d\n",strat); if(ngp>0){int i;printf("General parameters:");for(i=0;i<ngp;i++)printf(" %g",genp[i]);printf("\n");} Q=(int(*)[4][7])malloc(NBV*4*7*sizeof(int)); adj=(int(*)[4][7][2])malloc(NBV*4*7*2*sizeof(int)); okv=(int(*)[4])malloc(NBV*4*sizeof(int)); XBplus=(int*)calloc((N+2)*N*2*sizeof(int),1); XBa=XBplus+N*2; QBa=(intqba(*)[3][16][16])malloc(NBV*3*16*16*sizeof(intqba)); ok=(int(*)[16])malloc((NBV+1)*16*sizeof(int)); nok=(int*)malloc((NBV+1)*sizeof(int)); ok2=(int(*)[256])malloc((N*N+1)*256*sizeof(int)); nok2=(int*)malloc((N*N+1)*sizeof(int)); inittiebreaks(); initrand(seed); initgraph(wn); if(inprobfile){ wn=readweights(inprobfile,centreflag);// This overrides current setting of wn }else{ initweights(weightmode,centreflag);printf("Initialising random weight matrix with %d working node%s\n",wn,wn==1?"":"s"); } if(seed2)initrand(random()+seed2); if(0){ int i,j,k; for(i=0;i<N;i++)for(j=0;j<16;j++)for(k=0;k<16;k++)printf("QB(%d,0,0,0,%d,%d)=%d\n",i,j,k,QB(i,0,0,0,j,k)); for(i=0;i<N-1;i++)for(j=0;j<16;j++)for(k=0;k<16;k++)printf("QB(%d,0,0,2,%d,%d)=%d\n",i,j,k,QB(i,0,0,2,j,k)); } printf("%d working node%s out of %d\n",wn,wn==1?"":"s",NV); printf("States are %d,%d\n",statemap[0],statemap[1]); printf("Weight-choosing mode: %d\n",weightmode); if(targenergy>-1000000000)printf("Target energy: %d\n",targenergy); if(outprobfile){writeweights(outprobfile);printf("Wrote weight matrix to file \"%s\"\n",outprobfile);} switch(mode){ case 0:// Find minimum value using heuristic strategy strat, not worrying about independence of subsequent minima opt1(mint,maxt,deb,1,0,strat,1000000000,targenergy); break; case 1:;// Find rate of solution generation, ensuring that minima are 
independent { int v; double tts; v=opt1(0.5,maxt,deb,numpo,&tts,strat,ngp>0?genp[0]:1000000000,targenergy); printf("Time to solution %gs, assuming true minimum is %d\n",tts,v); break; } case 2:;// Find average minimum value { int v; double s0,s1,s2,va; s0=s1=s2=0; while(1){ initweights(weightmode,centreflag); v=opt1(0,maxt,0,500,0,strat,1000000000,targenergy); s0+=1;s1+=v;s2+=v*v;va=(s2-s1*s1/s0)/(s0-1); printf("%12g %12g %12g %12g\n",s0,s1/s0,sqrt(va),sqrt(va/s0)); } } break; case 4:;// Consistency checks { opt1(mint,maxt,1,1,0,strat,1000000000,targenergy); printf("Full exhaust %d\n",stripexhaust(0,0,N,0)); int o,v,c0,c1; for(o=0;o<2;o++)for(c0=0;c0<N;c0++)for(c1=c0+1;c1<=N;c1++){ v=stripexhaust(o,c0,c1,0)+stripval(o,0,c0)+stripval(o,c1,N); printf("Strip %2d %2d %2d %6d\n",o,c0,c1,v); } break; } case 5:// Prove using subset method v2 { int v; double t0; printf("Restricted set exhaust\n"); t0=cpu(); v=fullexhaust(); printf("Optimum %d found in %gs\n",v,cpu()-t0); break; } case 6: readstate("state");printf("state = %d\n",val()); break; case 8:// timing tests timingtests(strat,mint,maxt); break; case 9: consistencychecks2(weightmode,centreflag,strat,mint,maxt); break; case 10: { int c,r,f[N-1][16][16],g[N-1][16][16]; if(0){ int XBa0[NBV],ok0[NBV][16],nok0[NBV],ok20[N*N][256],nok20[N*N]; intqba QBa0[NBV][3][16][16]; init_state(); memcpy(XBa0,XBa,sizeof(XBa0));memcpy(QBa0,QBa,sizeof(QBa0)); memcpy(ok0,ok,sizeof(ok0));memcpy(nok0,nok,sizeof(nok0)); memcpy(ok20,ok2,sizeof(ok20));memcpy(nok20,nok2,sizeof(nok20)); for(r=0;r<N;r++){ printf("Comb at row %2d\n",r); combLB2(r,f); applyam(2,XBa0,QBa0,ok0,nok0,ok20,nok20); combLB(r,2,(int*)g); applyam(0,XBa0,QBa0,ok0,nok0,ok20,nok20); if(1)for(c=0;c<N-1;c++){ printf("MEMCMP %d\n",memcmp(f[N-2-c],g[c],16*16)); if(0){ pr16(f[N-2-c]);printf("\n"); pr16(g[c]); printf("\n---------------------\n\n"); } } printf("\n"); } } { int w,v0; init_state(); v0=lin2LB(); printf("lin2 = %d\n",v0); for(w=2;w<=MIN(N,2);w++){ printf("lin(%d) =",w);fflush(stdout); v0=linLB(w); printf(" %d\n",v0); } } } break; case 11: { int d,i,m,r,v,w,dmax; int64 b,n; double s1; init_state(); //opt1(mint,maxt,deb,1,0,strat,1000000000); for(r=0;r<N;r++){ n=1LL<<(4*N); int g[n]; combLB(r,N,g); for(w=1;w<N;w++){ m=1LL<<(4*w); int f[(N-w+1)*m]; combLB(r,w,f); s1=0;dmax=0; for(b=0;b<n;b++){ v=0; for(i=0;i<=N-w;i++)v+=f[(i<<(4*w))+((b>>(4*i))&(m-1))]; d=g[b]-v;assert(d>=0); //if(w==N-1&&d>0)printf("%d ",d); s1+=d;if(d>dmax)dmax=d; //printf("%10d %10d %10d\n",g[b],v,g[b]-v); } printf("%3d %3d %8d %12g\n",r,w,dmax,s1/n); } }//r } break; case 12: { int i,n,o,t,x,y,nb[16],stats[N][strat][256]; n=0; for(x=0;x<N;x++)for(y=0;y<strat;y++)for(i=0;i<256;i++)stats[x][y][i]=0; for(i=1,nb[0]=0;i<16;i++)nb[i]=nb[i>>1]+(i&1); while(1){ for(x=0,t=0;x<N;x++){XB(x,strat,1)=randnib();t+=nb[XB(x,strat,1)];} if(t>2*N)for(x=0;x<N;x++)XB(x,strat,1)^=15; stripexhaust(1,0,strat,1); for(x=0;x<N;x++)for(y=0;y<strat;y++)stats[x][y][XB(x,y,0)<<4|XB(x,y,1)]++; n++; if(0){ for(y=strat-1;y>=0;y--){ for(x=0;x<N;x++){printf(" ");for(o=0;o<2;o++)printf("%X",XB(x,y,o));} printf("\n"); } printf("\n"); } if(n%10==0){ for(y=strat-1;y>=0;y--){ for(x=0;x<N;x++){ //double p,s; //for(i=0,s=0;i<256;i++)if(stats[x][y][i]){p=stats[x][y][i]/(double)n;s-=p*log(p);} //printf(" %7.3f",s/log(2)); for(i=0,t=0;i<256;i++)if(stats[x][y][i])t++; printf(" %3d",t); } printf("\n"); } printf("\n"); } } } break; case 13: gibbstests(weightmode); break; case 14: binderparamestimate(weightmode,centreflag); break; case 15: 
findexchangemontecarlotemperatureset(); break; case 16: calcbinderratio(weightmode,centreflag); break; case 17: findeqbmusingchisq(weightmode); break; case 18: // See how long it takes to equilibrate, by measuring error in top beta estimate of <E> compared with best found energy // -P<submode:0=tree,1=vertex>,<prlevel>,<allowable abs error>, // <beta or -r for default set for this N with the first/hottest r missing>,<max equilbration size>, // <start eqb size>,<initial vmin> findeqbmusingtopbeta(weightmode); break; case 19: pertandgibbs(genp[0]==0,genp[1],genp[2],genp[3]);// treemode,beta,pert,target energy break; case 20: opt3(weightmode,genp[0]==0,genp[1],genp[2],ngp>3?genp[3]:1000000000,numpo);// treemode,beta,pert[,initial target] break; case 21: findspectrum(weightmode,genp[0]==0,genfile,deb); break; case 22: wanglandau(weightmode); break; case 23: countgroundstates(weightmode,numpo,strat); break; case 24: findspectrum_ds(weightmode,genp[0]==0,genfile,deb); break; case 25: // Optimise using EMC // -P<submode:0=tree,1=vertex>,<-r to use set of betas with the first/hottest r missing>, // <initial bv> (optional), max time (optional), TlogT flag (optional). // Also uses tns = total number of solutions opt4(weightmode,deb,numpo,genp[0]==0,genp[1],ngp>2?genp[2]:1000000000,ngp>3?genp[3]:1e8,ngp>4?genp[4]:0); break; case 26: // Simulated Quantum Annealing SQA(weightmode,genp[0]==0,genp[1],genp[2]); // genp[] = ~tree, beta, K=#imag time steps break; case 27: // Pseudo parallel tempering // -P<initial bv>, max time, 1=pptb/2=pptc, ttp, pp0, // decay(pptb)/itnl(pptc), update-interval(pptb)/update-density(pptc), nlthr(pptc) // Also uses tns = total number of solutions ppt(weightmode,strat,numpo,ngp>0?genp[0]:1000000000,ngp>1?genp[1]:1e100); break; }// mode prtimes(); if(outstatefile)writestate(outstatefile); return 0; }
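/*
 * Illustrative note (not part of the original source): several of the weight modes listed in
 * the usage text above ("start with Ising J_ij ... and transform back to QUBO, ignoring the
 * constant term") refer to the standard substitution s_i = 2*x_i - 1 with x_i in {0,1}.
 * A minimal sketch of that transform is given below; the function name and array layout are
 * hypothetical and are not the exact convention used by initweights().
 */
static void ising_to_qubo_sketch(int n, const double *J, const double *h, double *Q) {
  /* J and Q are n*n row-major; J[i*n+j] is used for i<j only. The additive constant
     sum_{i<j} J_ij - sum_i h_i produced by the substitution is dropped. */
  for (int k = 0; k < n*n; k++) Q[k] = 0;
  for (int i = 0; i < n; i++) {
    Q[i*n+i] += 2*h[i];                   /* h_i s_i      -> 2 h_i x_i          */
    for (int j = i+1; j < n; j++) {
      Q[i*n+j] += 4*J[i*n+j];             /* J_ij s_i s_j -> 4 J_ij x_i x_j     */
      Q[i*n+i] -= 2*J[i*n+j];             /*                 - 2 J_ij x_i       */
      Q[j*n+j] -= 2*J[i*n+j];             /*                 - 2 J_ij x_j       */
    }
  }
}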
atomic_messages.c
// RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s // RUN: %clang_cc1 -verify -fopenmp-simd -ferror-limit 100 %s int foo() { L1: foo(); #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected an expression statement}} { foo(); goto L1; // expected-error {{use of undeclared label 'L1'}} } goto L2; // expected-error {{use of undeclared label 'L2'}} #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected an expression statement}} { foo(); L2: foo(); } return 0; } struct S { int a; }; int readint() { int a = 0, b = 0; // Test for atomic read #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected an expression statement}} ; #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} foo(); #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} a += b; #pragma omp atomic read // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected lvalue expression}} a = 0; #pragma omp atomic read a = b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}} #pragma omp atomic read read a = b; return 0; } int readS() { struct S a, b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}} expected-error@+1 {{unexpected OpenMP clause 'allocate' in directive '#pragma omp atomic'}} #pragma omp atomic read read allocate(a) // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}} // expected-note@+1 {{expected expression of scalar type}} a = b; return a.a; } int writeint() { int a = 0, b = 0; // Test for atomic write #pragma omp atomic write // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected an expression statement}} ; #pragma omp atomic write // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} foo(); #pragma omp atomic write // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected built-in assignment operator}} a += b; #pragma omp atomic write a = 0; 
#pragma omp atomic write a = b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}} #pragma omp atomic write write a = b; return 0; } int writeS() { struct S a, b; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}} #pragma omp atomic write write // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}} // expected-note@+1 {{expected expression of scalar type}} a = b; return a.a; } int updateint() { int a = 0, b = 0; // Test for atomic update #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected an expression statement}} ; #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected built-in binary or unary operator}} foo(); #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected built-in binary operator}} a = b; #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}} a = b || a; #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}} a = a && b; #pragma omp atomic update // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} a = (float)a + b; #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} a = 2 * b; #pragma omp atomic // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} a = b + *&a; #pragma omp atomic update *&a = *&a + 2; #pragma omp atomic update a++; #pragma omp atomic ++a; #pragma omp atomic update a--; #pragma omp atomic --a; 
#pragma omp atomic update a += b; #pragma omp atomic a %= b; #pragma omp atomic update a *= b; #pragma omp atomic a -= b; #pragma omp atomic update a /= b; #pragma omp atomic a &= b; #pragma omp atomic update a ^= b; #pragma omp atomic a |= b; #pragma omp atomic update a <<= b; #pragma omp atomic a >>= b; #pragma omp atomic update a = b + a; #pragma omp atomic a = a * b; #pragma omp atomic update a = b - a; #pragma omp atomic a = a / b; #pragma omp atomic update a = b & a; #pragma omp atomic a = a ^ b; #pragma omp atomic update a = b | a; #pragma omp atomic a = a << b; #pragma omp atomic a = b >> a; // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'update' clause}} #pragma omp atomic update update a /= b; return 0; } int captureint() { int a = 0, b = 0, c = 0; // Test for atomic capture #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected compound statement}} ; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} foo(); #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected built-in binary or unary operator}} a = b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = b || a; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}} b = a = a && b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = (float)a + b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = 
x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = 2 * b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected assignment expression}} a = b + *&a; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected exactly two expression statements}} { a = b; } #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected exactly two expression statements}} {} #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of the first expression}} {a = b;a = b;} #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}} // expected-note@+1 {{expected in right hand side of the first expression}} {a = b; a = b || a;} #pragma omp atomic capture {b = a; a = a && b;} #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} b = a = (float)a + b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = 
x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} b = a = 2 * b; #pragma omp atomic capture // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}} // expected-note@+1 {{expected in right hand side of expression}} b = a = b + *&a; #pragma omp atomic capture c = *&a = *&a + 2; #pragma omp atomic capture c = a++; #pragma omp atomic capture c = ++a; #pragma omp atomic capture c = a--; #pragma omp atomic capture c = --a; #pragma omp atomic capture c = a += b; #pragma omp atomic capture c = a %= b; #pragma omp atomic capture c = a *= b; #pragma omp atomic capture c = a -= b; #pragma omp atomic capture c = a /= b; #pragma omp atomic capture c = a &= b; #pragma omp atomic capture c = a ^= b; #pragma omp atomic capture c = a |= b; #pragma omp atomic capture c = a <<= b; #pragma omp atomic capture c = a >>= b; #pragma omp atomic capture c = a = b + a; #pragma omp atomic capture c = a = a * b; #pragma omp atomic capture c = a = b - a; #pragma omp atomic capture c = a = a / b; #pragma omp atomic capture c = a = b & a; #pragma omp atomic capture c = a = a ^ b; #pragma omp atomic capture c = a = b | a; #pragma omp atomic capture c = a = a << b; #pragma omp atomic capture c = a = b >> a; #pragma omp atomic capture { c = *&a; *&a = *&a + 2;} #pragma omp atomic capture { *&a = *&a + 2; c = *&a;} #pragma omp atomic capture {c = a; a++;} #pragma omp atomic capture {c = a; (a)++;} #pragma omp atomic capture {++a;c = a;} #pragma omp atomic capture {c = a;a--;} #pragma omp atomic capture {--a;c = a;} #pragma omp atomic capture {c = a; a += b;} #pragma omp atomic capture {c = a; (a) += b;} #pragma omp atomic capture {a %= b; c = a;} #pragma omp atomic capture {c = a; a *= b;} #pragma omp atomic capture {a -= b;c = a;} #pragma omp atomic capture {c = a; a /= b;} #pragma omp atomic capture {a &= b; c = a;} #pragma omp atomic capture {c = a; a ^= b;} #pragma omp atomic capture {a |= b; c = a;} #pragma omp atomic capture {c = a; a <<= b;} #pragma omp atomic capture {a >>= b; c = a;} #pragma omp atomic capture {c = a; a = b + a;} #pragma omp atomic capture {a = a * b; c = a;} #pragma omp atomic capture {c = a; a = b - a;} #pragma omp atomic capture {a = a / b; c = a;} #pragma omp atomic capture {c = a; a = b & a;} #pragma omp atomic capture {a = a ^ b; c = a;} #pragma omp atomic capture {c = a; a = b | a;} #pragma omp atomic capture {a = a << b; c = a;} #pragma omp atomic capture {c = a; a = b >> a;} #pragma omp atomic capture {c = a; a = foo();} // expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'capture' clause}} #pragma omp atomic capture capture b = a /= b; return 0; }
cross_entropy_loss.c
#include <cdnn/model.h> #include <cdnn/loss.h> extern __Model__ * m; loss_layer * loss_layer_ = NULL; void forward_pass_L2_LOSS(){ int number_of_examples = m->y_train_mini_batch[m->current_mini_batch]->shape[1]; // int number_of_examples = m->Y_train->shape[1]; dARRAY * loss = NULL; dARRAY * Y = m->y_train_mini_batch[m->current_mini_batch]; // dARRAY * Y = m->Y_train; int act_dims[] = {m->output->shape[0],m->output->shape[1]}; if(!strcasecmp(m->current_layer->prev_layer->DENSE->activation,"softmax")){ dARRAY * log_y_hat= NULL; log_y_hat = (dARRAY *)malloc(sizeof(dARRAY)); log_y_hat->matrix = (float*)calloc(act_dims[0]*act_dims[1],sizeof(float)); for(int i=0;i<act_dims[0]*act_dims[1];i++){ log_y_hat->matrix[i] = log(m->output->matrix[i])+m->epsilon; } log_y_hat->shape[0] = act_dims[0]; log_y_hat->shape[1] = act_dims[1]; dARRAY * loss_term_temp = multiply(Y,log_y_hat); free2d(log_y_hat); log_y_hat = NULL; loss = sum(loss_term_temp,0); free2d(loss_term_temp); loss_term_temp = NULL; } else{ dARRAY * log_a = NULL; log_a = (dARRAY*)malloc(sizeof(dARRAY)); log_a->matrix = (float*)calloc(act_dims[0]*act_dims[1],sizeof(float)); // #pragma omp parallel for num_threads(8) for(int i=0;i<act_dims[0]*act_dims[1];i++){ log_a->matrix[i] = log(m->output->matrix[i]) + m->epsilon; } log_a->shape[0] = act_dims[0]; log_a->shape[1] = act_dims[1]; dARRAY * temp_ones = ones(act_dims); dARRAY * temp_sub = subtract(temp_ones,m->output); dARRAY * log_one_y_hat = NULL; log_one_y_hat = (dARRAY*)malloc(sizeof(dARRAY)); log_one_y_hat->matrix = (float*)calloc(act_dims[0]*act_dims[1],sizeof(float)); // #pragma omp parallel for num_threads(8) for(int i=0;i<act_dims[0]*act_dims[1];i++){ log_one_y_hat->matrix[i] = log(temp_sub->matrix[i]) + m->epsilon; } log_one_y_hat->shape[0] = act_dims[0]; log_one_y_hat->shape[1] = act_dims[1]; free2d(temp_sub); temp_sub = NULL; dARRAY * loss_term_1 = multiply(Y,log_a); free2d(log_a); log_a = NULL; temp_sub = subtract(temp_ones,Y); free2d(temp_ones); temp_ones = NULL; dARRAY * loss_term_2 = multiply(temp_sub,log_one_y_hat); free2d(temp_sub); temp_sub = NULL; free2d(log_one_y_hat); log_one_y_hat=NULL; loss = add(loss_term_1,loss_term_2); free2d(loss_term_1); free2d(loss_term_2); loss_term_1 = NULL; loss_term_2 = NULL; } // dARRAY * sum_of_losses_arr = (dARRAY*)malloc(sizeof(dARRAY)); // sum_of_losses_arr->matrix = (float*)calloc(1,sizeof(float)); // sum_of_losses_arr->shape[0] = 1; // sum_of_losses_arr->shape[1] = 1; // cblas_saxpy(loss->shape[0]*loss->shape[1],1.f,loss->matrix,0,sum_of_losses_arr->matrix,0); dARRAY * sum_of_losses_arr = sum(loss,1); float sum_of_losses = sum_of_losses_arr->matrix[0]; free2d(sum_of_losses_arr); free2d(loss); sum_of_losses_arr = NULL; loss = NULL; float cost = 0.0f; float cross_entropy_cost = -1 * sum_of_losses/(float)number_of_examples; float reg_cost=0.0; Computation_Graph * temp = NULL; if(m->regularization!=NULL){ temp = m->graph->next_layer; if(!strcasecmp(m->regularization,"L2")){ float layer_frobenius = 0.0; while(temp->next_layer->type!=LOSS){ layer_frobenius += frobenius_norm(temp->DENSE->weights); temp = temp->next_layer; } reg_cost = m->lambda*layer_frobenius/(2.0*number_of_examples); } else if(!strcasecmp(m->regularization,"L1")){ float layer_manhattan = 0.0; while(temp->next_layer->type!=LOSS){ layer_manhattan += Manhattan_distance(temp->DENSE->weights); temp = temp->next_layer; } reg_cost = m->lambda * layer_manhattan/(float)(2.0*number_of_examples); } cost = cross_entropy_cost + reg_cost; temp = NULL; m->iter_cost = cost; } else{ cost = 
cross_entropy_cost; m->iter_cost = cost; } } void backward_pass_L2_LOSS(){ int act_dims[] = {m->output->shape[0],m->output->shape[1]}; if(!strcasecmp(m->current_layer->prev_layer->DENSE->activation,"softmax")){ dARRAY * temp = divison(m->y_train_mini_batch[m->current_mini_batch],m->output); // dARRAY * temp = divison(m->Y_train,m->output); dARRAY * class_sum = sum(temp,0); free2d(temp); temp = NULL; loss_layer_->grad_out = mulScalar(class_sum,-1.0); free2d(class_sum); class_sum = NULL; } else{ dARRAY * one = ones(act_dims); dARRAY * temp1 = subtract(m->y_train_mini_batch[m->current_mini_batch],m->output); // dARRAY * temp1 = subtract(m->Y_train,m->output); dARRAY * temp2 = subtract(one,m->output); dARRAY * temp3 = multiply(m->output,temp2); free2d(one); one = NULL; dARRAY * lgrad2 = divison(temp1,temp3); free2d(temp1); free2d(temp2); free2d(temp3); temp1 = NULL; temp2 = NULL; temp3 = NULL; loss_layer_->grad_out = mulScalar(lgrad2,-1.0f); free2d(lgrad2); lgrad2 = NULL; } } void (cross_entropy_loss)(loss_args args){ loss_layer_ = (loss_layer*)malloc(sizeof(loss_layer)); loss_layer_->cost = 0.0; loss_layer_->grad_out = NULL; loss_layer_->forward = forward_pass_L2_LOSS; loss_layer_->backward = backward_pass_L2_LOSS; // loss_layer->gnd_truth = m->y_train_mini_batch[m->current_mini_batch]; append_graph(loss_layer_,"loss"); }
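/*
 * Illustrative note (not part of the original source): forward_pass_L2_LOSS() above computes
 * log(y_hat) + epsilon. If epsilon is intended to guard against log(0) for saturated
 * activations, the conventional formulation puts it inside the logarithm, e.g.
 *
 *   for (int i = 0; i < act_dims[0]*act_dims[1]; i++)
 *     log_y_hat->matrix[i] = log(m->output->matrix[i] + m->epsilon);
 *
 * Whether that matches the author's intent is an assumption; since the change is behavioural,
 * it is recorded here as a comment rather than applied to the code above.
 */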
factorize_gmp.c
/***************************************************************************************************************** * Compiling: mpicc fattor.c -lgmp -fopenmp -o fattor * Running: mpirun -n PROCNUM --bind-to none fattor NUMBER * Note: PROCNUM is the number of processes that will be ran, and it must be >=2, NUMBER is the number to factorize *****************************************************************************************************************/ #include <mpi.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include <gmp.h> struct elem { // Very basic and non-reusable stack mpz_t val; struct elem* next; }; void add(struct elem** head, mpz_t val) { struct elem* app = malloc(sizeof(struct elem)); mpz_init(app->val); mpz_set(app->val, val); // app->val = val; app->next = *head; *head = app; } void pick(struct elem** head, mpz_t toret) { mpz_init(toret); struct elem* app; if(*head == NULL) mpz_set_ui(toret, 0); // toret = 0; else { mpz_set(toret, (*head)->val); // toret = (*head)->val; app = *head; *head = (*head)->next; // mpz_finalize(app->val); free(app); } } void master_procedure(int comm_size) { int i = 1; long long rec; int shit_happened; unsigned char buffer[50]; MPI_Status stat; int count; mpz_t received_number; mpz_init(received_number); char stringa[200]; while(i < comm_size) { shit_happened = MPI_Recv(buffer, 50, MPI_UNSIGNED_CHAR, i, MPI_ANY_TAG, MPI_COMM_WORLD, &stat); MPI_Get_count(&stat, MPI_UNSIGNED_CHAR, &count); mpz_import(received_number, count, 1, 1, 1, 0, buffer); if(shit_happened) { fprintf(stderr, "Recv failed"); MPI_Abort(MPI_COMM_WORLD, 1); } if(mpz_cmp_ui(received_number, 0) == 0) // if(received_number == 0) ++i; else { mpz_get_str(stringa, 10, received_number); printf("Factor: %s\n", stringa); } } } void slave_procedure(int my_rank, int comm_size, mpz_t the_number) { int shit_happened; struct elem* head = NULL; unsigned char* buffer; mpz_t temp; mpz_t from; mpz_t to; mpz_t to_send; mpz_init(temp); mpz_init(from); mpz_init(to); mpz_init(to_send); mpz_root(temp, the_number, 2); // temp = sqrt(the_number); mpz_div_ui(temp, temp, comm_size - 1); // temp = temp / (comm_size - 1); mpz_mul_ui(from, temp, my_rank - 1); // from = temp * (my_rank - 1); mpz_mul_ui(to, temp, my_rank); // to = temp * my_rank; mpz_cmp_ui(from, 0) ? : mpz_set_ui(from, 1); // from == 0 ? 
from = 1 : ; #pragma omp parallel shared(from, to) { int my_thread = omp_get_thread_num(); int threads = omp_get_num_threads(); mpz_t from_thread; mpz_t to_thread; mpz_t divided; mpz_init(from_thread); mpz_init(to_thread); mpz_init(divided); mpz_sub(to_thread, to, from); // to_thread = to - from; mpz_set(from_thread, to_thread); // from_thread = to_thread; mpz_div_ui(to_thread, to_thread, threads); // to_thread = to_thread / threads; mpz_mul_ui(to_thread, to_thread, my_thread + 1); // to_thread = to_thread * (my_thread + 1); mpz_div_ui(from_thread, from_thread, threads); // from_thread = from_thread / threads; mpz_mul_ui(from_thread, from_thread, my_thread); // from_thread = from_thread * my_thread; mpz_add(from_thread, from_thread, from); // from_thread = from_thread + from; mpz_add(to_thread, to_thread, from); // to_thread = to_thread + from; while(mpz_cmp(from_thread, to_thread) <= 0) { if(mpz_divisible_p(the_number, from_thread)) { mpz_divexact(divided, the_number, from_thread); // divided = the_number / from_thread; // Only works if the_number % from_thread == 0; #pragma omp critical { add(&head, from_thread); add(&head, divided); } } mpz_add_ui(from_thread, from_thread, 1); // ++from_thread; } } // TODO IMPORTANT: make work with gmp do { pick(&head, to_send); int how_many_bytes = (mpz_sizeinbase(to_send, 2) + 7) / 8; // How many bytes is to_send buffer = malloc(how_many_bytes); *buffer = 0; mpz_export(buffer, NULL, 1, 1, 1, 0, to_send); // Export the number to buffer shit_happened = MPI_Send(buffer, how_many_bytes, MPI_UNSIGNED_CHAR, 0, 0, MPI_COMM_WORLD); if(shit_happened) { fprintf(stderr, "Send failed"); MPI_Abort(MPI_COMM_WORLD, 1); } free(buffer); }while(mpz_cmp_ui(to_send, 0)); } int main(int argc, char** argv) { int my_rank, comm_size; mpz_t the_number; mpz_init(the_number); MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &comm_size); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if(argc <= 1) { fprintf(stderr, "Missing number as argument"); MPI_Abort(MPI_COMM_WORLD, 1); } else mpz_set_str(the_number, argv[1], 10); // 10 is the base if(my_rank == 0) master_procedure(comm_size); else slave_procedure(my_rank, comm_size, the_number); MPI_Finalize(); return 0; }
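/*
 * Illustrative note (not part of the original source): slave_procedure() splits the trial
 * divisor range [1, sqrt(N)] evenly over the comm_size-1 worker ranks, and each rank splits
 * its slice again over its OpenMP threads. The same arithmetic with plain integers is shown
 * below (names are made up, and printf relies on the <stdio.h> already included above).
 * Neighbouring slices share their boundary value, which is harmless for trial division.
 */
static void show_partition_sketch(long sqrt_n, int workers, int threads) {
  long chunk = sqrt_n / workers;                  /* mpz_div_ui(temp, temp, comm_size-1) */
  for (int rank = 1; rank <= workers; rank++) {
    long from = chunk * (rank - 1); if (from == 0) from = 1;
    long to   = chunk * rank;
    long span = to - from;
    for (int t = 0; t < threads; t++) {
      long f = from + (span / threads) * t;       /* from_thread */
      long e = from + (span / threads) * (t + 1); /* to_thread   */
      printf("rank %d thread %d: [%ld, %ld]\n", rank, t, f, e);
    }
  }
}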
chisquare.h
/* This file is part of Mitsuba, a physically based rendering system. Copyright (c) 2007-2014 by Wenzel Jakob and others. Mitsuba is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License Version 3 as published by the Free Software Foundation. Mitsuba is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. */ #pragma once #if !defined(__MITSUBA_CORE_CHISQUARE_H_) #define __MITSUBA_CORE_CHISQUARE_H_ #include <mitsuba/render/common.h> #include <boost/tuple/tuple.hpp> #include <boost/function.hpp> MTS_NAMESPACE_BEGIN /// Minimum expected cell frequency. Cells below this value will be pooled #define CHISQR_MIN_EXP_FREQUENCY 5 /** * \brief Chi-square goodness-of-fit test on the sphere * * This class performs a chi-square goodness-of-fit test of the null hypothesis * that a specified sampling procedure produces samples that are distributed * according to a supplied density function. This is very useful to verify BRDF * and phase function sampling codes for their correctness. Currently, it * supports both 2D and discrete sampling methods and mixtures thereof. * * This implementation works by generating a large batch of samples, which are * then accumulated into rectangular bins in spherical coordinates. To obtain * reference bin counts, the provided density function is numerically * integrated over the area of each bin. Comparing the actual and reference * bin counts yields the desired test statistic. * * Given a probability distribution with the following interface * * \code * class MyDistribution { * // Sample a (optionally weighted) direction. A non-unity weight * // in the return value is needed when the sampling distribution * // doesn't exactly match the implementation in pdf() * boost::tuple<Vector, Float, EMeasure> generateSample() const; * * /// Compute the probability density for the specified direction and measure * Float pdf(const Vector &direction, EMeasure) const; * }; * \endcode * * the code in this class might be used as follows * * \code * MyDistribution myDistrInstance; * ChiSquare chiSqr; * * // Initialize the tables used by the chi-square test * chiSqr.fill( * boost::bind(&MyDistribution::generateSample, myDistrInstance), * boost::bind(&MyDistribution::pdf, myDistrInstance, _1, _2) * ); * * // Optional: dump the tables to a MATLAB file for external analysis * chiSqr.dumpTables("debug.m"); * * if (!chiSqr.runTest()) * Log(EError, "Uh oh -- test failed, the implementation is probably incorrect!"); * \endcode * \ingroup libcore */ class MTS_EXPORT_CORE ChiSquare : public Object { public: /// Possible outcomes in \ref runTest() enum ETestResult { /// The null hypothesis was rejected EReject = 0, /// The null hypothesis was accepted EAccept = 1, /// The degrees of freedom were too low ELowDoF = 2 }; /** * \brief Create a new Chi-square test instance with the given * resolution and sample count * * \param thetaBins * Number of bins wrt. latitude. The default is 10 * * \param phiBins * Number of bins wrt. azimuth. The default is to use * twice the number of \c thetaBins * * \param numTests * Number of independent tests that will be performed. This * is used to compute the Sidak-correction factor. 
* * \param sampleCount * Number of samples to be used when computing the bin * values. The default is \c thetaBins*phiBins*5000 */ ChiSquare(int thetaBins = 10, int phiBins = 0, int numTests = 1, size_t sampleCount = 0); /// Get the log level inline ELogLevel getLogLevel() const { return m_logLevel; } /// Set the log level inline void setLogLevel(ELogLevel logLevel) { m_logLevel = logLevel; } /** * \brief Set the tolerance threshold for bins with very low * aggregate probabilities * * When the Chi-square test integrates the supplied probability * density function over the support of a bin and determines that * the aggregate bin probability is zero, the test would ordinarily * fail if as much as one sample is placed in that bin in the * subsequent sampling step. However, due to various numerical * errors in a system based on finite-precision arithmetic, it * may be a good idea to tolerate at least a few samples without * immediately rejecting the null hypothesis. This parameter * sets this threshold. The default value is \c number-of-samples*1e-4f */ inline void setTolerance(Float tolerance) { m_tolerance = tolerance; } /** * \brief Fill the actual and reference bin counts * * Please see the class documentation for a description * on how to invoke this function */ void fill( const boost::function<boost::tuple<Vector, Float, EMeasure>()> &sampleFn, const boost::function<Float (const Vector &, EMeasure)> &pdfFn); /** * \brief Dump the bin counts to a file using MATLAB format */ void dumpTables(const fs::path &filename); /** * \brief Perform the actual chi-square test * * \param pvalThresh * The implementation will reject the null hypothesis * when the computed p-value lies below this parameter * (default: 0.01f) * * \return A status value of type \ref ETestResult */ ETestResult runTest(Float pvalThresh = 0.01f); MTS_DECLARE_CLASS() protected: /// Release all memory virtual ~ChiSquare(); /// Functor to evaluate the pdf values in parallel using OpenMP static void integrand( const boost::function<Float (const Vector &, EMeasure)> &pdfFn, size_t nPts, const Float *in, Float *out) { #if defined(MTS_OPENMP) #pragma omp parallel for #endif for (int i=0; i<(int) nPts; ++i) out[i] = pdfFn(sphericalDirection(in[2*i], in[2*i+1]), ESolidAngle) * std::sin(in[2*i]); } private: ELogLevel m_logLevel; Float m_tolerance; int m_thetaBins, m_phiBins; int m_numTests; size_t m_sampleCount; Float *m_table; Float *m_refTable; }; MTS_NAMESPACE_END #endif /* __MITSUBA_CORE_CHISQUARE_H_ */
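/*
 * Illustrative note (not part of the original header): the statistic that runTest() is
 * documented to compute is the classic Pearson chi-square over the theta/phi bins, with
 * cells whose expected count falls below CHISQR_MIN_EXP_FREQUENCY pooled together.  A
 * stand-alone sketch (kept in a comment so the include guard above stays meaningful;
 * sqr() and the array names are hypothetical):
 *
 *   double chisq = 0; int dof = -1;            // -1: one constraint (the total count)
 *   double pooledObs = 0, pooledExp = 0;
 *   for (size_t i = 0; i < nBins; ++i) {
 *       if (expected[i] < CHISQR_MIN_EXP_FREQUENCY) {
 *           pooledObs += observed[i]; pooledExp += expected[i];
 *       } else {
 *           chisq += sqr(observed[i] - expected[i]) / expected[i]; ++dof;
 *       }
 *   }
 *   if (pooledExp > 0) { chisq += sqr(pooledObs - pooledExp) / pooledExp; ++dof; }
 *   // p-value from the upper incomplete gamma Q(dof/2, chisq/2), compared against the
 *   // Sidak-adjusted threshold 1 - pow(1 - pvalThresh, 1.0 / m_numTests).
 */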
hashtable.impl.h
/* * 'hashtable.impl.h' * This file is part of the "trinity" project. * (https://github.com/hobywan/trinity) * Copyright 2016, Hoby Rakotoarivelo * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #pragma once /* -------------------------------------------------------------------------- */ namespace trinity { /* -------------------------------------------------------------------------- */ template <typename type_t, typename flag_t> Hashtable<type_t, flag_t>::Hashtable(size_t table_size, size_t bucket_size, size_t bucket_stride) { assert(table_size); assert(bucket_size); assert(bucket_stride); nb_cores = omp_get_max_threads(); size = table_size; capacity = bucket_size; stride = bucket_stride; offset = new int[table_size]; bucket = new type_t* [table_size]; #pragma omp parallel for for (unsigned i = 0; i < table_size; ++i) { bucket[i] = new type_t[capacity]; } } /* -------------------------------------------------------------------------- */ template <typename type_t, typename flag_t> Hashtable<type_t, flag_t>::~Hashtable() { for (unsigned i = 0; i < size; ++i) { delete[] bucket[i]; } delete[] bucket; delete[] offset; } /* -------------------------------------------------------------------------- */ template <typename type_t, typename flag_t> type_t Hashtable<type_t, flag_t>::generateKey(type_t i, type_t j, size_t scale) const { assert(scale); assert(nb_cores); auto min_key = (uint32_t) std::min(i, j); return (type_t) tools::hash(min_key) % (scale * nb_cores); } /* -------------------------------------------------------------------------- */ template <typename type_t, typename flag_t> size_t Hashtable<type_t, flag_t>::getCapacity() const { return size; } /* -------------------------------------------------------------------------- */ template <typename type_t, typename flag_t> void Hashtable<type_t, flag_t>::push(type_t key, const std::initializer_list<type_t>& val) { assert(val.size() == stride); auto j = sync::fetchAndAdd(offset + key, (int) stride); assert((j + stride) < capacity); for (unsigned i = 0; i < stride; ++i) bucket[key][j + i] = *(val.begin() + i); } /* -------------------------------------------------------------------------- */ template <typename type_t, typename flag_t> type_t Hashtable<type_t, flag_t>::getValue(type_t v1, type_t v2, bool use_hash) const { assert(stride == 2); type_t key = (use_hash ? generateKey(v1,v2) : std::min(v1, v2)); type_t hint = std::max(v1, v2); for (int k = 0; k < offset[key] - 1; k += 2) { if (bucket[key][k] == hint) { return bucket[key][k + 1]; } } return (type_t) -1; // not found } /* -------------------------------------------------------------------------- */ template <typename type_t, typename flag_t> void Hashtable<type_t, flag_t>::reset() { #pragma omp for for (unsigned i = 0; i < size; ++i) { std::memset(bucket[i], -1, capacity * sizeof(int)); } #pragma omp for for (unsigned i = 0; i < size; ++i) { offset[i] = 0; } } /* -------------------------------------------------------------------------- */ } // namespace trinity
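/*
 * Illustrative usage note (not part of the original source): offset[] is only zeroed by
 * reset(), whose orphaned "#pragma omp for" loops are meant to run inside an existing
 * parallel region, so a table is expected to be reset() before the first push().  A minimal
 * sketch, assuming stride == 2 and the non-hashed key convention of getValue(v1, v2, false)
 * (key = min(v1, v2), stored pair = {max(v1, v2), payload}); the instantiation and names
 * below are hypothetical:
 *
 *   trinity::Hashtable<int, bool> edges(nb_nodes, 64, 2);  // table_size, bucket_size, stride
 *   #pragma omp parallel
 *   {
 *     edges.reset();
 *     #pragma omp for
 *     for (int e = 0; e < nb_edges; ++e)
 *       edges.push(std::min(v1[e], v2[e]), {std::max(v1[e], v2[e]), midpoint[e]});
 *   }
 *   int found = edges.getValue(7, 12, false);   // payload, or -1 if the edge is absent
 */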
mandelbrot.c
/*

To compile:

gcc -O3 -o mandelbrot mandelbrot.c png_util.c -I. -lpng -lm -fopenmp

Or just type:

module load gcc
make

To create an image with 4096 x 4096 pixels (last argument will be used to set number of threads):

./mandelbrot 4096 4096 1

*/

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "png_util.h"

// Q2a: add include for OpenMP header file here:
#include <omp.h>

#define MXITER 1000

typedef struct {
  double r;
  double i;
}complex_t;

// return iterations before z leaves mandelbrot set for given c
int testpoint(complex_t c){
  int iter;
  complex_t z;
  double temp;

  z = c;

  for(iter=0; iter<MXITER; iter++){
    temp = (z.r*z.r) - (z.i*z.i) + c.r;
    z.i = z.r*z.i*2. + c.i;
    z.r = temp;

    if((z.r*z.r+z.i*z.i)>4.0){
      return iter;
    }
  }

  return iter;
}

// perform Mandelbrot iteration on a grid of numbers in the complex plane
// record the iteration counts in the count array
void mandelbrot(int Nre, int Nim, complex_t cmin, complex_t cmax, float *count){

  int n,m;
  complex_t c;

  double dr = (cmax.r-cmin.r)/(Nre-1);
  double di = (cmax.i-cmin.i)/(Nim-1);

  // Q2c: add a compiler directive to split the outer for loop amongst threads here
#pragma omp parallel for private(c,m,n)
  for(n=0;n<Nim;++n){
    for(m=0;m<Nre;++m){
      c.r = cmin.r + dr*m;
      c.i = cmin.i + di*n;
      count[m+n*Nre] = testpoint(c);
    }
  }
}

int main(int argc, char **argv){

  // to create a 4096x4096 pixel image [ last argument is placeholder for number of threads ]
  // usage: ./mandelbrot 4096 4096 1

  int Nre = atoi(argv[1]);
  int Nim = atoi(argv[2]);
  int Nthreads = atoi(argv[3]);

  // Q2b: set the number of OpenMP threads to be Nthreads here:
  omp_set_num_threads(Nthreads);

  // storage for the iteration counts
  float *count = (float*) malloc(Nre*Nim*sizeof(float));

  // Parameters for a bounding box for "c" that generates an interesting image
  const float centRe = -.759856, centIm = .125547;
  const float diam = 0.151579;

  complex_t cmin;
  complex_t cmax;

  cmin.r = centRe - 0.5*diam;
  cmax.r = centRe + 0.5*diam;
  cmin.i = centIm - 0.5*diam;
  cmax.i = centIm + 0.5*diam;

  // Q2d: complete this to read time before calling mandelbrot with OpenMP API wall clock time
  double start;
  start = omp_get_wtime();

  // compute mandelbrot set
  mandelbrot(Nre, Nim, cmin, cmax, count);

  // Q2d: complete this to read time after calling mandelbrot using OpenMP wall clock time
  double end;
  end = omp_get_wtime();

  // print elapsed time
  printf("elapsed = %g\n", end-start);

  // output mandelbrot to png format image
  FILE *fp = fopen("mandelbrot.png", "w");
  write_hot_png(fp, Nre, Nim, count, 0, 80);

  exit(0);
  return 0;
}
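/*
 * Illustrative note (not part of the original assignment code): the iteration count per pixel
 * varies a lot across the image, so the default static schedule can leave some threads idle.
 * A commonly used alternative, shown here only as a sketch, is a dynamic schedule over rows:
 *
 *   #pragma omp parallel for private(c,m) schedule(dynamic,8)
 *   for(n=0;n<Nim;++n){ ... }
 *
 * Whether this helps depends on the image region and thread count; the pragma above is left
 * unchanged.
 */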
parallel.c
#include <stdbool.h> #include <stdio.h> #include <math.h> #include "mpi.h" #include "ppp/ppp.h" #include "ppp_pnm/ppp_pnm.h" /** * Computes the force body j exercises on body i. * The acceleration is returned in *ax and *ay. */ static void compute(body* bodies, int i, int j, long double* ax, long double* ay) { long double aij_x, aij_y, dx, dy, r3; dx = bodies[j].x - bodies[i].x; dy = bodies[j].y - bodies[i].y; r3 = powl(sqrtl(dx * dx + dy * dy), 3); aij_x = dx / r3; aij_y = dy / r3; *ax = aij_x * bodies[j].mass; *ay = aij_y * bodies[j].mass; } /** * Updates (x,y) and (vx,vy) of body b from its * current position, velocity and acceleration. */ static void update(body* b, double deltaT, long double ax, long double ay) { long double dvx, dvy; dvx = ax * (G * deltaT); dvy = ay * (G * deltaT); b->x += (b->vx + dvx / 2) * deltaT; b->y += (b->vy + dvy / 2) * deltaT; b->vx += dvx; b->vy += dvy; } /** * Determines length and offset in a problem buffer of size <size> * for process <rank> where the total number of processes is <np>. */ static void portionize(int size, int np, int rank, int* length, int* offset) { int portion = size / np; int residue = size % np; if (rank < residue) { *length = portion + 1; *offset = (*length) * rank; } else { *length = portion; *offset = (*length) * rank + residue; } if (*length == 0) { *offset = 0; } } /** * Determines x and y accelerations exercised by body j on body i. */ void acc_foo(body* bodies, int i, int j, long double* accX, long double* accY) { long double ax, ay; compute(bodies, i, j, &ax, &ay); accX[i] += ax; accY[i] += ay; } /** * Determines x and y accelerations exercised by body j on body i and * uses those to symmetrically set accelerations of body i on body j. */ void acc_bar(body* bodies, int i, int j, long double* accX, long double* accY) { long double ax, ay; compute(bodies, i, j, &ax, &ay); accX[i] += ax; accY[i] += ay; long double ratio = bodies[i].mass / bodies[j].mass; accX[j] -= ratio * ax; accY[j] -= ratio * ay; } /** * Performs the nbody simulation in a distributed fashion using MPI and OpenMP. 
*/ void compute_parallel(struct TaskInput* TI) { int np, self; MPI_Comm_size(MPI_COMM_WORLD, &np); MPI_Comm_rank(MPI_COMM_WORLD, &self); const bool debug = TI->debug; const long double deltaT = TI->deltaT; const int nSteps = TI->nSteps; const int imageStep = TI->imageStep; const int nBodies = TI->nBodies; body* bodies = TI->bodies; MPI_Datatype body_type, pos_type; MPI_Type_contiguous(5, MPI_LONG_DOUBLE, &body_type); MPI_Type_commit(&body_type); MPI_Type_create_indexed_block(1, 2, (int[]){1}, MPI_LONG_DOUBLE, &pos_type); MPI_Type_create_resized(pos_type, 0, sizeof(body), &pos_type); MPI_Type_commit(&pos_type); int counts[np], displs[np]; #pragma omp parallel for for (int p = 0; p < np; p++) { portionize(nBodies, np, p, &counts[p], &displs[p]); } int myoffset = displs[self]; int mylength = counts[self]; long double accX[nBodies], accY[nBodies]; for (int step = 0; step < nSteps; step++) { // save an image snapshot every <imageStep> steps if (self == 0 && imageStep > 0 && step % imageStep == 0) { saveImage(step / imageStep, bodies, nBodies); } if (self == 0 && debug) { printf("%d\r", step); } // initialize this step's accelerations #pragma omp parallel for for (int k = 0; k < nBodies; k++) { accX[k] = accY[k] = 0; } if (TI->newton3) { // implementation with Newton's third law used globally /* * reduction on whole arrays turned out to be faster than * initiating parallelism every iteration and reducing individual * fields manually into temporary local variables. */ #pragma omp parallel for reduction(+:accX,accY) for (int i = myoffset; i < myoffset + mylength / 2; i++) { for (int j = 0; j < i; j++) { acc_bar(bodies, i, j, accX, accY); } } int p = (np - 1) - self; // mirror process #pragma omp parallel for reduction(+:accX,accY) for (int i = displs[p]+counts[p]/2; i < displs[p]+counts[p]; i++) { for (int j = 0; j < i; j++) { acc_bar(bodies, i, j, accX, accY); } } // sum accelerations together over all processes MPI_Request requests[2]; MPI_Iallreduce(MPI_IN_PLACE, &accX, nBodies, MPI_LONG_DOUBLE, MPI_SUM, MPI_COMM_WORLD, &requests[0]); MPI_Iallreduce(MPI_IN_PLACE, &accY, nBodies, MPI_LONG_DOUBLE, MPI_SUM, MPI_COMM_WORLD, &requests[1]); MPI_Waitall(2, requests, MPI_STATUSES_IGNORE); } else if (TI->newton3local) { // implementation with Newton's third law for local computations #pragma omp parallel for reduction(+:accX,accY) for (int i = myoffset; i < myoffset + mylength; i++) { for (int j = 0; j < nBodies; j++) { if (j >= myoffset && j < myoffset + mylength) { if (j < i) { acc_bar(bodies, i, j, accX, accY); } } else { acc_foo(bodies, i, j, accX, accY); } } } } else { // straightforward implementation without calculation savings #pragma omp parallel for for (int i = myoffset; i < myoffset + mylength; i++) { for (int j = 0; j < nBodies; j++) { if (i == j) { continue; } acc_foo(bodies, i, j, accX, accY); } } } // update this process' associated bodies #pragma omp parallel for for (int i = myoffset; i < myoffset + mylength; i++) { update(&bodies[i], deltaT, accX[i], accY[i]); } // sync body positions over all processes MPI_Allgatherv(MPI_IN_PLACE, 0, pos_type, bodies, counts, displs, pos_type, MPI_COMM_WORLD); } // save a final snapshot if <imageStep> divides <nSteps> if (self == 0 && imageStep > 0 && nSteps % imageStep == 0) { saveImage(nSteps / imageStep, bodies, nBodies); } if (debug) { printf("\n"); } // collect final result on root if (self == 0) { MPI_Gatherv(MPI_IN_PLACE, 0, body_type, bodies, counts, displs, body_type, 0, MPI_COMM_WORLD); } else { MPI_Gatherv(bodies + myoffset, mylength, 
body_type, bodies, counts, displs, body_type, 0, MPI_COMM_WORLD); } // clean up custom types MPI_Type_free(&body_type); MPI_Type_free(&pos_type); }
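/*
 * Illustrative note (not part of the original source): portionize() implements the usual block
 * distribution in which the first (size % np) ranks receive one extra element.  For example,
 * size = 10 bodies over np = 3 processes gives
 *
 *   rank 0: length 4, offset 0
 *   rank 1: length 3, offset 4
 *   rank 2: length 3, offset 7
 *
 * which is exactly the counts[]/displs[] layout later handed to MPI_Allgatherv and MPI_Gatherv.
 */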
minmax.c
#include "math.h" #include <stdio.h> #include <stdlib.h> #define _DISP #define EXP 1.6 #define THRESH 1 struct number{ int num[4]; int flag; }; struct number initarray[5040]; inline void num2p(int num,int *p){ int i; for(i=0;i<4;i++) *(p++)=0; i=3; while(num){ *(--p)=num%10; num=num/10; } } inline int check1(int * p){ int i,j; for(i=0;i<4;i++){ for(j=i+1;j<4;j++){ if(p[i]==p[j]) return 0; } } return 1; } void PreInitArray(){ int i,j; int cnt=0; int numt[4]; //struct number * arrayp=initarray; for(i=123;i<=9876;i++){ num2p(i,numt); if(check1(numt)){ initarray[cnt].flag=1; for(j=0;j<4;j++) { initarray[cnt].num[j]=numt[j]; } cnt++; } } printf("\nPre Iint Over!\n"); } void InitArray(struct number * nump){ int i,j; for(i=0;i<5040;i++){ for(j=0;j<4;j++) nump[i].num[j]=initarray[i].num[j]; nump[i].flag=1; } } inline void check2(int * num0,int *numg,int *a,int *b){ int i,j; *a=0; *b=0; for(i=0;i<4;i++){ if(num0[i]==numg[i]) (*a)++; for(j=0;j<4;j++){ if(num0[i]==numg[j]) (*b)++; } } (*b)-=(*a); } double Division(struct number * array,double cnt,int *nump){ int hist[15]={0,0,0,0,0, 0,0,0,0,0, 0,0,0,0,0}; int i; int ta,tb; for(i=0;i<5040;i++){ if(array[i].flag){ check2(array[i].num,nump,&ta,&tb); hist[ta*(11-ta)/2+tb]++; } } double div=0; for(i=0;i<13;i++){ if(hist[i]!=0) { if(div<hist[i]) div=hist[i]; } } return div; } int BestDivision(struct number * array,int count){ double best=10000*10000+0.0; int bestindex=-1; double best2=10000*10000+0.0; int bestindex2=-1; double new; int i; double cnt=0.0; for(i=0;i<5040;i++) cnt+=array[i].flag; if(cnt<1.1){ for(i=0;i<5040;i++){ if(array[i].flag) return i; } } //cnt=cnt/13.0; /*if(count<=1){ for(i=0;i<5040;i++){ if(array[i].flag){ new=Division(array,cnt,array[i].num); if(best>new){ best=new; bestindex=i; } } } return bestindex; }*/ { for(i=0;i<5040;i++){ if( array[i].flag) { new=Division(array,cnt,array[i].num); if(best>new){ best=new; bestindex=i; } } else{ new=Division(array,cnt,array[i].num); if(best2>new){ best2=new; bestindex2=i; } } } if(best2<best) return bestindex2; // printf("best min:%f\n",best); return bestindex; } } int CCguess(int * num){ int numg[4]; int cnt=0; int i; int a,b,ta,tb; int ans; struct number array[5040]; //printf("Begin Init!\n"); InitArray(array); //printf("Init Over!\n"); for(i=0;i<4;i++) numg[i]=i; while(1){ check2(num,numg,&a,&b); //printf("a:%d,b:%d\n",a,b); cnt++; if(a==4&&b==0) return cnt; if(cnt>9) return 0; for(i=0;i<5040;i++){ if(array[i].flag){ check2(array[i].num,numg,&ta,&tb); array[i].flag=(ta==a && tb==b); } } // printf("best Error\n"); ans=BestDivision(array,cnt); // printf("Error: ans:%dcnt:%d\n",ans,cnt); for(i=0;i<4;i++) numg[i]=array[ans].num[i]; } } int main(){ PreInitArray(); int i,j,cnt=0; int ans; int hist[11]; for(i=0;i<11;i++) hist[i]=0; #pragma omp parallel for for(i=0;i<5040;i++){ ans=CCguess(initarray[i].num); hist[ans]++; for(j=0;j<4;j++) printf("%d",initarray[i].num[j]); printf(",%d\n",ans); if(ans==0){ printf("\nError!\n"); //break; exit(1); } // if(i%100==0) // printf("%5d\n",i); } printf("time:"); for(j=1;j<11;j++) printf("%5d",j); printf("\n "); for(j=1;j<11;j++){ cnt+=hist[j]*j; printf("%5d",hist[j]); } printf("\naverage cnt:%12f\n",cnt/(5040+0.0)); return 1; }
convolution_pack1to4_int8.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void convolution_pack1to4_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_int8, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int maxk = kernel_w * kernel_h; // kernel offsets std::vector<int> _space_ofs(maxk); int* space_ofs = &_space_ofs[0]; { int p1 = 0; int p2 = 0; int gap = w * dilation_h - kernel_w * dilation_w; for (int i = 0; i < kernel_h; i++) { for (int j = 0; j < kernel_w; j++) { space_ofs[p1] = p2; p1++; p2 += dilation_w; } p2 += gap; } } // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { int* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { int32x4_t _sum0 = vdupq_n_s32(0); const signed char* kptr = weight_data_int8.channel(p); // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); const signed char* sptr = m.row<const signed char>(i * stride_h) + j * stride_w; for (int k = 0; k < maxk; k++) { int8x8_t _val = vdup_n_s8(sptr[space_ofs[k]]); int8x8_t _w = vld1_s8(kptr); int16x8_t _s0 = vmull_s8(_val, _w); _sum0 = vaddw_s16(_sum0, vget_low_s16(_s0)); kptr += 4; } } vst1q_s32(outptr + j * 4, _sum0); } outptr += outw * 4; } } }
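/*
 * Illustrative note (not part of the original source): space_ofs[] holds the flattened offsets
 * of the kernel taps inside one input channel.  For example, with input width w = 8, a 3x3
 * kernel and dilation 1, gap = 8*1 - 3*1 = 5 and the loop produces
 *
 *   space_ofs = { 0, 1, 2,  8, 9, 10,  16, 17, 18 }
 *
 * i.e. three consecutive pixels on each of three consecutive rows, taken relative to the
 * current input position (i*stride_h, j*stride_w).
 */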
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright @ 2002 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/constitute.h" #include "MagickCore/draw.h" #include "MagickCore/draw-private.h" #include "MagickCore/effect.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/identify.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/magick.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/segment.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/transform.h" #include "MagickCore/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. 
% % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ typedef struct _EdgeInfo { double left, right, top, bottom; } EdgeInfo; static double GetEdgeBackgroundCensus(const Image *image, const CacheView *image_view,const GravityType gravity,const size_t width, const size_t height,const ssize_t x_offset,const ssize_t y_offset, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double census; Image *edge_image; PixelInfo background, pixel; RectangleInfo edge_geometry; const Quantum *p; ssize_t y; /* Determine the percent of image background for this edge. */ switch (gravity) { case NorthWestGravity: case NorthGravity: default: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); break; } case NorthEastGravity: case EastGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); break; } case SouthEastGravity: case SouthGravity: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); break; } case SouthWestGravity: case WestGravity: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); break; } } GetPixelInfoPixel(image,p,&background); artifact=GetImageArtifact(image,"background"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); artifact=GetImageArtifact(image,"trim:background-color"); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,&background,exception); edge_geometry.width=width; edge_geometry.height=height; edge_geometry.x=x_offset; edge_geometry.y=y_offset; GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) return(0.0); census=0.0; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) census++; p+=GetPixelChannels(edge_image); } } census/=((double) edge_image->columns*edge_image->rows); edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); return(census); } static inline double GetMinEdgeBackgroundCensus(const EdgeInfo *edge) { double census; census=MagickMin(MagickMin(MagickMin(edge->left,edge->right),edge->top), edge->bottom); return(census); } static RectangleInfo GetEdgeBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *edge_view; const char *artifact; double background_census, percent_background; EdgeInfo edge, vertex; Image *edge_image; RectangleInfo bounds; /* Get the image bounding box. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); SetGeometry(image,&bounds); edge_image=CloneImage(image,0,0,MagickTrue,exception); if (edge_image == (Image *) NULL) return(bounds); (void) ParseAbsoluteGeometry("0x0+0+0",&edge_image->page); (void) memset(&vertex,0,sizeof(vertex)); edge_view=AcquireVirtualCacheView(edge_image,exception); edge.left=GetEdgeBackgroundCensus(edge_image,edge_view,WestGravity, 1,0,0,0,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view,EastGravity, 1,0,0,0,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view,NorthGravity, 0,1,0,0,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view,SouthGravity, 0,1,0,0,exception); percent_background=1.0; artifact=GetImageArtifact(edge_image,"trim:percent-background"); if (artifact != (const char *) NULL) percent_background=StringToDouble(artifact,(char **) NULL)/100.0; percent_background=MagickMin(MagickMax(1.0-percent_background,MagickEpsilon), 1.0); background_census=GetMinEdgeBackgroundCensus(&edge); for ( ; background_census < percent_background; background_census=GetMinEdgeBackgroundCensus(&edge)) { if ((bounds.width == 0) || (bounds.height == 0)) break; if (fabs(edge.left-background_census) < MagickEpsilon) { /* Trim left edge. */ vertex.left++; bounds.width--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.right-background_census) < MagickEpsilon) { /* Trim right edge. */ vertex.right++; bounds.width--; edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } if (fabs(edge.top-background_census) < MagickEpsilon) { /* Trim top edge. */ vertex.top++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.top=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); continue; } if (fabs(edge.bottom-background_census) < MagickEpsilon) { /* Trim bottom edge. 
*/ vertex.bottom++; bounds.height--; edge.left=GetEdgeBackgroundCensus(edge_image,edge_view, NorthWestGravity,1,bounds.height,(ssize_t) vertex.left,(ssize_t) vertex.top,exception); edge.right=GetEdgeBackgroundCensus(edge_image,edge_view, NorthEastGravity,1,bounds.height,(ssize_t) vertex.right,(ssize_t) vertex.top,exception); edge.bottom=GetEdgeBackgroundCensus(edge_image,edge_view, SouthWestGravity,bounds.width,1,(ssize_t) vertex.left,(ssize_t) vertex.bottom,exception); continue; } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); bounds.x=(ssize_t) vertex.left; bounds.y=(ssize_t) vertex.top; if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return(bounds); } MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const char *artifact; MagickBooleanType status; PixelInfo target[4], zero; RectangleInfo bounds; const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); artifact=GetImageArtifact(image,"trim:percent-background"); if (artifact != (const char *) NULL) return(GetEdgeBoundingBox(image,exception)); artifact=GetImageArtifact(image, "trim:edges"); if (artifact == (const char *) NULL) { bounds.width=image->columns == 1 ? 1 : 0; bounds.height=image->rows == 1 ? 1 : 0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; } else { char *edges, *q, *r; bounds.width=(size_t) image->columns; bounds.height=(size_t) image->rows; bounds.x=0; bounds.y=0; edges=AcquireString(artifact); r=edges; while ((q=StringToken(",",&r)) != (char *) NULL) { if (LocaleCompare(q,"north") == 0) bounds.y=(ssize_t) image->rows; if (LocaleCompare(q,"east") == 0) bounds.width=0; if (LocaleCompare(q,"south") == 0) bounds.height=0; if (LocaleCompare(q,"west") == 0) bounds.x=(ssize_t) image->columns; } edges=DestroyString(edges); } GetPixelInfo(image,&target[0]); image_view=AcquireVirtualCacheView(image,exception); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const Quantum *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } GetPixelInfoPixel(image,p,&target[0]); GetPixelInfo(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[1]); GetPixelInfo(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[2]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,(ssize_t) image->rows-1,1,1,exception); if (p != (const Quantum *) NULL) GetPixelInfoPixel(image,p,&target[3]); status=MagickTrue; GetPixelInfo(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { PixelInfo pixel; RectangleInfo bounding_box; const Quantum *magick_restrict q; ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; q=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (q == (const Quantum *) NULL) { 
status=MagickFalse; continue; } pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { GetPixelInfoPixel(image,q,&pixel); if ((x < bounding_box.x) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; if ((x < (ssize_t) bounding_box.width) && (y > (ssize_t) bounding_box.height) && (IsFuzzyEquivalencePixelInfo(&pixel,&target[3]) == MagickFalse)) { bounding_box.width=(size_t) x; bounding_box.height=(size_t) y; } q+=GetPixelChannels(image); } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C o n v e x H u l l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageConvexHull() returns the convex hull points of an image canvas. % % The format of the GetImageConvexHull method is: % % PointInfo *GetImageConvexHull(const Image *image, % size_t number_vertices,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_vertices: the number of vertices in the convex hull. % % o exception: return any errors or warnings in this structure. % */ static double LexicographicalOrder(PointInfo *a,PointInfo *b,PointInfo *c) { /* Order by x-coordinate, and in case of a tie, by y-coordinate. */ return((b->x-a->x)*(c->y-a->y)-(b->y-a->y)*(c->x-a->x)); } static PixelInfo GetEdgeBackgroundColor(const Image *image, const CacheView *image_view,ExceptionInfo *exception) { const char *artifact; double census[4], edge_census; PixelInfo background[4], edge_background; ssize_t i; /* Most dominant color of edges/corners is the background color of the image. 
*/ memset(&edge_background,0,sizeof(edge_background)); artifact=GetImageArtifact(image,"convex-hull:background-color"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"background"); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) #endif for (i=0; i < 4; i++) { CacheView *edge_view; GravityType gravity; Image *edge_image; PixelInfo pixel; RectangleInfo edge_geometry; const Quantum *p; ssize_t y; census[i]=0.0; (void) memset(&edge_geometry,0,sizeof(edge_geometry)); switch (i) { case 0: default: { p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); gravity=WestGravity; edge_geometry.width=1; edge_geometry.height=0; break; } case 1: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); gravity=EastGravity; edge_geometry.width=1; edge_geometry.height=0; break; } case 2: { p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); gravity=NorthGravity; edge_geometry.width=0; edge_geometry.height=1; break; } case 3: { p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1, (ssize_t) image->rows-1,1,1,exception); gravity=SouthGravity; edge_geometry.width=0; edge_geometry.height=1; break; } } GetPixelInfoPixel(image,p,background+i); if (artifact != (const char *) NULL) (void) QueryColorCompliance(artifact,AllCompliance,background+i, exception); GravityAdjustGeometry(image->columns,image->rows,gravity,&edge_geometry); edge_image=CropImage(image,&edge_geometry,exception); if (edge_image == (Image *) NULL) continue; edge_view=AcquireVirtualCacheView(edge_image,exception); for (y=0; y < (ssize_t) edge_image->rows; y++) { ssize_t x; p=GetCacheViewVirtualPixels(edge_view,0,y,edge_image->columns,1, exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) edge_image->columns; x++) { GetPixelInfoPixel(edge_image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,background+i) == MagickFalse) census[i]++; p+=GetPixelChannels(edge_image); } } edge_view=DestroyCacheView(edge_view); edge_image=DestroyImage(edge_image); } edge_census=(-1.0); for (i=0; i < 4; i++) if (census[i] > edge_census) { edge_background=background[i]; edge_census=census[i]; } return(edge_background); } void TraceConvexHull(PointInfo *vertices,size_t number_vertices, PointInfo ***monotone_chain,size_t *chain_length) { PointInfo **chain; ssize_t i; size_t demark, n; /* Construct the upper and lower hulls: rightmost to leftmost counterclockwise. */ chain=(*monotone_chain); n=0; for (i=0; i < (ssize_t) number_vertices; i++) { while ((n >= 2) && (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0)) n--; chain[n++]=(&vertices[i]); } demark=n+1; for (i=(ssize_t) number_vertices-2; i >= 0; i--) { while ((n >= demark) && (LexicographicalOrder(chain[n-2],chain[n-1],&vertices[i]) <= 0.0)) n--; chain[n++]=(&vertices[i]); } *chain_length=n; } MagickExport PointInfo *GetImageConvexHull(const Image *image, size_t *number_vertices,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MemoryInfo *monotone_info, *vertices_info; PixelInfo background; PointInfo *convex_hull, **monotone_chain, *vertices; size_t n; ssize_t y; /* Identify convex hull vertices of image foreground object(s). 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *number_vertices=0; vertices_info=AcquireVirtualMemory(image->columns,image->rows* sizeof(*vertices)); monotone_info=AcquireVirtualMemory(2*image->columns,2* image->rows*sizeof(*monotone_chain)); if ((vertices_info == (MemoryInfo *) NULL) || (monotone_info == (MemoryInfo *) NULL)) { if (monotone_info != (MemoryInfo *) NULL) monotone_info=(MemoryInfo *) RelinquishVirtualMemory(monotone_info); if (vertices_info != (MemoryInfo *) NULL) vertices_info=RelinquishVirtualMemory(vertices_info); return((PointInfo *) NULL); } vertices=(PointInfo *) GetVirtualMemoryBlob(vertices_info); monotone_chain=(PointInfo **) GetVirtualMemoryBlob(monotone_info); image_view=AcquireVirtualCacheView(image,exception); background=GetEdgeBackgroundColor(image,image_view,exception); status=MagickTrue; n=0; for (y=0; y < (ssize_t) image->rows; y++) { const Quantum *p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel; GetPixelInfoPixel(image,p,&pixel); if (IsFuzzyEquivalencePixelInfo(&pixel,&background) == MagickFalse) { vertices[n].x=(double) x; vertices[n].y=(double) y; n++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Return the convex hull of the image foreground object(s). */ TraceConvexHull(vertices,n,&monotone_chain,number_vertices); convex_hull=(PointInfo *) AcquireQuantumMemory(*number_vertices, sizeof(*convex_hull)); if (convex_hull != (PointInfo *) NULL) for (n=0; n < *number_vertices; n++) convex_hull[n]=(*monotone_chain[n]); monotone_info=RelinquishVirtualMemory(monotone_info); vertices_info=RelinquishVirtualMemory(vertices_info); return(convex_hull); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageDepth() returns the depth of a particular image channel. % % The format of the GetImageDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t i; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=(size_t) GetMagickResourceLimit(ThreadResource); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (i=0; i < (ssize_t) number_threads; i++) current_depth[i]=1; if ((image->storage_class == PseudoClass) && (image->alpha_trait == UndefinedPixelTrait)) { for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickBooleanType atDepth; QuantumAny range; atDepth=MagickTrue; range=GetQuantumRange(current_depth[id]); if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].red),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].green),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse) && (GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) if (IsPixelAtDepth(ClampToQuantum(image->colormap[i].blue),range) == MagickFalse) atDepth=MagickFalse; if ((atDepth != MagickFalse)) break; current_depth[id]++; } } depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireVirtualCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) DisableMSCWarning(4127) if ((1UL*QuantumRange) <= MaxMap) RestoreMSCWarning { size_t *depth_map; /* Scale pixels to desired (optimized with depth map). 
*/ depth_map=(size_t *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) { for (depth=1; depth < (size_t) MAGICKCORE_QUANTUM_DEPTH; depth++) { Quantum pixel; QuantumAny range; range=GetQuantumRange(depth); pixel=(Quantum) i; if (pixel == ScaleAnyToQuantum(ScaleQuantumToAny(pixel,range),range)) break; } depth_map[i]=depth; } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel = GetPixelChannelChannel(image,j); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (depth_map[ScaleQuantumToMap(p[j])] > current_depth[id]) current_depth[id]=depth_map[ScaleQuantumToMap(p[j])]; } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; depth_map=(size_t *) RelinquishMagickMemory(depth_map); current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } #endif /* Compute pixel depth. */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); const Quantum *magick_restrict p; ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) continue; for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,j); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { QuantumAny range; range=GetQuantumRange(current_depth[id]); if (p[j] == ScaleAnyToQuantum(ScaleQuantumToAny(p[j],range),range)) break; current_depth[id]++; } } p+=GetPixelChannels(image); } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); depth=current_depth[0]; for (i=1; i < (ssize_t) number_threads; i++) if (depth < current_depth[i]) depth=current_depth[i]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M i n i m u m B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMinimumBoundingBox() returns the points that form the minimum % bounding box around the image foreground objects with the "Rotating % Calipers" algorithm. 
The method also returns these properties: % minimum-bounding-box:area, minimum-bounding-box:width, % minimum-bounding-box:height, and minimum-bounding-box:angle. % % The format of the GetImageMinimumBoundingBox method is: % % PointInfo *GetImageMinimumBoundingBox(Image *image, % size_t number_vertices,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o number_vertices: the number of vertices in the bounding box. % % o exception: return any errors or warnings in this structure. % */ typedef struct _CaliperInfo { double area, width, height, projection; ssize_t p, q, v; } CaliperInfo; static inline double getAngle(PointInfo *p,PointInfo *q) { /* Get the angle between line (p,q) and horizontal axis, in degrees. */ return(RadiansToDegrees(atan2(q->y-p->y,q->x-p->x))); } static inline double getDistance(PointInfo *p,PointInfo *q) { double distance; distance=hypot(p->x-q->x,p->y-q->y); return(distance*distance); } static inline double getProjection(PointInfo *p,PointInfo *q,PointInfo *v) { double distance; /* Projection of vector (x,y) - p into a line passing through p and q. */ distance=getDistance(p,q); if (distance < MagickEpsilon) return(INFINITY); return((q->x-p->x)*(v->x-p->x)+(v->y-p->y)*(q->y-p->y))/sqrt(distance); } static inline double getFeretDiameter(PointInfo *p,PointInfo *q,PointInfo *v) { double distance; /* Distance from a point (x,y) to a line passing through p and q. */ distance=getDistance(p,q); if (distance < MagickEpsilon) return(INFINITY); return((q->x-p->x)*(v->y-p->y)-(v->x-p->x)*(q->y-p->y))/sqrt(distance); } MagickExport PointInfo *GetImageMinimumBoundingBox(Image *image, size_t *number_vertices,ExceptionInfo *exception) { CaliperInfo caliper_info; const char *artifact; double angle, diameter, distance; PointInfo *bounding_box, *vertices; ssize_t i; size_t number_hull_vertices; /* Generate the minimum bounding box with the "Rotating Calipers" algorithm. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); *number_vertices=0; vertices=GetImageConvexHull(image,&number_hull_vertices,exception); if (vertices == (PointInfo *) NULL) return((PointInfo *) NULL); *number_vertices=4; bounding_box=(PointInfo *) AcquireQuantumMemory(*number_vertices, sizeof(*bounding_box)); if (bounding_box == (PointInfo *) NULL) { vertices=(PointInfo *) RelinquishMagickMemory(vertices); return((PointInfo *) NULL); } caliper_info.area=2.0*image->columns*image->rows; caliper_info.width=(double) image->columns+image->rows; caliper_info.height=0.0; caliper_info.projection=0.0; caliper_info.p=(-1); caliper_info.q=(-1); caliper_info.v=(-1); for (i=0; i < (ssize_t) number_hull_vertices; i++) { double area = 0.0, max_projection = 0.0, min_diameter = -1.0, min_projection = 0.0; ssize_t j, k; ssize_t p = -1, q = -1, v = -1; for (j=0; j < (ssize_t) number_hull_vertices; j++) { diameter=fabs(getFeretDiameter(&vertices[i], &vertices[(i+1) % number_hull_vertices],&vertices[j])); if (min_diameter < diameter) { min_diameter=diameter; p=i; q=(i+1) % number_hull_vertices; v=j; } } for (k=0; k < (ssize_t) number_hull_vertices; k++) { double projection; /* Rotating calipers. 
*/ projection=getProjection(&vertices[p],&vertices[q],&vertices[k]); min_projection=MagickMin(min_projection,projection); max_projection=MagickMax(max_projection,projection); } area=min_diameter*(max_projection-min_projection); if (caliper_info.area > area) { caliper_info.area=area; caliper_info.width=min_diameter; caliper_info.height=max_projection-min_projection; caliper_info.projection=max_projection; caliper_info.p=p; caliper_info.q=q; caliper_info.v=v; } } /* Initialize minimum bounding box. */ diameter=getFeretDiameter(&vertices[caliper_info.p], &vertices[caliper_info.q],&vertices[caliper_info.v]); angle=atan2(vertices[caliper_info.q].y-vertices[caliper_info.p].y, vertices[caliper_info.q].x-vertices[caliper_info.p].x); bounding_box[0].x=vertices[caliper_info.p].x+cos(angle)* caliper_info.projection; bounding_box[0].y=vertices[caliper_info.p].y+sin(angle)* caliper_info.projection; bounding_box[1].x=floor(bounding_box[0].x+cos(angle+MagickPI/2.0)*diameter+ 0.5); bounding_box[1].y=floor(bounding_box[0].y+sin(angle+MagickPI/2.0)*diameter+ 0.5); bounding_box[2].x=floor(bounding_box[1].x+cos(angle)*(-caliper_info.height)+ 0.5); bounding_box[2].y=floor(bounding_box[1].y+sin(angle)*(-caliper_info.height)+ 0.5); bounding_box[3].x=floor(bounding_box[2].x+cos(angle+MagickPI/2.0)*(-diameter)+ 0.5); bounding_box[3].y=floor(bounding_box[2].y+sin(angle+MagickPI/2.0)*(-diameter)+ 0.5); /* Export minimum bounding box properties. */ (void) FormatImageProperty(image,"minimum-bounding-box:area","%.*g", GetMagickPrecision(),caliper_info.area); (void) FormatImageProperty(image,"minimum-bounding-box:width","%.*g", GetMagickPrecision(),caliper_info.width); (void) FormatImageProperty(image,"minimum-bounding-box:height","%.*g", GetMagickPrecision(),caliper_info.height); (void) FormatImageProperty(image,"minimum-bounding-box:_p","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.p].x, GetMagickPrecision(),vertices[caliper_info.p].y); (void) FormatImageProperty(image,"minimum-bounding-box:_q","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.q].x, GetMagickPrecision(),vertices[caliper_info.q].y); (void) FormatImageProperty(image,"minimum-bounding-box:_v","%.*g,%.*g", GetMagickPrecision(),vertices[caliper_info.v].x, GetMagickPrecision(),vertices[caliper_info.v].y); /* Find smallest angle to origin. */ distance=hypot(bounding_box[0].x,bounding_box[0].y); angle=getAngle(&bounding_box[0],&bounding_box[1]); for (i=1; i < 4; i++) { double d = hypot(bounding_box[i].x,bounding_box[i].y); if (d < distance) { distance=d; angle=getAngle(&bounding_box[i],&bounding_box[(i+1) % 4]); } } artifact=GetImageArtifact(image,"minimum-bounding-box:orientation"); if (artifact != (const char *) NULL) { double length, q_length, p_length; PointInfo delta, point; /* Find smallest perpendicular distance from edge to origin. 
*/ point=bounding_box[0]; for (i=1; i < 4; i++) { if (bounding_box[i].x < point.x) point.x=bounding_box[i].x; if (bounding_box[i].y < point.y) point.y=bounding_box[i].y; } for (i=0; i < 4; i++) { bounding_box[i].x-=point.x; bounding_box[i].y-=point.y; } for (i=0; i < 4; i++) { double d, intercept, slope; delta.x=bounding_box[(i+1) % 4].x-bounding_box[i].x; delta.y=bounding_box[(i+1) % 4].y-bounding_box[i].y; slope=delta.y*PerceptibleReciprocal(delta.x); intercept=bounding_box[(i+1) % 4].y-slope*bounding_box[i].x; d=fabs((slope*bounding_box[i].x-bounding_box[i].y+intercept)* PerceptibleReciprocal(sqrt(slope*slope+1.0))); if ((i == 0) || (d < distance)) { distance=d; point=delta; } } angle=RadiansToDegrees(atan(point.y*PerceptibleReciprocal(point.x))); length=hypot(point.x,point.y); p_length=fabs((double) MagickMax(caliper_info.width,caliper_info.height)- length); q_length=fabs(length-(double) MagickMin(caliper_info.width, caliper_info.height)); if (LocaleCompare(artifact,"landscape") == 0) { if (p_length > q_length) angle+=(angle < 0.0) ? 90.0 : -90.0; } else if (LocaleCompare(artifact,"portrait") == 0) { if (p_length < q_length) angle+=(angle >= 0.0) ? 90.0 : -90.0; } } (void) FormatImageProperty(image,"minimum-bounding-box:angle","%.*g", GetMagickPrecision(),angle); (void) FormatImageProperty(image,"minimum-bounding-box:unrotate","%.*g", GetMagickPrecision(),-angle); vertices=(PointInfo *) RelinquishMagickMemory(vertices); return(bounding_box); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. % */ MagickExport size_t GetImageQuantumDepth(const Image *image, const MagickBooleanType constrain) { size_t depth; depth=image->depth; if (depth <= 8) depth=8; else if (depth <= 16) depth=16; else if (depth <= 32) depth=32; else if (depth <= 64) depth=64; if (constrain != MagickFalse) depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageType() returns the type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % The format of the GetImageType method is: % % ImageType GetImageType(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport ImageType GetImageType(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } if (IsImageMonochrome(image) != MagickFalse) return(BilevelType); if (IsImageGray(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(GrayscaleAlphaType); return(GrayscaleType); } if (IsPaletteImage(image) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageGray() returns grayscale if all the pixels in the image have % the same red, green, and blue intensities, and bi-level if the intensity is % either 0 or QuantumRange. Otherwise undefined is returned. % % The format of the IdentifyImageGray method is: % % ImageType IdentifyImageGray(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageGray(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; const Quantum *p; ssize_t x; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (IsImageGray(image) != MagickFalse) return(image->type); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(UndefinedType); type=BilevelType; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelGray(image,p) == MagickFalse) { type=UndefinedType; break; } if ((type == BilevelType) && (IsPixelMonochrome(image,p) == MagickFalse)) type=GrayscaleType; p+=GetPixelChannels(image); } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if ((type == GrayscaleType) && (image->alpha_trait != UndefinedPixelTrait)) type=GrayscaleAlphaType; return(type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageMonochrome() returns MagickTrue if all the pixels in the image % have the same red, green, and blue intensities and the intensity is either % 0 or QuantumRange. % % The format of the IdentifyImageMonochrome method is: % % MagickBooleanType IdentifyImageMonochrome(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType IdentifyImageMonochrome(const Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType bilevel; ssize_t x; const Quantum *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) return(MagickFalse); bilevel=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsPixelMonochrome(image,p) == MagickFalse) { bilevel=MagickFalse; break; } p+=GetPixelChannels(image); } if (bilevel == MagickFalse) break; } image_view=DestroyCacheView(image_view); return(bilevel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i f y I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentifyImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,IdentifyImageType(image,exception),exception); % % The format of the IdentifyImageType method is: % % ImageType IdentifyImageType(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType IdentifyImageType(const Image *image, ExceptionInfo *exception) { ImageType type; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->alpha_trait == UndefinedPixelTrait) return(ColorSeparationType); return(ColorSeparationAlphaType); } type=IdentifyImageGray(image,exception); if (IsGrayImageType(type)) return(type); if (IdentifyPaletteImage(image,exception) != MagickFalse) { if (image->alpha_trait != UndefinedPixelTrait) return(PaletteAlphaType); return(PaletteType); } if (image->alpha_trait != UndefinedPixelTrait) return(TrueColorAlphaType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e G r a y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageGray() returns MagickTrue if the type of the image is grayscale or % bi-level. % % The format of the IsImageGray method is: % % MagickBooleanType IsImageGray(const Image *image) % % A description of each parameter follows: % % o image: the image. 
% */ MagickExport MagickBooleanType IsImageGray(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (IsGrayImageType(image->type)) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e M o n o c h r o m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageMonochrome() returns MagickTrue if the type of the image is bi-level. % % The format of the IsImageMonochrome method is: % % MagickBooleanType IsImageMonochrome(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageMonochrome(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type == BilevelType) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O p a q u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageOpaque() returns MagickTrue if none of the pixels in the image have % an alpha value other than OpaqueAlpha (QuantumRange). % % It returns MagickTrue immediately if the alpha channel is not available. % % The format of the IsImageOpaque method is: % % MagickBooleanType IsImageOpaque(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsImageOpaque(const Image *image, ExceptionInfo *exception) { CacheView *image_view; const Quantum *p; ssize_t x; ssize_t y; /* Determine if image is opaque. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->alpha_trait == UndefinedPixelTrait) return(MagickTrue); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelAlpha(image,p) != OpaqueAlpha) break; p+=GetPixelChannels(image); } if (x < (ssize_t) image->columns) break; } image_view=DestroyCacheView(image_view); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageDepth() sets the depth of the image. % % The format of the SetImageDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o depth: the image depth. % % o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType SetImageDepth(Image *image, const size_t depth,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; QuantumAny range; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (depth >= MAGICKCORE_QUANTUM_DEPTH) { image->depth=depth; return(MagickTrue); } range=GetQuantumRange(depth); if (image->storage_class == PseudoClass) { ssize_t i; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->colors,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].red),range),range); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].green),range),range); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].blue),range),range); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) ScaleAnyToQuantum(ScaleQuantumToAny( ClampPixel(image->colormap[i].alpha),range),range); } } status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if !defined(MAGICKCORE_HDRI_SUPPORT) DisableMSCWarning(4127) if ((1UL*QuantumRange) <= MaxMap) RestoreMSCWarning { Quantum *depth_map; ssize_t i; /* Scale pixels to desired (optimized with depth map). */ depth_map=(Quantum *) AcquireQuantumMemory(MaxMap+1,sizeof(*depth_map)); if (depth_map == (Quantum *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); for (i=0; i <= (ssize_t) MaxMap; i++) depth_map[i]=ScaleAnyToQuantum(ScaleQuantumToAny((Quantum) i,range), range); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t j; for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,j); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=depth_map[ScaleQuantumToMap(q[j])]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); depth_map=(Quantum *) RelinquishMagickMemory(depth_map); if (status != MagickFalse) image->depth=depth; return(status); } #endif /* Scale pixels to desired depth. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { ssize_t x; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel; PixelTrait traits; channel=GetPixelChannelChannel(image,i); traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ScaleAnyToQuantum(ScaleQuantumToAny(ClampPixel((MagickRealType) q[i]),range),range); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); if (status != MagickFalse) image->depth=depth; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageType() sets the type of image. Choose from these types: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % OptimizeType % % The format of the SetImageType method is: % % MagickBooleanType SetImageType(Image *image,const ImageType type, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: Image type. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageType(Image *image,const ImageType type, ExceptionInfo *exception) { const char *artifact; ImageInfo *image_info; MagickBooleanType status; QuantizeInfo *quantize_info; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); status=MagickTrue; image_info=AcquireImageInfo(); image_info->dither=image->dither; artifact=GetImageArtifact(image,"dither"); if (artifact != (const char *) NULL) (void) SetImageOption(image_info,"dither",artifact); switch (type) { case BilevelType: { status=TransformImageColorspace(image,GRAYColorspace,exception); (void) NormalizeImage(image,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=2; quantize_info->colorspace=GRAYColorspace; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleType: { status=TransformImageColorspace(image,GRAYColorspace,exception); image->alpha_trait=UndefinedPixelTrait; break; } case GrayscaleAlphaType: { status=TransformImageColorspace(image,GRAYColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case PaletteType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if ((image->storage_class == DirectClass) || (image->colors > 256)) { quantize_info=AcquireQuantizeInfo(image_info); quantize_info->number_colors=256; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); } image->alpha_trait=UndefinedPixelTrait; break; } case PaletteBilevelAlphaType: { 
ChannelType channel_mask; status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); channel_mask=SetImageChannelMask(image,AlphaChannel); (void) BilevelImage(image,(double) QuantumRange/2.0,exception); (void) SetImageChannelMask(image,channel_mask); quantize_info=AcquireQuantizeInfo(image_info); status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case PaletteAlphaType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); quantize_info=AcquireQuantizeInfo(image_info); quantize_info->colorspace=TransparentColorspace; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); break; } case TrueColorType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case TrueColorAlphaType: { status=TransformImageColorspace(image,sRGBColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case ColorSeparationType: { status=TransformImageColorspace(image,CMYKColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); image->alpha_trait=UndefinedPixelTrait; break; } case ColorSeparationAlphaType: { status=TransformImageColorspace(image,CMYKColorspace,exception); if (image->storage_class != DirectClass) status=SetImageStorageClass(image,DirectClass,exception); if (image->alpha_trait == UndefinedPixelTrait) status=SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); break; } case OptimizeType: case UndefinedType: break; } image_info=DestroyImageInfo(image_info); if (status == MagickFalse) return(status); image->type=type; return(MagickTrue); }
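/*
  Illustrative usage sketch (not part of MagickCore): one way a caller might
  combine GetImageBoundingBox() with CropImage(), both defined or used in this
  file, to implement a simple trim operation.  The helper name
  TrimToBoundingBox() is hypothetical.
*/
#if 0
static Image *TrimToBoundingBox(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    bounds;

  /* bounding box of the non-background region, relative to the image origin */
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width == 0) || (bounds.height == 0))
    return((Image *) NULL);  /* image is entirely background */
  /* crop to that box; the caller owns the returned image */
  return(CropImage(image,&bounds,exception));
}
#endif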
Matrix_Add_RowMajor.c
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> /* wall-clock time in seconds, based on gettimeofday() */ double rtclock() { struct timezone Tzp; struct timeval Tp; int stat; stat = gettimeofday (&Tp, &Tzp); if (stat != 0) printf("Error return from gettimeofday: %d\n",stat); return(Tp.tv_sec + Tp.tv_usec*1.0e-6); } #define size 10000 #define NT 8 /* number of OpenMP threads */ int A[size][size]; int B[size][size]; int C[size][size]; int main(int argc, char *argv[]){ if(argc!=2){ printf("Usage: <path-to-executable> <seed-value> (example: ./a.out 3)\n"); exit(0); } srand(atoi(argv[1])); /* seed the random number generator from the command-line integer */ /* fill A and B with random values */ for(int i=0;i<size;i++){ for(int j=0;j<size;j++){ A[i][j]=rand()%1048576; B[i][j]=rand()%1048576; } } double t1=rtclock(); /* element-wise matrix addition, parallelized over rows */ #pragma omp parallel for num_threads(NT) for(int i=0;i<size;i++) for(int j=0;j<size;j++) C[i][j]=A[i][j]+B[i][j]; double t2=rtclock(); printf("\nTIME = %f ms\n",(t2-t1)*1000); return 0; } /* Compile with OpenMP enabled, e.g.: gcc -O2 -fopenmp Matrix_Add_RowMajor.c * Run: <executable-path> <integer-seed-value> * example: ./a.out 3 */
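/*
  Alternative timing sketch (an addition, not part of the original program):
  when the file is compiled with -fopenmp, omp_get_wtime() provides a portable
  wall-clock timer and can replace the gettimeofday() wrapper above.
*/
#if 0
#include <omp.h>
#include <stdio.h>
int main(void){
  double start=omp_get_wtime();
  /* ... region to be timed, e.g. the parallel matrix addition ... */
  double end=omp_get_wtime();
  printf("TIME = %f ms\n",(end-start)*1000.0);
  return 0;
}
#endif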
GB_binop__isle_int32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_08__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_02__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_04__isle_int32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_int32) // A*D function (colscale): GB (_AxD__isle_int32) // D*A function (rowscale): GB (_DxB__isle_int32) // C+=B function (dense accum): GB (_Cdense_accumB__isle_int32) // C+=b function (dense accum): GB (_Cdense_accumb__isle_int32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_int32) // C=scalar+B GB (_bind1st__isle_int32) // C=scalar+B' GB (_bind1st_tran__isle_int32) // C=A+scalar GB (_bind2nd__isle_int32) // C=A'+scalar GB (_bind2nd_tran__isle_int32) // C type: int32_t // A type: int32_t // A pattern? 0 // B type: int32_t // B pattern? 0 // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ int32_t #define GB_BTYPE \ int32_t #define GB_CTYPE \ int32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_INT32 || GxB_NO_ISLE_INT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_int32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_int32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int32_t int32_t bwork = (*((int32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_int32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *restrict Cx = (int32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_int32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; int32_t alpha_scalar ; int32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((int32_t *) alpha_scalar_in)) ; beta_scalar 
= (*((int32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isle_int32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isle_int32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_int32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_int32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t *Cx = (int32_t *) Cx_output ; int32_t x = (*((int32_t *) x_input)) ; int32_t *Bx = (int32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int32_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_int32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int32_t *Cx = (int32_t *) Cx_output ; int32_t *Ax = (int32_t *) Ax_input ; int32_t y = (*((int32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int32_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_int32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t x = (*((const int32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_int32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int32_t y = (*((const int32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
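/* A minimal usage sketch (not part of the generated kernel file above): it
   assumes the standard SuiteSparse:GraphBLAS user API, in which binding a
   scalar to the second operand of GxB_ISLE_INT32 is what ultimately reaches a
   bind2nd kernel such as GB (_bind2nd__isle_int32).  Matrix names and sizes
   here are illustrative only. */
#include <GraphBLAS.h>

int isle_scalar_demo (void)
{
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix A = NULL, C = NULL ;
    GrB_Matrix_new (&A, GrB_INT32, 4, 4) ;
    GrB_Matrix_setElement_INT32 (A, 3, 0, 0) ;      /* A(0,0) = 3 */
    GrB_Matrix_setElement_INT32 (A, 9, 1, 2) ;      /* A(1,2) = 9 */
    GrB_Matrix_new (&C, GrB_INT32, 4, 4) ;
    /* C(i,j) = (A(i,j) <= 5) for each stored entry of A */
    GrB_Matrix_apply_BinaryOp2nd_INT32 (C, NULL, NULL, GxB_ISLE_INT32, A, 5,
        NULL) ;
    GrB_Matrix_free (&A) ;
    GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
    return (0) ;
}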
GB_unaryop__abs_uint64_int16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_uint64_int16 // op(A') function: GB_tran__abs_uint64_int16 // C type: uint64_t // A type: int16_t // cast: uint64_t cij = (uint64_t) aij // unaryop: cij = aij #define GB_ATYPE \ int16_t #define GB_CTYPE \ uint64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CASTING(z, x) \ uint64_t z = (uint64_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_UINT64 || GxB_NO_INT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_uint64_int16 ( uint64_t *restrict Cx, const int16_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_uint64_int16 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
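/* Side note on the cast-then-apply order used in the file above: the int16
   input is typecast to uint64 first, and only then is ABS applied; ABS on an
   unsigned type is the identity, which is why GB_OP reduces to "z = x".  A
   tiny standalone illustration of the underlying C cast semantics (plain
   ISO C, independent of GraphBLAS): */
#include <stdio.h>
#include <stdint.h>

int main (void)
{
    int16_t  a = -5 ;
    uint64_t c = (uint64_t) a ;     /* cast first: wraps to 2^64 - 5 */
    printf ("(uint64_t)(-5 as int16) = %llu\n", (unsigned long long) c) ;
    return (0) ;
}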
GB_subassign_zombie.c
//------------------------------------------------------------------------------ // GB_subassign_zombie: C(I,J)<!,repl> = empty ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 00: C(I,J)<!,repl> = empty ; using S // M: NULL // Mask_comp: true // C_replace: true // accum: any (present or not; result is the same) // A: any (scalar or matrix; result is the same) // S: constructed // C: not bitmap // C->iso is not affected. #include "GB_subassign_methods.h" #undef GB_FREE_ALL #define GB_FREE_ALL GB_phbix_free (S) ; GrB_Info GB_subassign_zombie ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t ni, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nj, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ; //-------------------------------------------------------------------------- // S = C(I,J) //-------------------------------------------------------------------------- GrB_Info info ; struct GB_Matrix_opaque S_header ; GrB_Matrix S = GB_clear_static_header (&S_header) ; GB_OK (GB_subassign_symbolic (S, C, I, ni, J, nj, false, Context)) ; ASSERT (GB_JUMBLED_OK (S)) ; // S can be returned as jumbled //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- const int64_t *restrict Sx = (int64_t *) S->x ; int64_t *restrict Ci = C->i ; //-------------------------------------------------------------------------- // Method 00: C(I,J)<!,repl> = empty ; using S //-------------------------------------------------------------------------- // Time: Optimal, O(nnz(S)), assuming S has already been constructed. //-------------------------------------------------------------------------- // Parallel: all entries in S can be processed entirely in parallel. //-------------------------------------------------------------------------- // All entries in C(I,J) are deleted. The result does not depend on A or // the scalar. int64_t snz = GB_nnz (S) ; GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ; int nthreads = GB_nthreads (snz, chunk, nthreads_max) ; int64_t nzombies = C->nzombies ; int64_t pS ; #pragma omp parallel for num_threads(nthreads) schedule(static) \ reduction(+:nzombies) for (pS = 0 ; pS < snz ; pS++) { // S (inew,jnew) is a pointer back into C (I(inew), J(jnew)) int64_t pC = Sx [pS] ; int64_t i = Ci [pC] ; // ----[X A 0] or [X . 0]----------------------------------------------- // action: ( X ): still a zombie // ----[C A 0] or [C . 0]----------------------------------------------- // action: C_repl: ( delete ): becomes a zombie if (!GB_IS_ZOMBIE (i)) { nzombies++ ; Ci [pC] = GB_FLIP (i) ; } } //-------------------------------------------------------------------------- // free workspace and return result //-------------------------------------------------------------------------- C->nzombies = nzombies ; GB_FREE_ALL ; return (GrB_SUCCESS) ; }
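/* A small standalone sketch of the "zombie" convention used above: a deleted
   entry is not removed immediately; its row index Ci [pC] is flipped to a
   negative value so it can be identified and pruned later.  ZFLIP below is an
   illustrative stand-in that assumes the usual GraphBLAS definition,
   GB_FLIP (i) = -(i)-2; see GB.h for the authoritative macros. */
#include <stdio.h>
#include <stdint.h>

#define ZFLIP(i)      (-(i) - 2)        /* involution: ZFLIP (ZFLIP (i)) == i */
#define ZIS_ZOMBIE(i) ((i) < 0)

int main (void)
{
    int64_t Ci [4] = { 0, 3, 7, 9 } ;   /* row indices of 4 live entries */
    Ci [1] = ZFLIP (Ci [1]) ;           /* delete the 2nd entry: index becomes -5 */
    for (int p = 0 ; p < 4 ; p++)
    {
        if (ZIS_ZOMBIE (Ci [p]))
            printf ("entry %d is a zombie (original row %lld)\n",
                p, (long long) ZFLIP (Ci [p])) ;
        else
            printf ("entry %d is live at row %lld\n", p, (long long) Ci [p]) ;
    }
    return (0) ;
}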
lastprivate-conditional-2.c
void
foo (int *p)
{
  int a = -1, b = -1, c = -1, d = -1, e = -1, f = -1, g = -1, h = -1;
  int i;
  #pragma omp parallel
  #pragma omp for lastprivate (conditional: a)
  for (i = 0; i < 32; i++)
    if (p[i])
      a = i;
  #pragma omp simd lastprivate (conditional: b)
  for (i = 0; i < 32; i++)
    if (p[i])
      b = i;
  #pragma omp parallel
  #pragma omp for simd lastprivate (conditional: c)
  for (i = 0; i < 32; i++)
    if (p[i])
      c = i;
  #pragma omp parallel for lastprivate (conditional: d)
  for (i = 0; i < 32; i++)
    if (p[i])
      d = i;
  #pragma omp parallel for simd lastprivate (conditional: e)
  for (i = 0; i < 32; i++)
    if (p[i])
      e = i;
}
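/* A minimal sketch (not part of the testcase above) of what
   lastprivate (conditional:) is required to produce: after the loop, the
   listed variable holds the value assigned in the sequentially last iteration
   that actually executed the assignment -- here, the largest i with p[i]
   nonzero.  The serial loop below states that reference semantics. */
int last_set_index (int *p)
{
  int a = -1;
  for (int i = 0; i < 32; i++)
    if (p[i])
      a = i;    /* same result the conditional lastprivate versions must yield */
  return a;
}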
test_vadd.c
#include <stdio.h> #include <stdlib.h> #include <assert.h> #ifdef __cilk #include <cilk/cilk.h> #endif #ifdef _OPENMP #include <omp.h> #endif #if USE_GFX #include <gfx/gfx_rt.h> #endif #define RESTRICT double vdiff(int n, const float * RESTRICT a, const float * RESTRICT b) { double d = 0.0; for(int i = 0; i < n; i++) { d += (a[i] - b[i]); } return d; } void vadd0(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c) { for(int i = 0; i < n; i++) c[i] = a[i] + b[i]; } void vadd1(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c) { #if defined(_OPENMP) && (_OPENMP >= 201307) #pragma omp parallel for simd #elif defined(_OPENMP) #warning No OpenMP simd support! #pragma omp parallel for #else #warning No OpenMP support! #endif for(int i = 0; i < n; i++) c[i] = a[i] + b[i]; } void vadd2(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c) { #if defined(_OPENMP) && (_OPENMP >= 201307) #pragma omp target map(to:n,a[0:n],b[0:n]) map(from:c[0:n]) # if defined(__INTEL_COMPILER) && defined(__INTEL_OFFLOAD) #pragma omp parallel for simd # else #pragma omp teams distribute parallel for simd # endif #else #warning No OpenMP target/simd support! #pragma omp parallel for #endif for(int i = 0; i < n; i++) c[i] = a[i] + b[i]; } void vadd3(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c) { #ifdef __cilk _Cilk_for(int i = 0; i < n; i++) #else #warning No Cilk support. Using sequential for loop. for(int i = 0; i < n; i++) #endif c[i] = a[i] + b[i]; } void vadd4(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c) { #ifdef __cilk #if defined(__INTEL_COMPILER) && defined(__INTEL_OFFLOAD) #pragma offload target(gfx) in(a,b : length(n)) out(c : length(n)) //pin(a, b, c : length(n)) #else #warning No Cilk offload support! #endif _Cilk_for(int i = 0; i < n; i++) #else #warning No Cilk support. Using sequential for loop. for(int i = 0; i < n; i++) #endif c[i] = a[i] + b[i]; } #if USE_GFX /* The following works for Linux but not Windows. */ __attribute__((target(gfx_kernel))) void gfx_vadd5(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c) { _Cilk_for(int i = 0; i < n; i++) c[i] = a[i] + b[i]; } void vadd5(int n, float * RESTRICT a, float * RESTRICT b, float * RESTRICT c) { int rc = 0; rc = _GFX_share(a, sizeof(float)*n); if (rc) printf("_GFX_share returned %#06x\n", -_GFX_get_last_error()); rc = _GFX_share(b, sizeof(float)*n); if (rc) printf("_GFX_share returned %#06x\n", -_GFX_get_last_error()); rc = _GFX_share(c, sizeof(float)*n); if (rc) printf("_GFX_share returned %#06x\n", -_GFX_get_last_error()); GfxTaskId id = _GFX_offload("gfx_vadd5", a, b, c, n); rc = _GFX_wait(id,1e9); if (rc) printf("_GFX_wait returned %#06x\n", -_GFX_get_last_error()); rc = _GFX_unshare(a); if (rc) printf("_GFX_unshare returned %#06x\n", -_GFX_get_last_error()); rc = _GFX_unshare(b); if (rc) printf("_GFX_unshare returned %#06x\n", -_GFX_get_last_error()); rc = _GFX_unshare(c); if (rc) printf("_GFX_unshare returned %#06x\n", -_GFX_get_last_error()); } #endif /* USE_GFX */ int main(int argc, char * argv[]) { int n = (argc > 1 ) ? 
atoi(argv[1]) : 1000; float * x = calloc(n,sizeof(float)); assert(x !=NULL); float * y = calloc(n,sizeof(float)); assert(y !=NULL); float * z0 = calloc(n,sizeof(float)); assert(z0!=NULL); float * z1 = calloc(n,sizeof(float)); assert(z1!=NULL); float * z2 = calloc(n,sizeof(float)); assert(z2!=NULL); float * z3 = calloc(n,sizeof(float)); assert(z3!=NULL); float * z4 = calloc(n,sizeof(float)); assert(z4!=NULL); #if USE_GFX float * z5 = calloc(n,sizeof(float)); assert(z5!=NULL); #endif #if 0 && defined(_OPENMP) && (_OPENMP >= 201307) int nthrd = omp_get_max_threads(); int ndevs = omp_get_num_devices(); printf("OpenMP threads = %d devices = %d\n", nthrd, ndevs); #endif for (int i=0; i<n; i++) { x[i] = (float)i; } for (int i=0; i<n; i++) { y[i] = (float)i; } for (int iter=0; iter<10; iter++) { double t0 = omp_get_wtime(); vadd0(n,x,y,z0); double t1 = omp_get_wtime(); vadd1(n,x,y,z1); double t2 = omp_get_wtime(); vadd2(n,x,y,z2); double t3 = omp_get_wtime(); vadd3(n,x,y,z3); double t4 = omp_get_wtime(); vadd4(n,x,y,z4); double t5 = omp_get_wtime(); #if USE_GFX vadd5(n,x,y,z5); double t6 = omp_get_wtime(); #endif printf("%20s time = %lf \n", "for", t1-t0); printf("%20s time = %lf (error=%lf) \n", "OpenMP for", t2-t1, vdiff(n,z0,z1)); printf("%20s time = %lf (error=%lf) \n", "OpenMP offload for", t3-t2, vdiff(n,z0,z2)); #ifdef __cilk printf("%20s time = %lf (error=%lf) \n", "_Cilk_for", t4-t3, vdiff(n,z0,z3)); printf("%20s time = %lf (error=%lf) \n", "offload _Cilk_for", t5-t4, vdiff(n,z0,z4)); #endif #if USE_GFX printf("%20s time = %lf (error=%lf) \n", "GFX RT offload _Cilk_for", t6-t5, vdiff(n,z0,z5)); #if 0 for (int i=0; i<n; i++) { printf("%d z0=%f z5=%f\n", i, z0[i], z5[i]); } #endif #endif /* prevent compiler from optimizing away anything */ double junk = t0+t1+t2+t3+t4+t5; for (int i=0; i<n; i++) { junk += z0[i] + z1[i] + z2[i] + z3[i] + z4[i]; // + z5[i]; } printf("junk=%lf\n", junk); } #if USE_GFX free(z5); #endif free(z4); free(z3); free(z2); free(z1); free(z0); free(y); free(x); printf("Success\n"); return 0; }
reweighting.c
#include <math.h> #include <string.h> #include <stdlib.h> #include <stdio.h> #include <stdint.h> #include "ccmpred.h" #include "reweighting.h" void calculate_weights(conjugrad_float_t *w, unsigned char *msa, uint64_t ncol, uint64_t nrow, conjugrad_float_t threshold) { int idthres = (int)ceil(threshold * ncol); memset(w, 0, sizeof(conjugrad_float_t) * nrow); uint64_t nij = nrow * (nrow + 1) / 2; #pragma omp parallel #pragma omp for nowait for(uint64_t ij = 0; ij < nij; ij++) { // compute i and j from ij // http://stackoverflow.com/a/244550/1181102 uint64_t i, j; { uint64_t ii = nrow * (nrow + 1) / 2 - 1 - ij; uint64_t K = floor((sqrt(8 * ii + 1) - 1) / 2); i = nrow - 1 - K; j = ij - nrow * i + i * (i + 1) / 2; } int ids = 0; for(uint64_t k = 0; k < ncol; k++) { if(msa[msa_index(i, k)] == msa[msa_index(j, k)]) { ids++; } } if(ids > idthres) { w[i]++; w[j]++; } } for(uint64_t i = 0; i < nrow; i++) { w[i] = 1./(w[i] - 1); } conjugrad_float_t wsum = 0; conjugrad_float_t wmin = w[0], wmax = w[0]; for(uint64_t i = 0; i < nrow; i++) { conjugrad_float_t wt = w[i]; wsum += wt; if(wt > wmax) { wmax = wt; } if(wt < wmin) { wmin = wt; } } printf("Reweighted %ld sequences with threshold %.1f to Beff=%g weight mean=%g, min=%g, max=%g\n", nrow, threshold, wsum, wsum / nrow, wmin, wmax); } void uniform_weights(conjugrad_float_t *w, uint64_t nrow) { for(uint64_t i = 0; i < nrow; i++) { w[i] = F1; } printf("Using uniform weights on %ld sequences\n", nrow); }
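/* A small standalone check (not part of ccmpred) of the flattened-index trick
   used above: for one small nrow it recomputes (i, j) from ij and verifies
   that the pairs come out as (0,0), (0,1), ..., (0,nrow-1), (1,1), ..., i.e.
   every pair with i <= j < nrow exactly once. */
#include <math.h>
#include <stdio.h>
#include <stdint.h>

int main(void) {
    uint64_t nrow = 7;
    uint64_t nij = nrow * (nrow + 1) / 2;
    uint64_t expect_i = 0, expect_j = 0;
    int ok = 1;
    for(uint64_t ij = 0; ij < nij; ij++) {
        uint64_t ii = nrow * (nrow + 1) / 2 - 1 - ij;
        uint64_t K = (uint64_t)floor((sqrt(8.0 * ii + 1.0) - 1.0) / 2.0);
        uint64_t i = nrow - 1 - K;
        uint64_t j = ij - nrow * i + i * (i + 1) / 2;
        if(i != expect_i || j != expect_j) { ok = 0; break; }
        if(++expect_j == nrow) { expect_j = ++expect_i; }  /* next row of the triangle */
    }
    printf(ok ? "index mapping OK\n" : "index mapping WRONG\n");
    return 0;
}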
FastTree.c
/* * FastTree -- inferring approximately-maximum-likelihood trees for large * multiple sequence alignments. * * Morgan N. Price, 2008-2011 * http://www.microbesonline.org/fasttree/ * * Thanks to Jim Hester of the Cleveland Clinic Foundation for * providing the first parallel (OpenMP) code, Siavash Mirarab of * UT Austin for implementing the WAG option, and Samuel Shepard * at the CDC for suggesting and helping with the -quote option. * * Copyright (C) 2008-2011 The Regents of the University of California * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * or visit http://www.gnu.org/copyleft/gpl.html * * Disclaimer * * NEITHER THE UNITED STATES NOR THE UNITED STATES DEPARTMENT OF ENERGY, * NOR ANY OF THEIR EMPLOYEES, MAKES ANY WARRANTY, EXPRESS OR IMPLIED, * OR ASSUMES ANY LEGAL LIABILITY OR RESPONSIBILITY FOR THE ACCURACY, * COMPLETENESS, OR USEFULNESS OF ANY INFORMATION, APPARATUS, PRODUCT, * OR PROCESS DISCLOSED, OR REPRESENTS THAT ITS USE WOULD NOT INFRINGE * PRIVATELY OWNED RIGHTS. */ /* * To compile FastTree, do: * gcc -Wall -O3 -finline-functions -funroll-loops -o FastTree -lm FastTree.c * Use -DNO_SSE to turn off use of SSE3 instructions * (should not be necessary because compiler should not set __SSE__ if * not available, and modern mallocs should return 16-byte-aligned values) * Use -DOPENMP -fopenmp to use multiple threads (note, old versions of gcc * may not support -fopenmp) * Use -DTRACK_MEMORY if you want detailed reports of memory usage, * but results are not correct above 4GB because mallinfo stores int values. * It also makes FastTree run significantly slower. * * To get usage guidance, do: * FastTree -help * * FastTree uses profiles instead of a distance matrix, and computes * support values for each split from the profiles of the 4 nodes * around the split. It stores a profile for each node and a average * profile over all active nodes (the "out-profile" for computing the * total sum of distance to other nodes). The neighbor joining phase * requires O(N*L*a) space, where N is the number of sequences, L is * the alignment width, and a is the alphabet size. The top-hits * heuristic requires an additional O(N sqrt(N)) memory. After * neighbor-joining, FastTree improves the topology with * nearest-neighbor interchanges (NNIs) and subtree-prune-regraft * moves (SPRs), which does not have a significant additional memory * requirement. (We need only store "up-profiles" on the path from our * current traversal point to the root.) These take O(NLa) time per * round, and with default settings, O(N log(N) L a) time total. * FastTree further improves the topology with maximum-likelihood * NNIs, using similar data structures and complexity, but with a * higher constant factor, and now the "profiles" are actually * posterior distributions for that subtree. 
Finally, FastTree * resamples the site likelihoods around each NNI and uses * the Shimodaira Hasegawa test to estimate the reliability of each split. * * Overview of the neighbor-joining phase: * * Although FastTree uses a log correction on profile distances to * account for multiple substitutions when doing NNIs and SPRs, the * operations on the profiles themselves involve "additive" distances * -- either %different (for nucleotide) or by using an amino acid * similarity matrix (for proteins). If we are using %different as * our distance matrix then * * Profile_distance(A,B) = 1 - sum over characters of freq(A)*freq(B) * * and we can average this value over positions. Positions with gaps * are weighted by %ungapped(A) * %ungapped(B). * * If we are using an amino acid dissimilarity matrix D(i,j) then at * each position * * Profile_distance(A,B) = sum(i,j) freq(A==i) * freq(B==j) * D(i,j) * = sum(k) Ak * Bk * Lambda(k) * * where k iterates over 20 eigenvectors, Lambda(k) is the eigenvalue, * and if A==i, then Ak is the kth column of the inverse of the * eigenvector matrix. * * The exhaustive approach (-slow) takes O(N**3*L*a) time, but * this can be reduced to as little as O(N**(3/2)*log(N)*L*a) time * by using heuristics. * * It uses a combination of three heuristics: a visible set similar to * that of FastTree (Elias & Lagergren 2005), a local hill-climbing * search for a better join (as in relaxed neighbor-joining, Evans et * al. 2006), and a top-hit list to reduce the search space (see * below). * * The "visible" set stores, for each node, the best join for that * node, as identified at some point in the past * * If top-hits are not being used, then the neighbor-joining phase can * be summarized as: * * Compute the out-profile by averaging the leaves * Compute the out-distance of each leaf quickly, using the out-profile * Compute the visible set (or approximate it using top-hits, see below) * Until we're down to 3 active nodes: * Find the best join in the visible set * (This involves recomputing the neighbor-joining criterion, * as out-distances and #active nodes may have changed) * Follow a chain of best hits (again recomputing the criterion) * until we find a locally best join, as in relaxed neighbor joining * Create a profile of the parent node, either using simple averages (default) * or using weighted joining as in BIONJ (if -bionj was specified) * Update the out-profile and the out-distances * Update the visible set: * find the best join for the new joined node * replace hits to the joined children with hits to the parent * if we stumble across a join for the new node that is better * than the corresponding entry in the visible set, "reset" * that entry. * * For each iteration, this method does * O(N) work to find the best hit in the visible set * O(L*N*a*log(N)) work to do the local search, where log(N) * is a pessimistic estimate of the number of iterations. In * practice, we average <1 iteration for 2,000 sequences. * With -fastest, this step is omitted. * O(N*a) work to compute the joined profile and update the out-profile * O(L*N*a) work to update the out-distances * O(L*N*a) work to compare the joined profile to the other nodes * (to find the new entry in the visible set) * * and there are N-3 iterations, so it takes O(N**2 * L * log(N) * a) time. * * The profile distances give exactly the same result as matrix * distances in neighbor-joining or BIONJ would if there are no gaps * in the alignment. If there are gaps, then it is an * approximation. 
To get the same result we also store a "diameter" * for each node (diameter is 0 for leaves). * * In the simpler case (NJ rather than BIONJ), when we join A and B to * give a new node AB, * * Profile(AB) = (A+B)/2 * Profile_distance(AB,C) = (Profile_distance(A,C)+Profile_distance(B,C))/2 * because the formulas above are linear * * And according to the neighor-joining rule, * d(AB,C) = (d(A,C)+d(B,C)-d(A,B))/2 * * and we can achieve the same value by writing * diameter(AB) = pd(A,B)/2 * diameter(leaf) = 0 * d(A,B) = pd(A,B) - diameter(A) - diameter(B) * * because * d(AB,C) = (d(A,C)+d(B,C)-d(A,B))/2 * = (pd(A,C)-diam(A)-diam(C)+pd(B,C)-diam(B)-diam(C)-d(A,B)+diam(A)+diam(B))/2 * = (pd(A,C)+pd(B,C))/2 - diam(C) - pd(A,B) * = pd(AB,C) - diam(AB) - diam(C) * * If we are using BIONJ, with weight lambda for the join: * Profile(AB) = lambda*A + (1-lambda)*B * then a similar argument gives * diam(AB) = lambda*diam(A) + (1-lambda)*diam(B) + lambda*d(A,AB) + (1-lambda)*d(B,AB), * * where, as in neighbor joining, * d(A,AB) = d(A,B) + (total out_distance(A) - total out_distance(B))/(n-2) * * A similar recursion formula works for the "variance" matrix of BIONJ, * var(AB,C) = lambda*var(A,C) + (1-lambda)*var(B,C) - lambda*(1-lambda)*var(A,B) * is equivalent to * var(A,B) = pv(A,B) - vd(A) - vd(B), where * pv(A,B) = pd(A,B) * vd(A) = 0 for leaves * vd(AB) = lambda*vd(A) + (1-lambda)*vd(B) + lambda*(1-lambda)*var(A,B) * * The top-hist heuristic to reduce the work below O(N**2*L) stores a top-hit * list of size m=sqrt(N) for each active node. * * The list can be initialized for all the leaves in sub (N**2 * L) time as follows: * Pick a "seed" sequence and compare it to all others * Store the top m hits of the seed as its top-hit list * Take "close" hits of the seed(within the top m, and see the "close" parameter), * and assume that their top m hits lie within the top 2*m hits of the seed. * So, compare them to the seed's neighors (if they do not already * have a top hit list) and set their top hits. * * This method does O(N*L) work for each seed, or O(N**(3/2)*L) work total. * * To avoid doing O(N*L) work at each iteration, we need to avoid * updating the visible set and the out-distances. So, we use "stale" * out-distances, and when searching the visible set for the best hit, * we only inspect the top m=sqrt(N) entries. We then update those * out-distances (up to 2*m*L*a work) and then find the best hit. * * To avoid searching the entire visible set, FastTree keeps * and updates a list of the top sqrt(N) entries in the visible set. * This costs O(sqrt(N)) time per join to find the best entry and to * update, or (N sqrt(N)) time overall. * * Similarly, when doing the local hill-climbing, we avoid O(N*L) work * by only considering the top-hits for the current node. So this adds * O(m*a*log(N)) work per iteration. * * When we join two nodes, we compute profiles and update the * out-profile as before. We need to compute the best hits of the node * -- we merge the lists for the children and select the best up-to-m * hits. If the top hit list contains a stale node we replace it with * its parent. If we still have <m/2 entries, we do a "refresh". * * In a "refresh", similar to the fast top-hit computation above, we * compare the "seed", in this case the new joined node, to all other * nodes. We compare its close neighbors (the top m hits) to all * neighbors (the top 2*m hits) and update the top-hit lists of all * neighbors (by merging to give a list of 3*m entries and then * selecting the best m entries). 
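 *
 * (As a rough back-of-the-envelope illustration of these sizes: with
 * N = 1,000,000 sequences, the top-hit list size is m = sqrt(N) = 1,000, so
 * the top-hit lists alone hold about N*m = 10**9 entries, compared to roughly
 * N**2/2 = 5*10**11 pairwise distances for an exhaustive comparison.  The
 * second-level heuristic described below shrinks most lists further, to
 * q = sqrt(m), about 32 hits per node.)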
* * Finally, during these processes we update the visible sets for * other nodes with better hits if we find them, and we set the * visible entry for the new joined node to the best entry in its * top-hit list. (And whenever we update a visible entry, we * do O(sqrt(N)) work to update the top-visible list.) * These udpates are not common so they do not alter the * O(N sqrt(N) log(N) L a) total running time for the joining phase. * * Second-level top hits * * With -fastest or with -2nd, FastTree uses an additional "2nd-level" top hits * heuristic to reduce the running time for the top-hits phase to * O(N**1.25 L) and for the neighbor-joining phase to O(N**1.25 L a). * This also reduces the memory usage for the top-hits lists to * O(N**1.25), which is important for alignments with a million * sequences. The key idea is to store just q = sqrt(m) top hits for * most sequences. * * Given the neighbors of A -- either for a seed or for a neighbor * from the top-hits heuristic, if B is within the top q hits of A, we * set top-hits(B) from the top 3*q top-hits of A. And, we record that * A is the "source" of the hits for B, so if we run low on hits for * B, instead of doing a full refresh, we can do top-hits(B) := * top-hits(B) union top-hits(active_ancestor(A)). * During a refresh, these "2nd-level" top hits are updated just as * normal, but the source is maintained and only q entries are stored, * until we near the end of the neighbor joining phase (until the * root as 2*m children or less). * * Parallel execution with OpenMP * * If you compile FastTree with OpenMP support, it will take * advantage of multiple CPUs on one machine. It will parallelize: * * The top hits phase * Comparing one node to many others during the NJ phase (the simplest kind of join) * The refresh phase * Optimizing likelihoods for 3 alternate topologies during ML NNIs and ML supports * (only 3 threads can be used) * * This accounts for most of the O(N L a) or slower steps except for * minimum-evolution NNIs (which are fast anyway), minimum-evolution SPRs, * selecting per-site rates, and optimizing branch lengths outside of ML NNIs. * * Parallelizing the top hits phase may lead to a slight change in the tree, * as some top hits are computed from different (and potentially less optimal source). * This means that results on repeated runs may not be 100% identical. * However, this should not have any significant effect on tree quality * after the NNIs and SPRs. * * The OpenMP code also turns off the star-topology test during ML * NNIs, which may lead to slight improvements in likelihood. */ #include <stdio.h> #include <stdbool.h> #include <string.h> #include <assert.h> #include <math.h> #include <stdlib.h> #include <sys/time.h> #include <ctype.h> #include <unistd.h> #ifdef TRACK_MEMORY /* malloc.h apparently doesn't exist on MacOS */ #include <malloc.h> #endif /* Compile with -DOPENMP to turn on multithreading */ #ifdef OPENMP #include <omp.h> #endif /* By default, tries to compile with SSE instructions for greater speed. But if compiled with -DUSE_DOUBLE, uses double precision instead of single-precision floating point (2x memroy required), and does not use SSE. 
*/ #ifdef __SSE__ #if !defined(NO_SSE) && !defined(USE_DOUBLE) #define USE_SSE3 #endif #endif #ifdef USE_DOUBLE #define SSE_STRING "Double precision (No SSE3)" typedef double numeric_t; #define ScanNumericSpec "%lf" #else typedef float numeric_t; #define ScanNumericSpec "%f" #endif #ifdef USE_SSE3 #define SSE_STRING "SSE3" #define ALIGNED __attribute__((aligned(16))) #define IS_ALIGNED(X) ((((unsigned long) new) & 15L) == 0L) #include <xmmintrin.h> #else #define ALIGNED #define IS_ALIGNED(X) 1 #ifndef USE_DOUBLE #define SSE_STRING "No SSE3" #endif #endif /* USE_SSE3 */ #define FT_VERSION "2.1.7" char *usage = " FastTree protein_alignment > tree\n" " FastTree < protein_alignment > tree\n" " FastTree -out tree protein_alignment\n" " FastTree -nt nucleotide_alignment > tree\n" " FastTree -nt -gtr < nucleotide_alignment > tree\n" " FastTree < nucleotide_alignment > tree\n" "FastTree accepts alignments in fasta or phylip interleaved formats\n" "\n" "Common options (must be before the alignment file):\n" " -quiet to suppress reporting information\n" " -nopr to suppress progress indicator\n" " -log logfile -- save intermediate trees, settings, and model details\n" " -fastest -- speed up the neighbor joining phase & reduce memory usage\n" " (recommended for >50,000 sequences)\n" " -n <number> to analyze multiple alignments (phylip format only)\n" " (use for global bootstrap, with seqboot and CompareToBootstrap.pl)\n" " -nosupport to not compute support values\n" " -intree newick_file to set the starting tree(s)\n" " -intree1 newick_file to use this starting tree for all the alignments\n" " (for faster global bootstrap on huge alignments)\n" " -pseudo to use pseudocounts (recommended for highly gapped sequences)\n" " -gtr -- generalized time-reversible model (nucleotide alignments only)\n" " -wag -- Whelan-And-Goldman 2001 model (amino acid alignments only)\n" " -quote -- allow spaces and other restricted characters (but not ' ) in\n" " sequence names and quote names in the output tree (fasta input only;\n" " FastTree will not be able to read these trees back in)\n" " -noml to turn off maximum-likelihood\n" " -nome to turn off minimum-evolution NNIs and SPRs\n" " (recommended if running additional ML NNIs with -intree)\n" " -nome -mllen with -intree to optimize branch lengths for a fixed topology\n" " -cat # to specify the number of rate categories of sites (default 20)\n" " or -nocat to use constant rates\n" " -gamma -- after optimizing the tree under the CAT approximation,\n" " rescale the lengths to optimize the Gamma20 likelihood\n" " -constraints constraintAlignment to constrain the topology search\n" " constraintAlignment should have 1s or 0s to indicates splits\n" " -expert -- see more options\n" "For more information, see http://www.microbesonline.org/fasttree/\n"; char *expertUsage = "FastTree [-nt] [-n 100] [-quote] [-pseudo | -pseudo 1.0]\n" " [-boot 1000 | -nosupport]\n" " [-intree starting_trees_file | -intree1 starting_tree_file]\n" " [-quiet | -nopr]\n" " [-nni 10] [-spr 2] [-noml | -mllen | -mlnni 10]\n" " [-mlacc 2] [-cat 20 | -nocat] [-gamma]\n" " [-slow | -fastest] [-2nd | -no2nd] [-slownni] [-seed 1253] \n" " [-top | -notop] [-topm 1.0 [-close 0.75] [-refresh 0.8]]\n" " [-matrix Matrix | -nomatrix] [-nj | -bionj]\n" " [-wag] [-nt] [-gtr] [-gtrrates ac ag at cg ct gt] [-gtrfreq A C G T]\n" " [ -constraints constraintAlignment [ -constraintWeight 100.0 ] ]\n" " [-log logfile]\n" " [ alignment_file ]\n" " [ -out output_newick_file | > newick_tree]\n" "\n" "or\n" "\n" "FastTree 
[-nt] [-matrix Matrix | -nomatrix] [-rawdist] -makematrix [alignment]\n" " [-n 100] > phylip_distance_matrix\n" "\n" " FastTree supports fasta or phylip interleaved alignments\n" " By default FastTree expects protein alignments, use -nt for nucleotides\n" " FastTree reads standard input if no alignment file is given\n" "\n" "Input/output options:\n" " -n -- read in multiple alignments in. This only\n" " works with phylip interleaved format. For example, you can\n" " use it with the output from phylip's seqboot. If you use -n, FastTree\n" " will write 1 tree per line to standard output.\n" " -intree newickfile -- read the starting tree in from newickfile.\n" " Any branch lengths in the starting trees are ignored.\n" " -intree with -n will read a separate starting tree for each alignment.\n" " -intree1 newickfile -- read the same starting tree for each alignment\n" " -quiet -- do not write to standard error during normal operation (no progress\n" " indicator, no options summary, no likelihood values, etc.)\n" " -nopr -- do not write the progress indicator to stderr\n" " -log logfile -- save intermediate trees so you can extract\n" " the trees and restart long-running jobs if they crash\n" " -log also reports the per-site rates (1 means slowest category)\n" " -quote -- quote sequence names in the output and allow spaces, commas,\n" " parentheses, and colons in them but not ' characters (fasta files only)\n" "\n" "Distances:\n" " Default: For protein sequences, log-corrected distances and an\n" " amino acid dissimilarity matrix derived from BLOSUM45\n" " or for nucleotide sequences, Jukes-Cantor distances\n" " To specify a different matrix, use -matrix FilePrefix or -nomatrix\n" " Use -rawdist to turn the log-correction off\n" " or to use %different instead of Jukes-Cantor\n" "\n" " -pseudo [weight] -- Use pseudocounts to estimate distances between\n" " sequences with little or no overlap. (Off by default.) Recommended\n" " if analyzing the alignment has sequences with little or no overlap.\n" " If the weight is not specified, it is 1.0\n" "\n" "Topology refinement:\n" " By default, FastTree tries to improve the tree with up to 4*log2(N)\n" " rounds of minimum-evolution nearest-neighbor interchanges (NNI),\n" " where N is the number of unique sequences, 2 rounds of\n" " subtree-prune-regraft (SPR) moves (also min. evo.), and\n" " up to 2*log(N) rounds of maximum-likelihood NNIs.\n" " Use -nni to set the number of rounds of min. evo. NNIs,\n" " and -spr to set the rounds of SPRs.\n" " Use -noml to turn off both min-evo NNIs and SPRs (useful if refining\n" " an approximately maximum-likelihood tree with further NNIs)\n" " Use -sprlength set the maximum length of a SPR move (default 10)\n" " Use -mlnni to set the number of rounds of maximum-likelihood NNIs\n" " Use -mlacc 2 or -mlacc 3 to always optimize all 5 branches at each NNI,\n" " and to optimize all 5 branches in 2 or 3 rounds\n" " Use -mllen to optimize branch lengths without ML NNIs\n" " Use -mllen -nome with -intree to optimize branch lengths on a fixed topology\n" " Use -slownni to turn off heuristics to avoid constant subtrees (affects both\n" " ML and ME NNIs)\n" "\n" "Maximum likelihood model options:\n" " -wag -- Whelan-And-Goldman 2001 model instead of (default) Jones-Taylor-Thorton 1992 model (a.a. 
only)\n" " -gtr -- generalized time-reversible instead of (default) Jukes-Cantor (nt only)\n" " -cat # -- specify the number of rate categories of sites (default 20)\n" " -nocat -- no CAT model (just 1 category)\n" " -gamma -- after the final round of optimizing branch lengths with the CAT model,\n" " report the likelihood under the discrete gamma model with the same\n" " number of categories. FastTree uses the same branch lengths but\n" " optimizes the gamma shape parameter and the scale of the lengths.\n" " The final tree will have rescaled lengths. Used with -log, this\n" " also generates per-site likelihoods for use with CONSEL, see\n" " GammaLogToPaup.pl and documentation on the FastTree web site.\n" "\n" "Support value options:\n" " By default, FastTree computes local support values by resampling the site\n" " likelihoods 1,000 times and the Shimodaira Hasegawa test. If you specify -nome,\n" " it will compute minimum-evolution bootstrap supports instead\n" " In either case, the support values are proportions ranging from 0 to 1\n" "\n" " Use -nosupport to turn off support values or -boot 100 to use just 100 resamples\n" " Use -seed to initialize the random number generator\n" "\n" "Searching for the best join:\n" " By default, FastTree combines the 'visible set' of fast neighbor-joining with\n" " local hill-climbing as in relaxed neighbor-joining\n" " -slow -- exhaustive search (like NJ or BIONJ, but different gap handling)\n" " -slow takes half an hour instead of 8 seconds for 1,250 proteins\n" " -fastest -- search the visible set (the top hit for each node) only\n" " Unlike the original fast neighbor-joining, -fastest updates visible(C)\n" " after joining A and B if join(AB,C) is better than join(C,visible(C))\n" " -fastest also updates out-distances in a very lazy way,\n" " -fastest sets -2nd on as well, use -fastest -no2nd to avoid this\n" "\n" "Top-hit heuristics:\n" " By default, FastTree uses a top-hit list to speed up search\n" " Use -notop (or -slow) to turn this feature off\n" " and compare all leaves to each other,\n" " and all new joined nodes to each other\n" " -topm 1.0 -- set the top-hit list size to parameter*sqrt(N)\n" " FastTree estimates the top m hits of a leaf from the\n" " top 2*m hits of a 'close' neighbor, where close is\n" " defined as d(seed,close) < 0.75 * d(seed, hit of rank 2*m),\n" " and updates the top-hits as joins proceed\n" " -close 0.75 -- modify the close heuristic, lower is more conservative\n" " -refresh 0.8 -- compare a joined node to all other nodes if its\n" " top-hit list is less than 80% of the desired length,\n" " or if the age of the top-hit list is log2(m) or greater\n" " -2nd or -no2nd to turn 2nd-level top hits heuristic on or off\n" " This reduces memory usage and running time but may lead to\n" " marginal reductions in tree quality.\n" " (By default, -fastest turns on -2nd.)\n" "\n" "Join options:\n" " -nj: regular (unweighted) neighbor-joining (default)\n" " -bionj: weighted joins as in BIONJ\n" " FastTree will also weight joins during NNIs\n" "\n" "Constrained topology search options:\n" " -constraints alignmentfile -- an alignment with values of 0, 1, and -\n" " Not all sequences need be present. A column of 0s and 1s defines a\n" " constrained split. Some constraints may be violated\n" " (see 'violating constraints:' in standard error).\n" " -constraintWeight -- how strongly to weight the constraints. 
A value of 1\n" " means a penalty of 1 in tree length for violating a constraint\n" " Default: 100.0\n" "\n" "For more information, see http://www.microbesonline.org/fasttree/\n" " or the comments in the source code\n"; ; #define MAXCODES 20 #define NOCODE 127 /* Note -- sequence lines longer than BUFFER_SIZE are allowed, but FASTA header lines must be within this limit */ #define BUFFER_SIZE 5000 #define MIN(X,Y) ((X) < (Y) ? (X) : (Y)) #define MAX(X,Y) ((X) > (Y) ? (X) : (Y)) typedef struct { int nPos; int nSeq; char **names; char **seqs; int nSaved; /* actual allocated size of names and seqs */ } alignment_t; /* For each position in a profile, we have a weight (% non-gapped) and a frequency vector. (If using a matrix, the frequency vector is in eigenspace). We also store codes for simple profile positions (all gaps or only 1 value) If weight[pos] > 0 && codes[pos] == NOCODE then we store the vector vectors itself is sets of nCodes long, so the vector for the ith nonconstant position starts at &vectors[nCodes*i] To speed up comparison of outprofile to a sequence or other simple profile, we also (for outprofiles) store codeDist[iPos*nCodes+k] = dist(k,profile[iPos]) For constraints, we store a vector of nOn and nOff If not using constraints, those will be NULL */ typedef struct { /* alignment profile */ numeric_t *weights; unsigned char *codes; numeric_t *vectors; /* NULL if no non-constant positions, e.g. for leaves */ int nVectors; numeric_t *codeDist; /* Optional -- distance to each code at each position */ /* constraint profile */ int *nOn; int *nOff; } profile_t; /* A visible node is a pair of nodes i, j such that j is the best hit of i, using the neighbor-joining criterion, at the time the comparison was made, or approximately so since then. Note that variance = dist because in BIONJ, constant factors of variance do not matter, and because we weight ungapped sequences higher naturally when averaging profiles, so we do not take this into account in the computation of "lambda" for BIONJ. For the top-hit list heuristic, if the top hit list becomes "too short", we store invalid entries with i=j=-1 and dist/criterion very high. */ typedef struct { int i, j; numeric_t weight; /* Total product of weights (maximum value is nPos) This is needed for weighted joins and for pseudocounts, but not in most other places. For example, it is not maintained by the top hits code */ numeric_t dist; /* The uncorrected distance (includes diameter correction) */ numeric_t criterion; /* changes when we update the out-profile or change nActive */ } besthit_t; typedef struct { int nChild; int child[3]; } children_t; typedef struct { /* Distances between amino acids */ numeric_t distances[MAXCODES][MAXCODES]; /* Inverse of the eigenvalue matrix, for rotating a frequency vector into eigenspace so that profile similarity computations are O(alphabet) not O(alphabet*alphabet) time. */ numeric_t eigeninv[MAXCODES][MAXCODES]; numeric_t eigenval[MAXCODES]; /* eigenvalues */ /* eigentot=eigeninv times the all-1s frequency vector useful for normalizing rotated frequency vectors */ numeric_t eigentot[MAXCODES]; /* codeFreq is the transpose of the eigeninv matrix is the rotated frequency vector for each code */ numeric_t codeFreq[MAXCODES][MAXCODES]; numeric_t gapFreq[MAXCODES]; } distance_matrix_t; /* A transition matrix gives the instantaneous rate of change of frequencies df/dt = M . f which is solved by f(t) = exp(M) . 
f(0) and which is not a symmetric matrix because of non-uniform stationary frequencies stat, so that M stat = 0 M(i,j) is instantaneous rate of j -> i, not of i -> j S = diag(sqrt(stat)) is a correction so that M' = S**-1 M S is symmetric Let W L W**-1 = M' be an eigendecomposition of M' Because M' is symmetric, W can be a rotation, and W**-1 = t(W) Set V = S*W M = V L V**-1 is an eigendecomposition of M Note V**-1 = W**-1 S**-1 = t(W) S**-1 Evolution by time t is given by exp(M*t) = V exp(L*t) V**-1 P(A & B | t) = B . exp(M*t) . (A * stat) note this is *not* the same as P(A->B | t) and we can reduce some of the computations from O(a**2) to O(a) time, where a is the alphabet size, by storing frequency vectors as t(V) . f = t(W) . t(S) . f Then P(f0 & f1 | t) = f1 . exp(M*t) . f0 * (f0 . stat) = sum(r0j * r1j * exp(l_j*t)) where r0 and r1 are the transformed vectors Posterior distribution of P given children f0 and f1 is given by P(i | f0, f1, t0, t1) = stat * P(i->f0 | t0) * P(i->f1 | t1) = P(i & f0 | t0) * P(i & f1 | t1) / stat ~ (V . exp(t0*L) . r0) * (V . exp(t1*L) . r1) / stat When normalize this posterior distribution (to sum to 1), divide by stat, and transform by t(V) -- this is the "profile" of internal nodes To eliminate the O(N**2) step of transforming by t(V), if the posterior distribution of an amino acid is near 1 then we can approximate it by P(i) ~= (i==A) * w + nearP(i) * (1-w), where w is fit so that P(i==A) is correct nearP = Posterior(i | i, i, 0.1, 0.1) [0.1 is an arbitrary choice] and we confirm that the approximation works well before we use it. Given this parameter w we can set rotated_posterior = rotation(w * (i==A)/stat + (1-w) * nearP/stat) = codeFreq(A) * w/stat(A) + nearFreq(A) * (1-w) */ typedef struct { numeric_t stat[MAXCODES]; /* The stationary distribution */ numeric_t statinv[MAXCODES]; /* 1/stat */ /* the eigenmatrix, with the eigenvectors as columns and rotations of individual characters as rows. 
Also includes a NOCODE entry for gaps */ numeric_t codeFreq[NOCODE+1][MAXCODES]; numeric_t eigeninv[MAXCODES][MAXCODES]; /* Inverse of eigenmatrix */ numeric_t eigeninvT[MAXCODES][MAXCODES]; /* transpose of eigeninv */ numeric_t eigenval[MAXCODES]; /* Eigenvalues */ /* These are for approximate posteriors (off by default) */ numeric_t nearP[MAXCODES][MAXCODES]; /* nearP[i][j] = P(parent=j | both children are i, both lengths are 0.1 */ numeric_t nearFreq[MAXCODES][MAXCODES]; /* rotation of nearP/stat */ } transition_matrix_t; typedef struct { int nRateCategories; numeric_t *rates; /* 1 per rate category */ unsigned int *ratecat; /* 1 category per position */ } rates_t; typedef struct { /* The input */ int nSeq; int nPos; char **seqs; /* the aligment sequences array (not reallocated) */ distance_matrix_t *distance_matrix; /* a pointer (not reallocated), or NULL if using %identity distance */ transition_matrix_t *transmat; /* a pointer (is allocated), or NULL for Jukes-Cantor */ /* Topological constraints are represented for each sequence as binary characters with values of '0', '1', or '-' (for missing data) Sequences that have no constraint may have a NULL string */ int nConstraints; char **constraintSeqs; /* The profile data structures */ int maxnode; /* The next index to allocate */ int maxnodes; /* Space allocated in data structures below */ profile_t **profiles; /* Profiles of leaves and intermediate nodes */ numeric_t *diameter; /* To correct for distance "up" from children (if any) */ numeric_t *varDiameter; /* To correct variances for distance "up" */ numeric_t *selfdist; /* Saved for use in some formulas */ numeric_t *selfweight; /* Saved for use in some formulas */ /* Average profile of all active nodes, the "outprofile" * If all inputs are ungapped, this has weight 1 (not nSequences) at each position * The frequencies all sum to one (or that is implied by the eigen-representation) */ profile_t *outprofile; double totdiam; /* We sometimes use stale out-distances, so we remember what nActive was */ numeric_t *outDistances; /* Sum of distances to other active (parent==-1) nodes */ int *nOutDistActive; /* What nActive was when this outDistance was computed */ /* the inferred tree */ int root; /* index of the root. 
Unlike other internal nodes, it has 3 children */ int *parent; /* -1 or index of parent */ children_t *child; numeric_t *branchlength; /* Distance to parent */ numeric_t *support; /* 1 for high-confidence nodes */ /* auxilliary data for maximum likelihood (defaults to 1 category of rate=1.0) */ rates_t rates; } NJ_t; /* Uniquify sequences in an alignment -- map from indices in the alignment to unique indicies in a NJ_t */ typedef struct { int nSeq; int nUnique; int *uniqueFirst; /* iUnique -> iAln */ int *alnNext; /* iAln -> next, or -1 */ int *alnToUniq; /* iAln -> iUnique, or -1 if another was the exemplar */ char **uniqueSeq; /* indexed by iUniq -- points to strings allocated elsewhere */ } uniquify_t; /* Describes which switch to do */ typedef enum {ABvsCD,ACvsBD,ADvsBC} nni_t; /* A list of these describes a chain of NNI moves in a rooted tree, making up, in total, an SPR move */ typedef struct { int nodes[2]; double deltaLength; /* change in tree length for this step (lower is better) */ } spr_step_t; /* Keep track of hits for the top-hits heuristic without wasting memory j = -1 means empty If j is an inactive node, this may be replaced by that node's parent (and dist recomputed) */ typedef struct { int j; numeric_t dist; } hit_t; typedef struct { int nHits; /* the allocated and desired size; some of them may be empty */ hit_t *hits; int hitSource; /* where to refresh hits from if a 2nd-level top-hit list, or -1 */ int age; /* number of joins since a refresh */ } top_hits_list_t; typedef struct { int m; /* size of a full top hits list, usually sqrt(N) */ int q; /* size of a 2nd-level top hits, usually sqrt(m) */ int maxnodes; top_hits_list_t *top_hits_lists; /* one per node */ hit_t *visible; /* the "visible" (very best) hit for each node */ /* The top-visible set is a subset, usually of size m, of the visible set -- it is the set of joins to select from Each entry is either a node whose visible set entry has a good (low) criterion, or -1 for empty, or is an obsolete node (which is effectively the same). Whenever we update the visible set, should also call UpdateTopVisible() which ensures that none of the topvisible set are stale (that is, they all point to an active node). */ int nTopVisible; /* nTopVisible = m * topvisibleMult */ int *topvisible; int topvisibleAge; /* joins since the top-visible list was recomputed */ #ifdef OPENMP /* 1 lock to read or write any top hits list, no thread grabs more than one */ omp_lock_t *locks; #endif } top_hits_t; /* Global variables */ /* Options */ int verbose = 1; int showProgress = 1; int slow = 0; int fastest = 0; bool useTopHits2nd = false; /* use the second-level top hits heuristic? */ int bionj = 0; double tophitsMult = 1.0; /* 0 means compare nodes to all other nodes */ double tophitsClose = -1.0; /* Parameter for how close is close; also used as a coverage req. */ double topvisibleMult = 1.5; /* nTopVisible = m * topvisibleMult; 1 or 2 did not make much difference in either running time or accuracy so I chose a compromise. */ double tophitsRefresh = 0.8; /* Refresh if fraction of top-hit-length drops to this */ double tophits2Mult = 1.0; /* Second-level top heuristic -- only with -fastest */ int tophits2Safety = 3; /* Safety factor for second level of top-hits heuristic */ double tophits2Refresh = 0.6; /* Refresh 2nd-level top hits if drops down to this fraction of length */ double staleOutLimit = 0.01; /* nActive changes by at most this amount before we recompute an out-distance. 
(Only applies if using the top-hits heuristic) */ double fResetOutProfile = 0.02; /* Recompute out profile from scratch if nActive has changed by more than this proportion, and */ int nResetOutProfile = 200; /* nActive has also changed more than this amount */ int nCodes=20; /* 20 if protein, 4 if nucleotide */ bool useMatrix=true; /* If false, use %different as the uncorrected distance */ bool logdist = true; /* If true, do a log-correction (scoredist-like or Jukes-Cantor) but only during NNIs and support values, not during neighbor-joining */ double pseudoWeight = 0.0; /* The weight of pseudocounts to avoid artificial long branches when nearby sequences in the tree have little or no overlap (off by default). The prior distance is based on all overlapping positions among the quartet or triplet under consideration. The log correction takes place after the pseudocount is used. */ double constraintWeight = 100.0;/* Cost of violation of a topological constraint in evolutionary distance or likelihood */ double MEMinDelta = 1.0e-4; /* Changes of less than this in tree-length are discounted for purposes of identifying fixed subtrees */ bool fastNNI = true; bool gammaLogLk = false; /* compute gamma likelihood without reoptimizing branch lengths? */ /* Maximum likelihood options and constants */ /* These are used to rescale likelihood values and avoid taking a logarithm at each position */ const double LkUnderflow = 1.0e-4; const double LkUnderflowInv = 1.0e4; const double LogLkUnderflow = 9.21034037197618; /* -log(LkUnderflowInv) */ const double Log2 = 0.693147180559945; /* These are used to limit the optimization of branch lengths. Also very short branch lengths can create numerical problems. In version 2.1.7., the minimum branch lengths (MLMinBranchLength and MLMinRelBranchLength) were increased to prevent numerical problems in rare cases. If compiled with -DUSE_DOUBLE then these minimums could be decreased. */ const double MLMinBranchLengthTolerance = 1.0e-4; /* absolute tolerance for optimizing branch lengths */ const double MLFTolBranchLength = 0.001; /* fractional tolerance for optimizing branch lengths */ const double MLMinBranchLength = 5.0e-4; const double MLMinRelBranchLength = 2.5e-4; /* minimum of rate * length */ int mlAccuracy = 1; /* Rounds of optimization of branch lengths; 1 means do 2nd round only if close */ double closeLogLkLimit = 5.0; /* If partial optimization of an NNI looks like it would decrease the log likelihood by this much or more then do not optimize it further */ double treeLogLkDelta = 0.1; /* Give up if tree log-lk changes by less than this; NNIs that change likelihood by less than this also are considered unimportant by some heuristics */ bool exactML = true; /* Exact or approximate posterior distributions for a.a.s */ double approxMLminf = 0.95; /* Only try to approximate posterior distributions if max. 
value is at least this high */ double approxMLminratio = 2/3.0;/* Ratio of approximated/true posterior values must be at least this high */ double approxMLnearT = 0.2; /* 2nd component of near-constant posterior distribution uses this time scale */ const int nDefaultRateCats = 20; /* Performance and memory usage */ long profileOps = 0; /* Full profile-based distance operations */ long outprofileOps = 0; /* How many of profileOps are comparisons to outprofile */ long seqOps = 0; /* Faster leaf-based distance operations */ long profileAvgOps = 0; /* Number of profile-average steps */ long nHillBetter = 0; /* Number of hill-climbing steps */ long nCloseUsed = 0; /* Number of "close" neighbors we avoid full search for */ long nClose2Used = 0; /* Number of "close" neighbors we use 2nd-level top hits for */ long nRefreshTopHits = 0; /* Number of full-blown searches (interior nodes) */ long nVisibleUpdate = 0; /* Number of updates of the visible set */ long nNNI = 0; /* Number of NNI changes performed */ long nSPR = 0; /* Number of SPR changes performed */ long nML_NNI = 0; /* Number of max-lik. NNI changes performed */ long nSuboptimalSplits = 0; /* # of splits that are rejected given final tree (during bootstrap) */ long nSuboptimalConstrained = 0; /* Bad splits that are due to constraints */ long nConstraintViolations = 0; /* Number of constraint violations */ long nProfileFreqAlloc = 0; long nProfileFreqAvoid = 0; long szAllAlloc = 0; long mymallocUsed = 0; /* useful allocations by mymalloc */ long maxmallocHeap = 0; /* Maximum of mi.arena+mi.hblkhd from mallinfo (actual mem usage) */ long nLkCompute = 0; /* # of likelihood computations for pairs of probability vectors */ long nPosteriorCompute = 0; /* # of computations of posterior probabilities */ long nAAPosteriorExact = 0; /* # of times compute exact AA posterior */ long nAAPosteriorRough = 0; /* # of times use rough approximation */ long nStarTests = 0; /* # of times we use star test to avoid testing an NNI */ /* Protein character set */ unsigned char *codesStringAA = (unsigned char*) "ARNDCQEGHILKMFPSTWYV"; unsigned char *codesStringNT = (unsigned char*) "ACGT"; unsigned char *codesString = NULL; distance_matrix_t *ReadDistanceMatrix(char *prefix); void SetupDistanceMatrix(/*IN/OUT*/distance_matrix_t *); /* set eigentot, codeFreq, gapFreq */ void ReadMatrix(char *filename, /*OUT*/numeric_t codes[MAXCODES][MAXCODES], bool check_codes); void ReadVector(char *filename, /*OUT*/numeric_t codes[MAXCODES]); alignment_t *ReadAlignment(/*READ*/FILE *fp, bool bQuote); /* Returns a list of strings (exits on failure) */ alignment_t *FreeAlignment(alignment_t *); /* returns NULL */ void FreeAlignmentSeqs(/*IN/OUT*/alignment_t *); /* Takes as input the transpose of the matrix V, with i -> j This routine takes care of setting the diagonals */ transition_matrix_t *CreateTransitionMatrix(/*IN*/double matrix[MAXCODES][MAXCODES], /*IN*/double stat[MAXCODES]); transition_matrix_t *CreateGTR(double *gtrrates/*ac,ag,at,cg,ct,gt*/, double *gtrfreq/*ACGT*/); /* For converting profiles from 1 rotation to another, or converts NULL to NULL */ distance_matrix_t *TransMatToDistanceMat(transition_matrix_t *transmat); /* Allocates memory, initializes leaf profiles */ NJ_t *InitNJ(char **sequences, int nSeqs, int nPos, /*IN OPTIONAL*/char **constraintSeqs, int nConstraints, /*IN OPTIONAL*/distance_matrix_t *, /*IN OPTIONAL*/transition_matrix_t *); NJ_t *FreeNJ(NJ_t *NJ); /* returns NULL */ void FastNJ(/*IN/OUT*/NJ_t *NJ); /* Does the joins */ void 
ReliabilityNJ(/*IN/OUT*/NJ_t *NJ, int nBootstrap); /* Estimates the reliability of the joins */ /* nni_stats_t is meaningless for leaves and root, so all of those entries will just be high (for age) or 0 (for delta) */ typedef struct { int age; /* number of rounds since this node was modified by an NNI */ int subtreeAge; /* number of rounds since self or descendent had a significant improvement */ double delta; /* improvement in score for this node (or 0 if no change) */ double support; /* improvement of score for self over better of alternatives */ } nni_stats_t; /* One round of nearest-neighbor interchanges according to the minimum-evolution or approximate maximum-likelihood criterion. If doing maximum likelihood then this modifies the branch lengths. age is the # of rounds since a node was NNId Returns the # of topological changes performed */ int NNI(/*IN/OUT*/NJ_t *NJ, int iRound, int nRounds, bool useML, /*IN/OUT*/nni_stats_t *stats, /*OUT*/double *maxDeltaCriterion); nni_stats_t *InitNNIStats(NJ_t *NJ); nni_stats_t *FreeNNIStats(nni_stats_t *, NJ_t *NJ); /* returns NULL */ /* One round of subtree-prune-regraft moves (minimum evolution) */ void SPR(/*IN/OUT*/NJ_t *NJ, int maxSPRLength, int iRound, int nRounds); /* Recomputes all branch lengths by minimum evolution criterion*/ void UpdateBranchLengths(/*IN/OUT*/NJ_t *NJ); /* Recomputes all branch lengths and, optionally, internal profiles */ double TreeLength(/*IN/OUT*/NJ_t *NJ, bool recomputeProfiles); typedef struct { int nBadSplits; int nConstraintViolations; int nBadBoth; int nSplits; /* How much length would be reduce or likelihood would be increased by the best NNI we find (the worst "miss") */ double dWorstDeltaUnconstrained; double dWorstDeltaConstrained; } SplitCount_t; void TestSplitsMinEvo(NJ_t *NJ, /*OUT*/SplitCount_t *splitcount); /* Sets SH-like support values if nBootstrap>0 */ void TestSplitsML(/*IN/OUT*/NJ_t *NJ, /*OUT*/SplitCount_t *splitcount, int nBootstrap); /* Pick columns for resampling, stored as returned_vector[iBoot*nPos + j] */ int *ResampleColumns(int nPos, int nBootstrap); /* Use out-profile and NJ->totdiam to recompute out-distance for node iNode Only does this computation if the out-distance is "stale" (nOutDistActive[iNode] != nActive) Note "IN/UPDATE" for NJ always means that we may update out-distances but otherwise make no changes. */ void SetOutDistance(/*IN/UPDATE*/NJ_t *NJ, int iNode, int nActive); /* Always sets join->criterion; may update NJ->outDistance and NJ->nOutDistActive, assumes join's weight and distance are already set, and that the constraint penalty (if any) is included in the distance */ void SetCriterion(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join); /* Computes weight and distance (which includes the constraint penalty) and then sets the criterion (maybe update out-distances) */ void SetDistCriterion(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join); /* If join->i or join->j are inactive nodes, replaces them with their active ancestors. After doing this, if i == j, or either is -1, sets weight to 0 and dist and criterion to 1e20 and returns false (not a valid join) Otherwise, if i or j changed, recomputes the distance and criterion. 
Note that if i and j are unchanged then the criterion could be stale If bUpdateDist is false, and i or j change, then it just sets dist to a negative number */ bool UpdateBestHit(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join, bool bUpdateDist); /* This recomputes the criterion, or returns false if the visible node is no longer active. */ bool GetVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits, int iNode, /*OUT*/besthit_t *visible); int ActiveAncestor(/*IN*/NJ_t *NJ, int node); /* Compute the constraint penalty for a join. This is added to the "distance" by SetCriterion */ int JoinConstraintPenalty(/*IN*/NJ_t *NJ, int node1, int node2); int JoinConstraintPenaltyPiece(NJ_t *NJ, int node1, int node2, int iConstraint); /* Helper function for computing the number of constraints violated by a split, represented as counts of on and off on each side */ int SplitConstraintPenalty(int nOn1, int nOff1, int nOn2, int nOff2); /* Reports the (min. evo.) support for the (1,2) vs. (3,4) split col[iBoot*nPos+j] is column j for bootstrap iBoot */ double SplitSupport(profile_t *p1, profile_t *p2, profile_t *p3, profile_t *p4, /*OPTIONAL*/distance_matrix_t *dmat, int nPos, int nBootstrap, int *col); /* Returns SH-like support given resampling spec. (in col) and site likelihods for the three quartets */ double SHSupport(int nPos, int nBoostrap, int *col, double loglk[3], double *site_likelihoods[3]); profile_t *SeqToProfile(/*IN/OUT*/NJ_t *NJ, char *seq, int nPos, /*OPTIONAL*/char *constraintSeqs, int nConstraints, int iNode, unsigned long counts[256]); /* ProfileDist and SeqDist only set the dist and weight fields If using an outprofile, use the second argument of ProfileDist for better performance. These produce uncorrected distances. */ void ProfileDist(profile_t *profile1, profile_t *profile2, int nPos, /*OPTIONAL*/distance_matrix_t *distance_matrix, /*OUT*/besthit_t *hit); void SeqDist(unsigned char *codes1, unsigned char *codes2, int nPos, /*OPTIONAL*/distance_matrix_t *distance_matrix, /*OUT*/besthit_t *hit); /* Computes all pairs of profile distances, applies pseudocounts if pseudoWeight > 0, and applies log-correction if logdist is true. The lower index is compared to the higher index, e.g. for profiles A,B,C,D the comparison will be as in quartet_pair_t */ typedef enum {qAB,qAC,qAD,qBC,qBD,qCD} quartet_pair_t; void CorrectedPairDistances(profile_t **profiles, int nProfiles, /*OPTIONAL*/distance_matrix_t *distance_matrix, int nPos, /*OUT*/double *distances); /* output is indexed by nni_t To ensure good behavior while evaluating a subtree-prune-regraft move as a series of nearest-neighbor interchanges, this uses a distance-ish model of constraints, as given by PairConstraintDistance(), rather than counting the number of violated splits (which is what FastTree does during neighbor-joining). Thus, penalty values may well be >0 even if no constraints are violated, but the relative scores for the three NNIs will be correct. */ void QuartetConstraintPenalties(profile_t *profiles[4], int nConstraints, /*OUT*/double d[3]); double PairConstraintDistance(int nOn1, int nOff1, int nOn2, int nOff2); /* the split is consistent with the constraint if any of the profiles have no data or if three of the profiles have the same uniform value (all on or all off) or if AB|CD = 00|11 or 11|00 (all uniform) */ bool SplitViolatesConstraint(profile_t *profiles[4], int iConstraint); /* If false, no values were set because this constraint was not relevant. 
output is for the 3 splits */ bool QuartetConstraintPenaltiesPiece(profile_t *profiles[4], int iConstraint, /*OUT*/double penalty[3]); /* Apply Jukes-Cantor or scoredist-like log(1-d) transform to correct the distance for multiple substitutions. */ double LogCorrect(double distance); /* AverageProfile is used to do a weighted combination of nodes when doing a join. If weight is negative, then the value is ignored and the profiles are averaged. The weight is *not* adjusted for the gap content of the nodes. Also, the weight does not affect the representation of the constraints */ profile_t *AverageProfile(profile_t *profile1, profile_t *profile2, int nPos, int nConstraints, distance_matrix_t *distance_matrix, double weight1); /* PosteriorProfile() is like AverageProfile() but it computes posterior probabilities rather than an average */ profile_t *PosteriorProfile(profile_t *profile1, profile_t *profile2, double len1, double len2, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, int nPos, int nConstraints); /* Set a node's profile from its children. Deletes the previous profile if it exists Use -1.0 for a balanced join Fails unless the node has two children (e.g., no leaves or root) */ void SetProfile(/*IN/OUT*/NJ_t *NJ, int node, double weight1); /* OutProfile does an unweighted combination of nodes to create the out-profile. It always sets code to NOCODE so that UpdateOutProfile can work. */ profile_t *OutProfile(profile_t **profiles, int nProfiles, int nPos, int nConstraints, distance_matrix_t *distance_matrix); void UpdateOutProfile(/*UPDATE*/profile_t *out, profile_t *old1, profile_t *old2, profile_t *new, int nActiveOld, int nPos, int nConstraints, distance_matrix_t *distance_matrix); profile_t *NewProfile(int nPos, int nConstraints); /* returned has no vectors */ profile_t *FreeProfile(profile_t *profile, int nPos, int nConstraints); /* returns NULL */ void AllocRateCategories(/*IN/OUT*/rates_t *rates, int nRateCategories, int nPos); /* f1 can be NULL if code1 != NOCODE, and similarly for f2 Or, if (say) weight1 was 0, then can have code1==NOCODE *and* f1==NULL In that case, returns an arbitrary large number. */ double ProfileDistPiece(unsigned int code1, unsigned int code2, numeric_t *f1, numeric_t *f2, /*OPTIONAL*/distance_matrix_t *dmat, /*OPTIONAL*/numeric_t *codeDist2); /* Adds (or subtracts, if weight is negative) fIn/codeIn from fOut fOut is assumed to exist (as from an outprofile) do not call unless weight of input profile > 0 */ void AddToFreq(/*IN/OUT*/numeric_t *fOut, double weight, unsigned int codeIn, /*OPTIONAL*/numeric_t *fIn, /*OPTIONAL*/distance_matrix_t *dmat); /* Divide the vector (of length nCodes) by a constant so that the total (unrotated) frequency is 1.0 */ void NormalizeFreq(/*IN/OUT*/numeric_t *freq, distance_matrix_t *distance_matrix); /* Allocate, if necessary, and recompute the codeDist*/ void SetCodeDist(/*IN/OUT*/profile_t *profile, int nPos, distance_matrix_t *dmat); /* The allhits list contains the distances of the node to all other active nodes This is useful for the "reset" improvement to the visible set Note that the following routines do not handle the tophits heuristic and assume that out-distances are up to date. 
*/ void SetBestHit(int node, NJ_t *NJ, int nActive, /*OUT*/besthit_t *bestjoin, /*OUT OPTIONAL*/besthit_t *allhits); void ExhaustiveNJSearch(NJ_t *NJ, int nActive, /*OUT*/besthit_t *bestjoin); /* Searches the visible set */ void FastNJSearch(NJ_t *NJ, int nActive, /*UPDATE*/besthit_t *visible, /*OUT*/besthit_t *bestjoin); /* Subroutines for handling the tophits heuristic */ top_hits_t *InitTopHits(NJ_t *NJ, int m); top_hits_t *FreeTopHits(top_hits_t *tophits); /* returns NULL */ /* Before we do any joins -- sets tophits and visible NJ may be modified by setting out-distances */ void SetAllLeafTopHits(/*IN/UPDATE*/NJ_t *NJ, /*IN/OUT*/top_hits_t *tophits); /* Find the best join to do. */ void TopHitNJSearch(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits, /*OUT*/besthit_t *bestjoin); /* Returns the best hit within top hits NJ may be modified because it updates out-distances if they are too stale Does *not* update visible set */ void GetBestFromTopHits(int iNode, /*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN*/top_hits_t *tophits, /*OUT*/besthit_t *bestjoin); /* visible set is modifiable so that we can reset it more globally when we do a "refresh", but we also set the visible set for newnode and do any "reset" updates too. And, we update many outdistances. */ void TopHitJoin(int newnode, /*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits); /* Sort the input besthits by criterion and save the best nOut hits as a new array in top_hits_lists Does not update criterion or out-distances Ignores (silently removes) hit to self Saved list may be shorter than requested if there are insufficient entries */ void SortSaveBestHits(int iNode, /*IN/SORT*/besthit_t *besthits, int nIn, int nOut, /*IN/OUT*/top_hits_t *tophits); /* Given candidate hits from one node, "transfer" them to another node: Stores them in a new place in the same order searches up to active nodes if hits involve non-active nodes If update flag is set, it also recomputes distance and criterion (and ensures that out-distances are updated); otherwise it sets dist to -1e20 and criterion to 1e20 */ void TransferBestHits(/*IN/UPDATE*/NJ_t *NJ, int nActive, int iNode, /*IN*/besthit_t *oldhits, int nOldHits, /*OUT*/besthit_t *newhits, bool updateDistance); /* Create best hit objects from 1 or more hits. Do not update out-distances or set criteria */ void HitsToBestHits(/*IN*/hit_t *hits, int nHits, int iNode, /*OUT*/besthit_t *newhits); besthit_t HitToBestHit(int i, hit_t hit); /* Given a set of besthit entries, look for improvements to the visible set of the j entries. Updates out-distances as it goes. Also replaces stale nodes with this node, because a join is usually how this happens (i.e. it does not need to walk up to ancestors). Note this calls UpdateTopVisible() on any change */ void UpdateVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN*/besthit_t *tophitsNode, int nTopHits, /*IN/OUT*/top_hits_t *tophits); /* Update the top-visible list to perhaps include this hit (O(sqrt(N)) time) */ void UpdateTopVisible(/*IN*/NJ_t * NJ, int nActive, int iNode, /*IN*/hit_t *hit, /*IN/OUT*/top_hits_t *tophits); /* Recompute the top-visible subset of the visible set */ void ResetTopVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits); /* Make a shorter list with only unique entries. Replaces any "dead" hits to nodes that have parents with their active ancestors and ignores any that become dead. Updates all criteria. 
Combined gets sorted by i & j The returned list is allocated to nCombined even though only *nUniqueOut entries are filled */
besthit_t *UniqueBestHits(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/SORT*/besthit_t *combined, int nCombined, /*OUT*/int *nUniqueOut);
nni_t ChooseNNI(profile_t *profiles[4], /*OPTIONAL*/distance_matrix_t *dmat, int nPos, int nConstraints, /*OUT*/double criteria[3]); /* The three internal branch lengths or log likelihoods */
/* length[] is ordered as described by quartet_length_t, but after we do the swap of B with C (to give AC|BD) or B with D (to get AD|BC), if that is the returned choice bFast means do not consider NNIs if AB|CD is noticeably better than the star topology (as implemented by MLQuartetOptimize). If there are constraints, then the constraint penalty is included in criteria[] */
nni_t MLQuartetNNI(profile_t *profiles[4], /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, int nPos, int nConstraints, /*OUT*/double criteria[3], /* The three potential quartet log-likelihoods */ /*IN/OUT*/numeric_t length[5], bool bFast);
void OptimizeAllBranchLengths(/*IN/OUT*/NJ_t *NJ);
double TreeLogLk(/*IN*/NJ_t *NJ, /*OPTIONAL OUT*/double *site_loglk);
double MLQuartetLogLk(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*IN*/double branch_lengths[5], /*OPTIONAL OUT*/double *site_likelihoods);
/* Given a topology and branch lengths, estimate rates & recompute profiles */
void SetMLRates(/*IN/OUT*/NJ_t *NJ, int nRateCategories);
/* Returns a set of nRateCategories potential rates; the caller must free it */
numeric_t *MLSiteRates(int nRateCategories);
/* returns site_loglk so that site_loglk[nPos*iRate + j] is the log likelihood of site j with rate iRate The caller must free it. */
double *MLSiteLikelihoodsByRate(/*IN*/NJ_t *NJ, /*IN*/numeric_t *rates, int nRateCategories);
typedef struct { double mult; /* multiplier for the rates / divisor for the tree-length */ double alpha; int nPos; int nRateCats; numeric_t *rates; double *site_loglk; } siteratelk_t;
double GammaLogLk(/*IN*/siteratelk_t *s, /*OPTIONAL OUT*/double *gamma_loglk_sites);
/* Input site_loglk must be for each rate. Note that FastTree does not reoptimize the branch lengths under the Gamma model -- it optimizes the overall scale. Reports the gamma log likelihood (and logs site likelihoods if fpLog is set), and reports the rescaling value. */
double RescaleGammaLogLk(int nPos, int nRateCats, /*IN*/numeric_t *rates, /*IN*/double *site_loglk, /*OPTIONAL*/FILE *fpLog);
/* P(value<=x) for the gamma distribution with shape parameter alpha and scale 1/alpha */
double PGamma(double x, double alpha);
/* Given a topology and branch lengths, optimize GTR rates and quickly reoptimize branch lengths. If gtrfreq is NULL, then empirical frequencies are used */
void SetMLGtr(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL IN*/double *gtrfreq, /*OPTIONAL WRITE*/FILE *fpLog);
/* P(A & B | len) = P(B | A, len) * P(A) If site_likelihoods is present, multiplies those values by the site likelihood at each point (Note it does not handle underflow) */
double PairLogLk(/*IN*/profile_t *p1, /*IN*/profile_t *p2, double length, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*OPTIONAL IN/OUT*/double *site_likelihoods);
/* Branch lengths for 4-taxon tree ((A,B),C,D); I means internal */
typedef enum {LEN_A,LEN_B,LEN_C,LEN_D,LEN_I} quartet_length_t;
typedef struct { int nPos; transition_matrix_t *transmat; rates_t *rates; int nEval; /* number of likelihood evaluations */ /* The pair to optimize */ profile_t *pair1; profile_t *pair2; } quartet_opt_t;
double PairNegLogLk(double x, void *data); /* data must be a quartet_opt_t */
typedef struct { NJ_t *NJ; double freq[4]; double rates[6]; int iRate; /* which rate to set x from */ } gtr_opt_t;
/* Returns -log_likelihood for the tree with the given rates data must be a gtr_opt_t and x is used to set rate iRate Does not recompute profiles -- assumes that the caller will */
double GTRNegLogLk(double x, void *data);
/* Returns the resulting log likelihood. Optionally returns whether other topologies should be abandoned, based on the difference between AB|CD and the "star topology" (AB|CD with a branch length of MLMinBranchLength) exceeding closeLogLkLimit. If bStarTest is passed in, it only optimizes the internal branch if the star test is true. Otherwise, it optimizes all 5 branch lengths in turn.
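(Presumably each of the five branch lengths is optimized one at a time with the one-dimensional minimizer declared below, i.e. onedimenmin()/brent() driving PairNegLogLk() over a quartet_opt_t; see the definition of MLQuartetOptimize for the exact schedule.)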
*/ double MLQuartetOptimize(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*IN/OUT*/double branch_lengths[5], /*OPTIONAL OUT*/bool *pStarTest, /*OPTIONAL OUT*/double *site_likelihoods); /* Returns the resulting log likelihood */ double MLPairOptimize(profile_t *pA, profile_t *pB, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*IN/OUT*/double *branch_length); /* Returns the number of steps considered, with the actual steps in steps[] Modifies the tree by this chain of NNIs */ int FindSPRSteps(/*IN/OUT*/NJ_t *NJ, int node, int parent, /* sibling or parent of node to NNI to start the chain */ /*IN/OUT*/profile_t **upProfiles, /*OUT*/spr_step_t *steps, int maxSteps, bool bFirstAC); /* Undo a single NNI */ void UnwindSPRStep(/*IN/OUT*/NJ_t *NJ, /*IN*/spr_step_t *step, /*IN/OUT*/profile_t **upProfiles); /* Update the profile of node and its ancestor, and delete nearby out-profiles */ void UpdateForNNI(/*IN/OUT*/NJ_t *NJ, int node, /*IN/OUT*/profile_t **upProfiles, bool useML); /* Sets NJ->parent[newchild] and replaces oldchild with newchild in the list of children of parent */ void ReplaceChild(/*IN/OUT*/NJ_t *NJ, int parent, int oldchild, int newchild); int CompareHitsByCriterion(const void *c1, const void *c2); int CompareHitsByIJ(const void *c1, const void *c2); int NGaps(NJ_t *NJ, int node); /* only handles leaf sequences */ /* node is the parent of AB, sibling of C node cannot be root or a leaf If node is the child of root, then D is the other sibling of node, and the 4th profile is D's profile. Otherwise, D is the parent of node, and we use its upprofile Call this with profiles=NULL to get the nodes, without fetching or computing profiles */ void SetupABCD(NJ_t *NJ, int node, /* the 4 profiles for ABCD; the last one is an upprofile */ /*OPTIONAL OUT*/profile_t *profiles[4], /*OPTIONAL IN/OUT*/profile_t **upProfiles, /*OUT*/int nodeABCD[4], bool useML); int Sibling(NJ_t *NJ, int node); /* At root, no unique sibling so returns -1 */ void RootSiblings(NJ_t *NJ, int node, /*OUT*/int sibs[2]); /* JC probability of nucleotide not changing, for each rate category */ double *PSameVector(double length, rates_t *rates); /* JC probability of nucleotide not changing, for each rate category */ double *PDiffVector(double *pSame, rates_t *rates); /* expeigen[iRate*nCodes + j] = exp(length * rate iRate * eigenvalue j) */ numeric_t *ExpEigenRates(double length, transition_matrix_t *transmat, rates_t *rates); /* Print a progress report if more than 0.1 second has gone by since the progress report */ /* Format should include 0-4 %d references and no newlines */ void ProgressReport(char *format, int iArg1, int iArg2, int iArg3, int iArg4); void LogTree(char *format, int round, /*OPTIONAL WRITE*/FILE *fp, NJ_t *NJ, char **names, uniquify_t *unique, bool bQuote); void LogMLRates(/*OPTIONAL WRITE*/FILE *fpLog, NJ_t *NJ); void *mymalloc(size_t sz); /* Prints "Out of memory" and exits on failure */ void *myfree(void *, size_t sz); /* Always returns NULL */ /* One-dimensional minimization using brent's function, with a fractional and an absolute tolerance */ double onedimenmin(double xmin, double xguess, double xmax, double (*f)(double,void*), void *data, double ftol, double atol, /*OUT*/double *fx, /*OUT*/double *f2x); double brent(double ax, double bx, double cx, double (*f)(double, void *), void *data, double ftol, double atol, double *foptx, double *f2optx, double fax, double fbx, double fcx); /* Vector 
operations, either using SSE3 or not Code assumes that vectors are a multiple of 4 in size */ void vector_multiply(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, int n, /*OUT*/numeric_t *fOut); numeric_t vector_multiply_sum(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, int n); void vector_add_mult(/*IN/OUT*/numeric_t *f, /*IN*/numeric_t *add, numeric_t weight, int n); /* multiply the transpose of a matrix by a vector */ void matrixt_by_vector4(/*IN*/numeric_t mat[4][MAXCODES], /*IN*/numeric_t vec[4], /*OUT*/numeric_t out[4]); /* sum(f1*fBy)*sum(f2*fBy) */ numeric_t vector_dot_product_rot(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t* fBy, int n); /* sum(f1*f2*f3) */ numeric_t vector_multiply3_sum(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t* f3, int n); numeric_t vector_sum(/*IN*/numeric_t *f1, int n); void vector_multiply_by(/*IN/OUT*/numeric_t *f, /*IN*/numeric_t fBy, int n); double clockDiff(/*IN*/struct timeval *clock_start); int timeval_subtract (/*OUT*/struct timeval *result, /*IN*/struct timeval *x, /*IN*/struct timeval *y); char *OpenMPString(void); void ran_start(long seed); double knuth_rand(); /* Random number between 0 and 1 */ void tred2 (double *a, const int n, const int np, double *d, double *e); double pythag(double a, double b); void tqli(double *d, double *e, int n, int np, double *z); /* Like mymalloc; duplicates the input (returns NULL if given NULL) */ void *mymemdup(void *data, size_t sz); void *myrealloc(void *data, size_t szOld, size_t szNew, bool bCopy); double pnorm(double z); /* Probability(value <=z) */ /* Hashtable functions */ typedef struct { char *string; int nCount; /* number of times this entry was seen */ int first; /* index of first entry with this value */ } hashbucket_t; typedef struct { int nBuckets; /* hashvalue -> bucket. Or look in bucket + 1, +2, etc., till you hit a NULL string */ hashbucket_t *buckets; } hashstrings_t; typedef int hashiterator_t; hashstrings_t *MakeHashtable(char **strings, int nStrings); hashstrings_t *FreeHashtable(hashstrings_t* hash); /*returns NULL*/ hashiterator_t FindMatch(hashstrings_t *hash, char *string); /* Return NULL if we have run out of values */ char *GetHashString(hashstrings_t *hash, hashiterator_t hi); int HashCount(hashstrings_t *hash, hashiterator_t hi); int HashFirst(hashstrings_t *hash, hashiterator_t hi); void PrintNJ(/*WRITE*/FILE *, NJ_t *NJ, char **names, uniquify_t *unique, bool bShowSupport, bool bQuoteNames); /* Print topology using node indices as node names */ void PrintNJInternal(/*WRITE*/FILE *, NJ_t *NJ, bool useLen); uniquify_t *UniquifyAln(/*IN*/alignment_t *aln); uniquify_t *FreeUniquify(uniquify_t *); /* returns NULL */ /* Convert a constraint alignment to a list of sequences. The returned array is indexed by iUnique and points to values in the input alignment */ char **AlnToConstraints(alignment_t *constraints, uniquify_t *unique, hashstrings_t *hashnames); /* ReadTree ignores non-unique leaves after the first instance. At the end, it prunes the tree to ignore empty children and it unroots the tree if necessary. 
*/ void ReadTree(/*IN/OUT*/NJ_t *NJ, /*IN*/uniquify_t *unique, /*IN*/hashstrings_t *hashnames, /*READ*/FILE *fpInTree); char *ReadTreeToken(/*READ*/FILE *fp); /* returns a static array, or NULL on EOF */ void ReadTreeAddChild(int parent, int child, /*IN/OUT*/int *parents, /*IN/OUT*/children_t *children); /* Do not add the leaf if we already set this unique-set to another parent */ void ReadTreeMaybeAddLeaf(int parent, char *name, hashstrings_t *hashnames, uniquify_t *unique, /*IN/OUT*/int *parents, /*IN/OUT*/children_t *children); void ReadTreeRemove(/*IN/OUT*/int *parents, /*IN/OUT*/children_t *children, int node); /* Routines to support tree traversal and prevent visiting a node >1 time (esp. if topology changes). */ typedef bool *traversal_t; traversal_t InitTraversal(NJ_t*); void SkipTraversalInto(int node, /*IN/OUT*/traversal_t traversal); traversal_t FreeTraversal(traversal_t, NJ_t*); /*returns NULL*/ /* returns new node, or -1 if nothing left to do. Use root for the first call. Will return every node and then root. Uses postorder tree traversal (depth-first search going down to leaves first) Keeps track of which nodes are visited, so even after an NNI that swaps a visited child with an unvisited uncle, the next call will visit the was-uncle-now-child. (However, after SPR moves, there is no such guarantee.) If pUp is not NULL, then, if going "back up" through a previously visited node (presumably due to an NNI), then it will return the node another time, with *pUp = true. */ int TraversePostorder(int lastnode, NJ_t *NJ, /*IN/OUT*/traversal_t, /*OUT OPTIONAL*/bool *pUp); /* Routines to support storing up-profiles during tree traversal Eventually these should be smart enough to do weighted joins and to minimize memory usage */ profile_t **UpProfiles(NJ_t *NJ); profile_t *GetUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int node, bool useML); profile_t *DeleteUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int node); /* returns NULL */ profile_t **FreeUpProfiles(profile_t **upProfiles, NJ_t *NJ); /* returns NULL */ /* Recomputes the profile for a node, presumably to reflect topology changes If bionj is set, does a weighted join -- which requires using upProfiles If useML is set, computes the posterior probability instead of averaging */ void RecomputeProfile(/*IN/OUT*/NJ_t *NJ, /*IN/OUT*/profile_t **upProfiles, int node, bool useML); /* Recompute profiles going up from the leaves, using the provided distance matrix and unweighted joins */ void RecomputeProfiles(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL*/distance_matrix_t *dmat); void RecomputeMLProfiles(/*IN/OUT*/NJ_t *NJ); /* If bionj is set, computes the weight to be given to A when computing the profile for the ancestor of A and B. C and D are the other profiles in the quartet If bionj is not set, returns -1 (which means unweighted in AverageProfile). 
(A and B are the first two profiles in the array) */ double QuartetWeight(profile_t *profiles[4], distance_matrix_t *dmat, int nPos); /* Returns a list of nodes, starting with node and ending with root */ int *PathToRoot(NJ_t *NJ, int node, /*OUT*/int *depth); int *FreePath(int *path, NJ_t *NJ); /* returns NULL */ /* The default amino acid distance matrix, derived from the BLOSUM45 similarity matrix */ distance_matrix_t matrixBLOSUM45; /* The default amino acid transition matrix (Jones Taylor Thorton 1992) */ double matrixJTT92[MAXCODES][MAXCODES]; double statJTT92[MAXCODES]; /* The WAG amino acid transition matrix (Whelan-And-Goldman 2001) */ double matrixWAG01[MAXCODES][MAXCODES]; double statWAG01[MAXCODES]; int main(int argc, char **argv) { int nAlign = 1; /* number of alignments to read */ int iArg; char *matrixPrefix = NULL; distance_matrix_t *distance_matrix = NULL; bool make_matrix = false; char *constraintsFile = NULL; char *intreeFile = NULL; bool intree1 = false; /* the same starting tree each round */ int nni = -1; /* number of rounds of NNI, defaults to 4*log2(n) */ int spr = 2; /* number of rounds of SPR */ int maxSPRLength = 10; /* maximum distance to move a node */ int MLnni = -1; /* number of rounds of ML NNI, defaults to 2*log2(n) */ bool MLlen = false; /* optimize branch lengths; no topology changes */ int nBootstrap = 1000; /* If set, number of replicates of local bootstrap to do */ int nRateCats = nDefaultRateCats; char *logfile = NULL; bool bUseGtr = false; bool bUseWag = false; bool bUseGtrRates = false; double gtrrates[6] = {1,1,1,1,1,1}; bool bUseGtrFreq = false; double gtrfreq[4] = {0.25,0.25,0.25,0.25}; bool bQuote = false; FILE *fpOut = stdout; if (isatty(STDIN_FILENO) && argc == 1) { fprintf(stderr,"Usage for FastTree version %s %s%s:\n%s", FT_VERSION, SSE_STRING, OpenMPString(), usage); #if (defined _WIN32 || defined WIN32 || defined WIN64 || defined _WIN64) fprintf(stderr, "Windows users: Please remember to run this inside a command shell\n"); fprintf(stderr,"Hit return to continue\n"); fgetc(stdin); #endif exit(0); } for (iArg = 1; iArg < argc; iArg++) { if (strcmp(argv[iArg],"-makematrix") == 0) { make_matrix = true; } else if (strcmp(argv[iArg],"-logdist") == 0) { fprintf(stderr, "Warning: logdist is now on by default and obsolete\n"); } else if (strcmp(argv[iArg],"-rawdist") == 0) { logdist = false; } else if (strcmp(argv[iArg],"-verbose") == 0 && iArg < argc-1) { verbose = atoi(argv[++iArg]); } else if (strcmp(argv[iArg],"-quiet") == 0) { verbose = 0; showProgress = 0; } else if (strcmp(argv[iArg],"-nopr") == 0) { showProgress = 0; } else if (strcmp(argv[iArg],"-slow") == 0) { slow = 1; } else if (strcmp(argv[iArg],"-fastest") == 0) { fastest = 1; tophitsRefresh = 0.5; useTopHits2nd = true; } else if (strcmp(argv[iArg],"-2nd") == 0) { useTopHits2nd = true; } else if (strcmp(argv[iArg],"-no2nd") == 0) { useTopHits2nd = false; } else if (strcmp(argv[iArg],"-slownni") == 0) { fastNNI = false; } else if (strcmp(argv[iArg], "-matrix") == 0 && iArg < argc-1) { iArg++; matrixPrefix = argv[iArg]; } else if (strcmp(argv[iArg], "-nomatrix") == 0) { useMatrix = false; } else if (strcmp(argv[iArg], "-n") == 0 && iArg < argc-1) { iArg++; nAlign = atoi(argv[iArg]); if (nAlign < 1) { fprintf(stderr, "-n argument for #input alignments must be > 0 not %s\n", argv[iArg]); exit(1); } } else if (strcmp(argv[iArg], "-quote") == 0) { bQuote = true; } else if (strcmp(argv[iArg], "-nt") == 0) { nCodes = 4; } else if (strcmp(argv[iArg], "-intree") == 0 && iArg < argc-1) { iArg++; 
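/* -intree FILE: start from the topology in FILE rather than building one by neighbor joining; with -intree1 (below) the same starting tree is re-read for every alignment when -n > 1 */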
intreeFile = argv[iArg]; }
else if (strcmp(argv[iArg], "-intree1") == 0 && iArg < argc-1) { iArg++; intreeFile = argv[iArg]; intree1 = true; }
else if (strcmp(argv[iArg], "-nj") == 0) { bionj = 0; }
else if (strcmp(argv[iArg], "-bionj") == 0) { bionj = 1; }
else if (strcmp(argv[iArg], "-boot") == 0 && iArg < argc-1) { iArg++; nBootstrap = atoi(argv[iArg]); }
else if (strcmp(argv[iArg], "-noboot") == 0 || strcmp(argv[iArg], "-nosupport") == 0) { nBootstrap = 0; }
else if (strcmp(argv[iArg], "-seed") == 0 && iArg < argc-1) { iArg++; long seed = atol(argv[iArg]); ran_start(seed); }
else if (strcmp(argv[iArg],"-top") == 0) { if(tophitsMult < 0.01) tophitsMult = 1.0; }
else if (strcmp(argv[iArg],"-notop") == 0) { tophitsMult = 0.0; }
else if (strcmp(argv[iArg], "-topm") == 0 && iArg < argc-1) { iArg++; tophitsMult = atof(argv[iArg]); }
else if (strcmp(argv[iArg], "-close") == 0 && iArg < argc-1) { iArg++; tophitsClose = atof(argv[iArg]); if (tophitsMult <= 0) { fprintf(stderr, "Cannot use -close unless -top is set above 0\n"); exit(1); } if (tophitsClose <= 0 || tophitsClose >= 1) { fprintf(stderr, "-close argument must be between 0 and 1\n"); exit(1); } }
else if (strcmp(argv[iArg], "-refresh") == 0 && iArg < argc-1) { iArg++; tophitsRefresh = atof(argv[iArg]); if (tophitsMult <= 0) { fprintf(stderr, "Cannot use -refresh unless -top is set above 0\n"); exit(1); } if (tophitsRefresh <= 0 || tophitsRefresh >= 1) { fprintf(stderr, "-refresh argument must be between 0 and 1\n"); exit(1); } }
else if (strcmp(argv[iArg],"-nni") == 0 && iArg < argc-1) { iArg++; nni = atoi(argv[iArg]); if (nni == 0) spr = 0; }
else if (strcmp(argv[iArg],"-spr") == 0 && iArg < argc-1) { iArg++; spr = atoi(argv[iArg]); }
else if (strcmp(argv[iArg],"-sprlength") == 0 && iArg < argc-1) { iArg++; maxSPRLength = atoi(argv[iArg]); }
else if (strcmp(argv[iArg],"-mlnni") == 0 && iArg < argc-1) { iArg++; MLnni = atoi(argv[iArg]); }
else if (strcmp(argv[iArg],"-noml") == 0) { MLnni = 0; }
else if (strcmp(argv[iArg],"-mllen") == 0) { MLnni = 0; MLlen = true; }
else if (strcmp(argv[iArg],"-nome") == 0) { spr = 0; nni = 0; }
else if (strcmp(argv[iArg],"-help") == 0) { fprintf(stderr,"FastTree %s %s%s:\n%s", FT_VERSION, SSE_STRING, OpenMPString(), usage); exit(0); }
else if (strcmp(argv[iArg],"-expert") == 0) { fprintf(stderr, "Detailed usage for FastTree %s %s%s:\n%s", FT_VERSION, SSE_STRING, OpenMPString(), expertUsage); exit(0); }
else if (strcmp(argv[iArg],"-pseudo") == 0) { if (iArg < argc-1 && isdigit(argv[iArg+1][0])) { iArg++; pseudoWeight = atof(argv[iArg]); if (pseudoWeight < 0.0) { fprintf(stderr,"Illegal argument to -pseudo: %s\n", argv[iArg]); exit(1); } } else { pseudoWeight = 1.0; } }
else if (strcmp(argv[iArg],"-constraints") == 0 && iArg < argc-1) { iArg++; constraintsFile = argv[iArg]; }
else if (strcmp(argv[iArg],"-constraintWeight") == 0 && iArg < argc-1) { iArg++; constraintWeight = atof(argv[iArg]); if (constraintWeight <= 0.0) { fprintf(stderr, "Illegal argument to -constraintWeight (must be greater than zero): %s\n", argv[iArg]); exit(1); } }
else if (strcmp(argv[iArg],"-mlacc") == 0 && iArg < argc-1) { iArg++; mlAccuracy = atoi(argv[iArg]); if (mlAccuracy < 1) { fprintf(stderr, "Illegal -mlacc argument: %s\n", argv[iArg]); exit(1); } }
else if (strcmp(argv[iArg],"-exactml") == 0 || strcmp(argv[iArg],"-mlexact") == 0) { fprintf(stderr,"-exactml is not required -- exact posteriors are the default now\n"); }
else if (strcmp(argv[iArg],"-approxml") == 0 || strcmp(argv[iArg],"-mlapprox") == 0) { exactML = false;
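/* -approxml / -mlapprox: use the approximate posterior distributions for amino acids (controlled by approxMLminf and approxMLminratio above) instead of the exact computation */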
} else if (strcmp(argv[iArg],"-cat") == 0 && iArg < argc-1) { iArg++; nRateCats = atoi(argv[iArg]); if (nRateCats < 1) { fprintf(stderr, "Illlegal argument to -ncat (must be greater than zero): %s\n", argv[iArg]); exit(1); } } else if (strcmp(argv[iArg],"-nocat") == 0) { nRateCats = 1; } else if (strcmp(argv[iArg], "-wag") == 0) { bUseWag = true; } else if (strcmp(argv[iArg], "-gtr") == 0) { bUseGtr = true; } else if (strcmp(argv[iArg], "-gtrrates") == 0 && iArg < argc-6) { bUseGtr = true; bUseGtrRates = true; int i; for (i = 0; i < 6; i++) { gtrrates[i] = atof(argv[++iArg]); if (gtrrates[i] < 1e-5) { fprintf(stderr, "Illegal or too small value of GTR rate: %s\n", argv[iArg]); exit(1); } } } else if (strcmp(argv[iArg],"-gtrfreq") == 0 && iArg < argc-4) { bUseGtr = true; bUseGtrFreq = true; int i; double sum = 0; for (i = 0; i < 4; i++) { gtrfreq[i] = atof(argv[++iArg]); sum += gtrfreq[i]; if (gtrfreq[i] < 1e-5) { fprintf(stderr, "Illegal or too small value of GTR frequency: %s\n", argv[iArg]); exit(1); } } if (fabs(1.0-sum) > 0.01) { fprintf(stderr, "-gtrfreq values do not sum to 1\n"); exit(1); } for (i = 0; i < 4; i++) gtrfreq[i] /= sum; } else if (strcmp(argv[iArg],"-log") == 0 && iArg < argc-1) { iArg++; logfile = argv[iArg]; } else if (strcmp(argv[iArg],"-gamma") == 0) { gammaLogLk = true; } else if (strcmp(argv[iArg],"-out") == 0 && iArg < argc-1) { iArg++; fpOut = fopen(argv[iArg],"w"); if(fpOut==NULL) { fprintf(stderr,"Cannot write to %s\n",argv[iArg]); exit(1); } } else if (argv[iArg][0] == '-') { fprintf(stderr, "Unknown or incorrect use of option %s\n%s", argv[iArg], usage); exit(1); } else break; } if(iArg < argc-1) { fprintf(stderr, "%s", usage); exit(1); } codesString = nCodes == 20 ? codesStringAA : codesStringNT; if (nCodes == 4 && matrixPrefix == NULL) useMatrix = false; /* no default nucleotide matrix */ char *fileName = iArg == (argc-1) ? 
argv[argc-1] : NULL; if (slow && fastest) { fprintf(stderr,"Cannot be both slow and fastest\n"); exit(1); } if (slow && tophitsMult > 0) { tophitsMult = 0.0; } FILE *fpLog = NULL; if (logfile != NULL) { fpLog = fopen(logfile, "w"); if (fpLog == NULL) { fprintf(stderr, "Cannot write to: %s\n", logfile); exit(1); } fprintf(fpLog, "Command:"); int i; for (i=0; i < argc; i++) fprintf(fpLog, " %s", argv[i]); fprintf(fpLog,"\n"); fflush(fpLog); } int i; FILE *fps[2] = {NULL,NULL}; int nFPs = 0; if (verbose) fps[nFPs++] = stderr; if (fpLog != NULL) fps[nFPs++] = fpLog; if (!make_matrix) { /* Report settings */ char tophitString[100] = "no"; char tophitsCloseStr[100] = "default"; if(tophitsClose > 0) sprintf(tophitsCloseStr,"%.2f",tophitsClose); if(tophitsMult>0) sprintf(tophitString,"%.2f*sqrtN close=%s refresh=%.2f", tophitsMult, tophitsCloseStr, tophitsRefresh); char supportString[100] = "none"; if (nBootstrap>0) { if (MLnni != 0 || MLlen) sprintf(supportString, "SH-like %d", nBootstrap); else sprintf(supportString,"Local boot %d",nBootstrap); } char nniString[100] = "(no NNI)"; if (nni > 0) sprintf(nniString, "+NNI (%d rounds)", nni); if (nni == -1) strcpy(nniString, "+NNI"); char sprString[100] = "(no SPR)"; if (spr > 0) sprintf(sprString, "+SPR (%d rounds range %d)", spr, maxSPRLength); char mlnniString[100] = "(no ML-NNI)"; if(MLnni > 0) sprintf(mlnniString, "+ML-NNI (%d rounds)", MLnni); else if (MLnni == -1) sprintf(mlnniString, "+ML-NNI"); else if (MLlen) sprintf(mlnniString, "+ML branch lengths"); if ((MLlen || MLnni != 0) && !exactML) strcat(mlnniString, " approx"); if (MLnni != 0) sprintf(mlnniString+strlen(mlnniString), " opt-each=%d",mlAccuracy); for (i = 0; i < nFPs; i++) { FILE *fp = fps[i]; fprintf(fp,"FastTree Version %s %s%s\nAlignment: %s", FT_VERSION, SSE_STRING, OpenMPString(), fileName != NULL ? fileName : "standard input"); if (nAlign>1) fprintf(fp, " (%d alignments)", nAlign); fprintf(fp,"\n%s distances: %s Joins: %s Support: %s\n", nCodes == 20 ? "Amino acid" : "Nucleotide", matrixPrefix ? matrixPrefix : (useMatrix? "BLOSUM45" : (nCodes==4 && logdist ? "Jukes-Cantor" : "%different")), bionj ? "weighted" : "balanced" , supportString); if (intreeFile == NULL) fprintf(fp, "Search: %s%s %s %s %s\nTopHits: %s\n", slow?"Exhaustive (slow)" : (fastest ? "Fastest" : "Normal"), useTopHits2nd ? "+2nd" : "", nniString, sprString, mlnniString, tophitString); else fprintf(fp, "Start at tree from %s %s %s\n", intreeFile, nniString, sprString); if (MLnni != 0 || MLlen) { fprintf(fp, "ML Model: %s,", (nCodes == 4) ? (bUseGtr ? "Generalized Time-Reversible" : "Jukes-Cantor") : (bUseWag ? 
"Whelan-And-Goldman" : "Jones-Taylor-Thorton")); if (nRateCats == 1) fprintf(fp, " No rate variation across sites"); else fprintf(fp, " CAT approximation with %d rate categories", nRateCats); fprintf(fp, "\n"); if (nCodes == 4 && bUseGtrRates) fprintf(fp, "GTR rates(ac ag at cg ct gt) %.4f %.4f %.4f %.4f %.4f %.4f\n", gtrrates[0],gtrrates[1],gtrrates[2],gtrrates[3],gtrrates[4],gtrrates[5]); if (nCodes == 4 && bUseGtrFreq) fprintf(fp, "GTR frequencies(A C G T) %.4f %.4f %.4f %.4f\n", gtrfreq[0],gtrfreq[1],gtrfreq[2],gtrfreq[3]); } if (constraintsFile != NULL) fprintf(fp, "Constraints: %s Weight: %.3f\n", constraintsFile, constraintWeight); if (pseudoWeight > 0) fprintf(fp, "Pseudocount weight for comparing sequences with little overlap: %.3lf\n",pseudoWeight); fflush(fp); } } if (matrixPrefix != NULL) { if (!useMatrix) { fprintf(stderr,"Cannot use both -matrix and -nomatrix arguments!"); exit(1); } distance_matrix = ReadDistanceMatrix(matrixPrefix); } else if (useMatrix) { /* use default matrix */ assert(nCodes==20); distance_matrix = &matrixBLOSUM45; SetupDistanceMatrix(distance_matrix); } else { distance_matrix = NULL; } int iAln; FILE *fpIn = fileName != NULL ? fopen(fileName, "r") : stdin; if (fpIn == NULL) { fprintf(stderr, "Cannot read %s\n", fileName); exit(1); } FILE *fpConstraints = NULL; if (constraintsFile != NULL) { fpConstraints = fopen(constraintsFile, "r"); if (fpConstraints == NULL) { fprintf(stderr, "Cannot read %s\n", constraintsFile); exit(1); } } FILE *fpInTree = NULL; if (intreeFile != NULL) { fpInTree = fopen(intreeFile,"r"); if (fpInTree == NULL) { fprintf(stderr, "Cannot read %s\n", intreeFile); exit(1); } } for(iAln = 0; iAln < nAlign; iAln++) { alignment_t *aln = ReadAlignment(fpIn, bQuote); if (aln->nSeq < 1) { fprintf(stderr, "No alignment sequences\n"); exit(1); } if (fpLog) { fprintf(fpLog, "Read %d sequences, %d positions\n", aln->nSeq, aln->nPos); fflush(fpLog); } struct timeval clock_start; gettimeofday(&clock_start,NULL); ProgressReport("Read alignment",0,0,0,0); /* Check that all names in alignment are unique */ hashstrings_t *hashnames = MakeHashtable(aln->names, aln->nSeq); int i; for (i=0; i<aln->nSeq; i++) { hashiterator_t hi = FindMatch(hashnames,aln->names[i]); if (HashCount(hashnames,hi) != 1) { fprintf(stderr,"Non-unique name '%s' in the alignment\n",aln->names[i]); exit(1); } } /* Make a list of unique sequences -- note some lists are bigger than required */ ProgressReport("Hashed the names",0,0,0,0); if (make_matrix) { NJ_t *NJ = InitNJ(aln->seqs, aln->nSeq, aln->nPos, /*constraintSeqs*/NULL, /*nConstraints*/0, distance_matrix, /*transmat*/NULL); printf(" %d\n",aln->nSeq); int i,j; for(i = 0; i < NJ->nSeq; i++) { printf("%s",aln->names[i]); for (j = 0; j < NJ->nSeq; j++) { besthit_t hit; SeqDist(NJ->profiles[i]->codes,NJ->profiles[j]->codes,NJ->nPos,NJ->distance_matrix,/*OUT*/&hit); if (logdist) hit.dist = LogCorrect(hit.dist); /* Make sure -0 prints as 0 */ printf(" %f", hit.dist <= 0.0 ? 
0.0 : hit.dist); } printf("\n"); } } else { /* reset counters*/ profileOps = 0; outprofileOps = 0; seqOps = 0; profileAvgOps = 0; nHillBetter = 0; nCloseUsed = 0; nClose2Used = 0; nRefreshTopHits = 0; nVisibleUpdate = 0; nNNI = 0; nML_NNI = 0; nProfileFreqAlloc = 0; nProfileFreqAvoid = 0; szAllAlloc = 0; mymallocUsed = 0; maxmallocHeap = 0; nLkCompute = 0; nPosteriorCompute = 0; nAAPosteriorExact = 0; nAAPosteriorRough = 0; nStarTests = 0; uniquify_t *unique = UniquifyAln(aln); ProgressReport("Identified unique sequences",0,0,0,0); /* read constraints */ alignment_t *constraints = NULL; char **uniqConstraints = NULL; if (constraintsFile != NULL) { constraints = ReadAlignment(fpConstraints, bQuote); if (constraints->nSeq < 4) { fprintf(stderr, "Warning: constraints file with less than 4 sequences ignored:\nalignment #%d in %s\n", iAln+1, constraintsFile); constraints = FreeAlignment(constraints); } else { uniqConstraints = AlnToConstraints(constraints, unique, hashnames); ProgressReport("Read the constraints",0,0,0,0); } } /* end load constraints */ transition_matrix_t *transmat = NULL; if (nCodes == 20) { transmat = bUseWag? CreateTransitionMatrix(matrixWAG01,statWAG01) : CreateTransitionMatrix(matrixJTT92,statJTT92); } else if (nCodes == 4 && bUseGtr && (bUseGtrRates || bUseGtrFreq)) { transmat = CreateGTR(gtrrates,gtrfreq); } NJ_t *NJ = InitNJ(unique->uniqueSeq, unique->nUnique, aln->nPos, uniqConstraints, uniqConstraints != NULL ? constraints->nPos : 0, /* nConstraints */ distance_matrix, transmat); if (verbose>2) fprintf(stderr, "read %s seqs %d (%d unique) positions %d nameLast %s seqLast %s\n", fileName ? fileName : "standard input", aln->nSeq, unique->nUnique, aln->nPos, aln->names[aln->nSeq-1], aln->seqs[aln->nSeq-1]); FreeAlignmentSeqs(/*IN/OUT*/aln); /*no longer needed*/ if (fpInTree != NULL) { if (intree1) fseek(fpInTree, 0L, SEEK_SET); ReadTree(/*IN/OUT*/NJ, /*IN*/unique, /*IN*/hashnames, /*READ*/fpInTree); if (verbose > 2) fprintf(stderr, "Read tree from %s\n", intreeFile); if (verbose > 2) PrintNJ(stderr, NJ, aln->names, unique, /*support*/false, bQuote); } else { FastNJ(NJ); } LogTree("NJ", 0, fpLog, NJ, aln->names, unique, bQuote); /* profile-frequencies for the "up-profiles" in ReliabilityNJ take only diameter(Tree)*L*a space not N*L*a space, because we can free them as we go. And up-profile by their nature tend to be complicated. So save the profile-frequency memory allocation counters now to exclude later results. */ #ifdef TRACK_MEMORY long svProfileFreqAlloc = nProfileFreqAlloc; long svProfileFreqAvoid = nProfileFreqAvoid; #endif int nniToDo = nni == -1 ? (int)(0.5 + 4.0 * log(NJ->nSeq)/log(2)) : nni; int sprRemaining = spr; int MLnniToDo = (MLnni != -1) ? 
MLnni : (int)(0.5 + 2.0*log(NJ->nSeq)/log(2)); if(verbose>0) { if (fpInTree == NULL) fprintf(stderr, "Initial topology in %.2f seconds\n", clockDiff(&clock_start)); if (spr > 0 || nniToDo > 0 || MLnniToDo > 0) fprintf(stderr,"Refining topology: %d rounds ME-NNIs, %d rounds ME-SPRs, %d rounds ML-NNIs\n", nniToDo, spr, MLnniToDo); } if (nniToDo>0) { int i; bool bConverged = false; nni_stats_t *nni_stats = InitNNIStats(NJ); for (i=0; i < nniToDo; i++) { double maxDelta; if (!bConverged) { int nChange = NNI(/*IN/OUT*/NJ, i, nniToDo, /*use ml*/false, /*IN/OUT*/nni_stats, /*OUT*/&maxDelta); LogTree("ME_NNI%d",i+1, fpLog, NJ, aln->names, unique, bQuote); if (nChange == 0) { bConverged = true; if (verbose>1) fprintf(stderr, "Min_evolution NNIs converged at round %d -- skipping some rounds\n", i+1); if (fpLog) fprintf(fpLog, "Min_evolution NNIs converged at round %d -- skipping some rounds\n", i+1); } } /* Interleave SPRs with NNIs (typically 1/3rd NNI, SPR, 1/3rd NNI, SPR, 1/3rd NNI */ if (sprRemaining > 0 && (nniToDo/(spr+1) > 0 && ((i+1) % (nniToDo/(spr+1))) == 0)) { SPR(/*IN/OUT*/NJ, maxSPRLength, spr-sprRemaining, spr); LogTree("ME_SPR%d",spr-sprRemaining+1, fpLog, NJ, aln->names, unique, bQuote); sprRemaining--; /* Restart the NNIs -- set all ages to 0, etc. */ bConverged = false; nni_stats = FreeNNIStats(nni_stats, NJ); nni_stats = InitNNIStats(NJ); } } nni_stats = FreeNNIStats(nni_stats, NJ); } while(sprRemaining > 0) { /* do any remaining SPR rounds */ SPR(/*IN/OUT*/NJ, maxSPRLength, spr-sprRemaining, spr); LogTree("ME_SPR%d",spr-sprRemaining+1, fpLog, NJ, aln->names, unique, bQuote); sprRemaining--; } /* In minimum-evolution mode, update branch lengths, even if no NNIs or SPRs, so that they are log-corrected, do not include penalties from constraints, and avoid errors due to approximation of out-distances. If doing maximum-likelihood NNIs, then we'll also use these to get estimates of starting distances for quartets, etc. 
*/ UpdateBranchLengths(/*IN/OUT*/NJ); LogTree("ME_Lengths",0, fpLog, NJ, aln->names, unique, bQuote); if(verbose>0 || fpLog) { double total_len = 0; int iNode; for (iNode = 0; iNode < NJ->maxnode; iNode++) total_len += fabs(NJ->branchlength[iNode]); if (verbose>0) { fprintf(stderr, "Total branch-length %.3f after %.2f sec\n", total_len, clockDiff(&clock_start)); fflush(stderr); } if (fpLog) { fprintf(fpLog, "Total branch-length %.3f after %.2f sec\n", total_len, clockDiff(&clock_start)); fflush(stderr); } } #ifdef TRACK_MEMORY if (verbose>1) { struct mallinfo mi = mallinfo(); fprintf(stderr, "Memory @ end of ME phase: %.2f MB (%.1f byte/pos) useful %.2f expected %.2f\n", (mi.arena+mi.hblkhd)/1.0e6, (mi.arena+mi.hblkhd)/(double)(NJ->nSeq*(double)NJ->nPos), mi.uordblks/1.0e6, mymallocUsed/1e6); } #endif SplitCount_t splitcount = {0,0,0,0,0.0,0.0}; if (MLnniToDo > 0 || MLlen) { /* Do maximum-likelihood computations */ /* Convert profiles to use the transition matrix */ distance_matrix_t *tmatAsDist = TransMatToDistanceMat(/*OPTIONAL*/NJ->transmat); RecomputeProfiles(NJ, /*OPTIONAL*/tmatAsDist); tmatAsDist = myfree(tmatAsDist, sizeof(distance_matrix_t)); double lastloglk = -1e20; nni_stats_t *nni_stats = InitNNIStats(NJ); bool resetGtr = nCodes == 4 && bUseGtr && !bUseGtrRates; if (MLlen) { int iRound; int maxRound = (int)(0.5 + log(NJ->nSeq)/log(2)); double dLastLogLk = -1e20; for (iRound = 1; iRound <= maxRound; iRound++) { int node; numeric_t *oldlength = (numeric_t*)mymalloc(sizeof(numeric_t)*NJ->maxnodes); for (node = 0; node < NJ->maxnode; node++) oldlength[node] = NJ->branchlength[node]; OptimizeAllBranchLengths(/*IN/OUT*/NJ); LogTree("ML_Lengths",iRound, fpLog, NJ, aln->names, unique, bQuote); double dMaxChange = 0; /* biggest change in branch length */ for (node = 0; node < NJ->maxnode; node++) { double d = fabs(oldlength[node] - NJ->branchlength[node]); if (dMaxChange < d) dMaxChange = d; } oldlength = myfree(oldlength, sizeof(numeric_t)*NJ->maxnodes); double loglk = TreeLogLk(NJ, /*site_likelihoods*/NULL); bool bConverged = iRound > 1 && (dMaxChange < 0.001 || loglk < (dLastLogLk+treeLogLkDelta)); if (verbose) fprintf(stderr, "%d rounds ML lengths: LogLk %s= %.3lf Max-change %.4lf%s Time %.2f\n", iRound, exactML || nCodes != 20 ? "" : "~", loglk, dMaxChange, bConverged ? " (converged)" : "", clockDiff(&clock_start)); if (fpLog) fprintf(fpLog, "TreeLogLk\tLength%d\t%.4lf\tMaxChange\t%.4lf\n", iRound, loglk, dMaxChange); if (iRound == 1) { if (resetGtr) SetMLGtr(/*IN/OUT*/NJ, bUseGtrFreq ? gtrfreq : NULL, fpLog); SetMLRates(/*IN/OUT*/NJ, nRateCats); LogMLRates(fpLog, NJ); } if (bConverged) break; } } if (MLnniToDo > 0) { /* This may help us converge faster, and is fast */ OptimizeAllBranchLengths(/*IN/OUT*/NJ); LogTree("ML_Lengths%d",1, fpLog, NJ, aln->names, unique, bQuote); } int iMLnni; double maxDelta; bool bConverged = false; for (iMLnni = 0; iMLnni < MLnniToDo; iMLnni++) { int changes = NNI(/*IN/OUT*/NJ, iMLnni, MLnniToDo, /*use ml*/true, /*IN/OUT*/nni_stats, /*OUT*/&maxDelta); LogTree("ML_NNI%d",iMLnni+1, fpLog, NJ, aln->names, unique, bQuote); double loglk = TreeLogLk(NJ, /*site_likelihoods*/NULL); bool bConvergedHere = (iMLnni > 0) && ((loglk < lastloglk + treeLogLkDelta) || maxDelta < treeLogLkDelta); if (verbose) fprintf(stderr, "ML-NNI round %d: LogLk %s= %.3f NNIs %d max delta %.2f Time %.2f%s\n", iMLnni+1, exactML || nCodes != 20 ? "" : "~", loglk, changes, maxDelta, clockDiff(&clock_start), bConverged ? 
" (final)" : ""); if (fpLog) fprintf(fpLog, "TreeLogLk\tML_NNI%d\t%.4lf\tMaxChange\t%.4lf\n", iMLnni+1, loglk, maxDelta); if (bConverged) break; /* we did our extra round */ if (bConvergedHere) bConverged = true; if (bConverged || iMLnni == MLnniToDo-2) { /* last round uses high-accuracy seettings -- reset NNI stats to tone down heuristics */ nni_stats = FreeNNIStats(nni_stats, NJ); nni_stats = InitNNIStats(NJ); if (verbose) fprintf(stderr, "Turning off heuristics for final round of ML NNIs%s\n", bConvergedHere? " (converged)" : ""); if (fpLog) fprintf(fpLog, "Turning off heuristics for final round of ML NNIs%s\n", bConvergedHere? " (converged)" : ""); } lastloglk = loglk; if (iMLnni == 0 && NJ->rates.nRateCategories == 1) { if (resetGtr) SetMLGtr(/*IN/OUT*/NJ, bUseGtrFreq ? gtrfreq : NULL, fpLog); SetMLRates(/*IN/OUT*/NJ, nRateCats); LogMLRates(fpLog, NJ); } } nni_stats = FreeNNIStats(nni_stats, NJ); /* This does not take long and improves the results */ if (MLnniToDo > 0) { OptimizeAllBranchLengths(/*IN/OUT*/NJ); LogTree("ML_Lengths%d",2, fpLog, NJ, aln->names, unique, bQuote); if (verbose || fpLog) { double loglk = TreeLogLk(NJ, /*site_likelihoods*/NULL); if (verbose) fprintf(stderr, "Optimize all lengths: LogLk %s= %.3f Time %.2f\n", exactML || nCodes != 20 ? "" : "~", loglk, clockDiff(&clock_start)); if (fpLog) { fprintf(fpLog, "TreeLogLk\tML_Lengths%d\t%.4f\n", 2, loglk); fflush(fpLog); } } } /* Count bad splits and compute SH-like supports if desired */ if ((MLnniToDo > 0 && !fastest) || nBootstrap > 0) TestSplitsML(NJ, /*OUT*/&splitcount, nBootstrap); /* Compute gamma-based likelihood? */ if (gammaLogLk && nRateCats > 1) { numeric_t *rates = MLSiteRates(nRateCats); double *site_loglk = MLSiteLikelihoodsByRate(NJ, rates, nRateCats); double scale = RescaleGammaLogLk(NJ->nPos, nRateCats, rates, /*IN*/site_loglk, /*OPTIONAL*/fpLog); rates = myfree(rates, sizeof(numeric_t) * nRateCats); site_loglk = myfree(site_loglk, sizeof(double) * nRateCats * NJ->nPos); for (i = 0; i < NJ->maxnodes; i++) NJ->branchlength[i] *= scale; } } else { /* Minimum evolution supports */ TestSplitsMinEvo(NJ, /*OUT*/&splitcount); if (nBootstrap > 0) ReliabilityNJ(NJ, nBootstrap); } for (i = 0; i < nFPs; i++) { FILE *fp = fps[i]; fprintf(fp, "Total time: %.2f seconds Unique: %d/%d Bad splits: %d/%d", clockDiff(&clock_start), NJ->nSeq, aln->nSeq, splitcount.nBadSplits, splitcount.nSplits); if (splitcount.dWorstDeltaUnconstrained > 0) fprintf(fp, " Worst %sdelta-%s %.3f", uniqConstraints != NULL ? "unconstrained " : "", (MLnniToDo > 0 || MLlen) ? "LogLk" : "Len", splitcount.dWorstDeltaUnconstrained); fprintf(fp,"\n"); if (NJ->nSeq > 3 && NJ->nConstraints > 0) { fprintf(fp, "Violating constraints: %d both bad: %d", splitcount.nConstraintViolations, splitcount.nBadBoth); if (splitcount.dWorstDeltaConstrained > 0) fprintf(fp, " Worst delta-%s due to constraints: %.3f", (MLnniToDo > 0 || MLlen) ? 
"LogLk" : "Len", splitcount.dWorstDeltaConstrained); fprintf(fp,"\n"); } if (verbose > 1 || fp == fpLog) { double dN2 = NJ->nSeq*(double)NJ->nSeq; fprintf(fp, "Dist/N**2: by-profile %.3f (out %.3f) by-leaf %.3f avg-prof %.3f\n", profileOps/dN2, outprofileOps/dN2, seqOps/dN2, profileAvgOps/dN2); if (nCloseUsed>0 || nClose2Used > 0 || nRefreshTopHits>0) fprintf(fp, "Top hits: close neighbors %ld/%d 2nd-level %ld refreshes %ld", nCloseUsed, NJ->nSeq, nClose2Used, nRefreshTopHits); if(!slow) fprintf(fp, " Hill-climb: %ld Update-best: %ld\n", nHillBetter, nVisibleUpdate); if (nniToDo > 0 || spr > 0 || MLnniToDo > 0) fprintf(fp, "NNI: %ld SPR: %ld ML-NNI: %ld\n", nNNI, nSPR, nML_NNI); if (MLnniToDo > 0) { fprintf(fp, "Max-lk operations: lk %ld posterior %ld", nLkCompute, nPosteriorCompute); if (nAAPosteriorExact > 0 || nAAPosteriorRough > 0) fprintf(fp, " approximate-posteriors %.2f%%", (100.0*nAAPosteriorRough)/(double)(nAAPosteriorExact+nAAPosteriorRough)); if (mlAccuracy < 2) fprintf(fp, " star-only %ld", nStarTests); fprintf(fp, "\n"); } } #ifdef TRACK_MEMORY fprintf(fp, "Memory: %.2f MB (%.1f byte/pos) ", maxmallocHeap/1.0e6, maxmallocHeap/(double)(aln->nSeq*(double)aln->nPos)); /* Only report numbers from before we do reliability estimates */ fprintf(fp, "profile-freq-alloc %ld avoided %.2f%%\n", svProfileFreqAlloc, svProfileFreqAvoid > 0 ? 100.0*svProfileFreqAvoid/(double)(svProfileFreqAlloc+svProfileFreqAvoid) : 0); #endif fflush(fp); } PrintNJ(fpOut, NJ, aln->names, unique, /*support*/nBootstrap > 0, bQuote); fflush(fpOut); if (fpLog) { fprintf(fpLog,"TreeCompleted\n"); fflush(fpLog); } FreeNJ(NJ); if (uniqConstraints != NULL) uniqConstraints = myfree(uniqConstraints, sizeof(char*) * unique->nUnique); constraints = FreeAlignment(constraints); unique = FreeUniquify(unique); } /* end build tree */ hashnames = FreeHashtable(hashnames); aln = FreeAlignment(aln); } /* end loop over alignments */ if (fpLog != NULL) fclose(fpLog); if (fpOut != stdout) fclose(fpOut); exit(0); } void ProgressReport(char *format, int i1, int i2, int i3, int i4) { static bool time_set = false; static struct timeval time_last; static struct timeval time_begin; if (!showProgress) return; static struct timeval time_now; gettimeofday(&time_now,NULL); if (!time_set) { time_begin = time_last = time_now; time_set = true; } static struct timeval elapsed; timeval_subtract(&elapsed,&time_now,&time_last); if (elapsed.tv_sec > 1 || elapsed.tv_usec > 100*1000 || verbose > 1) { timeval_subtract(&elapsed,&time_now,&time_begin); fprintf(stderr, "%7i.%2.2i seconds: ", (int)elapsed.tv_sec, (int)(elapsed.tv_usec/10000)); fprintf(stderr, format, i1, i2, i3, i4); if (verbose > 1 || !isatty(STDERR_FILENO)) { fprintf(stderr, "\n"); } else { fprintf(stderr, " \r"); } fflush(stderr); time_last = time_now; } } void LogMLRates(/*OPTIONAL WRITE*/FILE *fpLog, NJ_t *NJ) { if (fpLog != NULL) { rates_t *rates = &NJ->rates; fprintf(fpLog, "NCategories\t%d\nRates",rates->nRateCategories); assert(rates->nRateCategories > 0); int iRate; for (iRate = 0; iRate < rates->nRateCategories; iRate++) fprintf(fpLog, " %f", rates->rates[iRate]); fprintf(fpLog,"\nSiteCategories"); int iPos; for (iPos = 0; iPos < NJ->nPos; iPos++) { iRate = rates->ratecat[iPos]; fprintf(fpLog," %d",iRate+1); } fprintf(fpLog,"\n"); fflush(fpLog); } } void LogTree(char *format, int i, /*OPTIONAL WRITE*/FILE *fpLog, NJ_t *NJ, char **names, uniquify_t *unique, bool bQuote) { if(fpLog != NULL) { fprintf(fpLog, format, i); fprintf(fpLog, "\t"); PrintNJ(fpLog, NJ, names, unique, 
/*support*/false, bQuote); fflush(fpLog); } } NJ_t *InitNJ(char **sequences, int nSeq, int nPos, /*OPTIONAL*/char **constraintSeqs, int nConstraints, /*OPTIONAL*/distance_matrix_t *distance_matrix, /*OPTIONAL*/transition_matrix_t *transmat) { int iNode; NJ_t *NJ = (NJ_t*)mymalloc(sizeof(NJ_t)); NJ->root = -1; /* set at end of FastNJ() */ NJ->maxnode = NJ->nSeq = nSeq; NJ->nPos = nPos; NJ->maxnodes = 2*nSeq; NJ->seqs = sequences; NJ->distance_matrix = distance_matrix; NJ->transmat = transmat; NJ->nConstraints = nConstraints; NJ->constraintSeqs = constraintSeqs; NJ->profiles = (profile_t **)mymalloc(sizeof(profile_t*) * NJ->maxnodes); unsigned long counts[256]; int i; for (i = 0; i < 256; i++) counts[i] = 0; for (iNode = 0; iNode < NJ->nSeq; iNode++) { NJ->profiles[iNode] = SeqToProfile(NJ, NJ->seqs[iNode], nPos, constraintSeqs != NULL ? constraintSeqs[iNode] : NULL, nConstraints, iNode, /*IN/OUT*/counts); } unsigned long totCount = 0; for (i = 0; i < 256; i++) totCount += counts[i]; /* warnings about unknown characters */ for (i = 0; i < 256; i++) { if (counts[i] == 0 || i == '.' || i == '-') continue; unsigned char *codesP; bool bMatched = false; for (codesP = codesString; *codesP != '\0'; codesP++) { if (*codesP == i || tolower(*codesP) == i) { bMatched = true; break; } } if (!bMatched) fprintf(stderr, "Ignored unknown character %c (seen %lu times)\n", i, counts[i]); } /* warnings about the counts */ double fACGTUN = (counts['A'] + counts['C'] + counts['G'] + counts['T'] + counts['U'] + counts['N'] + counts['a'] + counts['c'] + counts['g'] + counts['t'] + counts['u'] + counts['n']) / (double)(totCount - counts['-'] - counts['.']); if (nCodes == 4 && fACGTUN < 0.9) fprintf(stderr, "WARNING! ONLY %.1f%% NUCLEOTIDE CHARACTERS -- IS THIS REALLY A NUCLEOTIDE ALIGNMENT?\n", 100.0 * fACGTUN); else if (nCodes == 20 && fACGTUN >= 0.9) fprintf(stderr, "WARNING! 
%.1f%% NUCLEOTIDE CHARACTERS -- IS THIS REALLY A PROTEIN ALIGNMENT?\n", 100.0 * fACGTUN); if(verbose>10) fprintf(stderr,"Made sequence profiles\n"); for (iNode = NJ->nSeq; iNode < NJ->maxnodes; iNode++) NJ->profiles[iNode] = NULL; /* not yet exists */ NJ->outprofile = OutProfile(NJ->profiles, NJ->nSeq, NJ->nPos, NJ->nConstraints, NJ->distance_matrix); if(verbose>10) fprintf(stderr,"Made out-profile\n"); NJ->totdiam = 0.0; NJ->diameter = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->diameter[iNode] = 0; NJ->varDiameter = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->varDiameter[iNode] = 0; NJ->selfdist = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->selfdist[iNode] = 0; NJ->selfweight = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); for (iNode = 0; iNode < NJ->nSeq; iNode++) NJ->selfweight[iNode] = NJ->nPos - NGaps(NJ,iNode); NJ->outDistances = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); NJ->nOutDistActive = (int *)mymalloc(sizeof(int)*NJ->maxnodes); for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->nOutDistActive[iNode] = NJ->nSeq * 10; /* unreasonably high value */ NJ->parent = NULL; /* so SetOutDistance ignores it */ for (iNode = 0; iNode < NJ->nSeq; iNode++) SetOutDistance(/*IN/UPDATE*/NJ, iNode, /*nActive*/NJ->nSeq); if (verbose>2) { for (iNode = 0; iNode < 4 && iNode < NJ->nSeq; iNode++) fprintf(stderr, "Node %d outdist %f\n", iNode, NJ->outDistances[iNode]); } NJ->parent = (int *)mymalloc(sizeof(int)*NJ->maxnodes); for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->parent[iNode] = -1; NJ->branchlength = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); /* distance to parent */ for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->branchlength[iNode] = 0; NJ->support = (numeric_t *)mymalloc(sizeof(numeric_t)*NJ->maxnodes); for (iNode = 0; iNode < NJ->maxnodes; iNode++) NJ->support[iNode] = -1.0; NJ->child = (children_t*)mymalloc(sizeof(children_t)*NJ->maxnodes); for (iNode= 0; iNode < NJ->maxnode; iNode++) NJ->child[iNode].nChild = 0; NJ->rates.nRateCategories = 0; NJ->rates.rates = NULL; NJ->rates.ratecat = NULL; AllocRateCategories(&NJ->rates, 1, NJ->nPos); return(NJ); } NJ_t *FreeNJ(NJ_t *NJ) { if (NJ==NULL) return(NJ); int i; for (i=0; i < NJ->maxnode; i++) NJ->profiles[i] = FreeProfile(NJ->profiles[i], NJ->nPos, NJ->nConstraints); NJ->profiles = myfree(NJ->profiles, sizeof(profile_t*) * NJ->maxnodes); NJ->outprofile = FreeProfile(NJ->outprofile, NJ->nPos, NJ->nConstraints); NJ->diameter = myfree(NJ->diameter, sizeof(numeric_t)*NJ->maxnodes); NJ->varDiameter = myfree(NJ->varDiameter, sizeof(numeric_t)*NJ->maxnodes); NJ->selfdist = myfree(NJ->selfdist, sizeof(numeric_t)*NJ->maxnodes); NJ->selfweight = myfree(NJ->selfweight, sizeof(numeric_t)*NJ->maxnodes); NJ->outDistances = myfree(NJ->outDistances, sizeof(numeric_t)*NJ->maxnodes); NJ->nOutDistActive = myfree(NJ->nOutDistActive, sizeof(int)*NJ->maxnodes); NJ->parent = myfree(NJ->parent, sizeof(int)*NJ->maxnodes); NJ->branchlength = myfree(NJ->branchlength, sizeof(numeric_t)*NJ->maxnodes); NJ->support = myfree(NJ->support, sizeof(numeric_t)*NJ->maxnodes); NJ->child = myfree(NJ->child, sizeof(children_t)*NJ->maxnodes); NJ->transmat = myfree(NJ->transmat, sizeof(transition_matrix_t)); AllocRateCategories(&NJ->rates, 0, NJ->nPos); return(myfree(NJ, sizeof(NJ_t))); } /* Allocate or reallocate the rate categories, and set every position to 
category 0 and every category's rate to 1.0 If nRateCategories=0, just deallocate */ void AllocRateCategories(/*IN/OUT*/rates_t *rates, int nRateCategories, int nPos) { assert(nRateCategories >= 0); rates->rates = myfree(rates->rates, sizeof(numeric_t)*rates->nRateCategories); rates->ratecat = myfree(rates->ratecat, sizeof(unsigned int)*nPos); rates->nRateCategories = nRateCategories; if (rates->nRateCategories > 0) { rates->rates = (numeric_t*)mymalloc(sizeof(numeric_t)*rates->nRateCategories); int i; for (i = 0; i < nRateCategories; i++) rates->rates[i] = 1.0; rates->ratecat = (unsigned int *)mymalloc(sizeof(unsigned int)*nPos); for (i = 0; i < nPos; i++) rates->ratecat[i] = 0; } } void FastNJ(NJ_t *NJ) { int iNode; assert(NJ->nSeq >= 1); if (NJ->nSeq < 3) { NJ->root = NJ->maxnode++; NJ->child[NJ->root].nChild = NJ->nSeq; for (iNode = 0; iNode < NJ->nSeq; iNode++) { NJ->parent[iNode] = NJ->root; NJ->child[NJ->root].child[iNode] = iNode; } if (NJ->nSeq == 1) { NJ->branchlength[0] = 0; } else { assert (NJ->nSeq == 2); besthit_t hit; SeqDist(NJ->profiles[0]->codes,NJ->profiles[1]->codes,NJ->nPos,NJ->distance_matrix,/*OUT*/&hit); NJ->branchlength[0] = hit.dist/2.0; NJ->branchlength[1] = hit.dist/2.0; } return; } /* else 3 or more sequences */ /* The visible set stores the best hit of each node (unless using top hits, in which case it is handled by the top hits routines) */ besthit_t *visible = NULL; /* Not used if doing top hits */ besthit_t *besthitNew = NULL; /* All hits of new node -- not used if doing top-hits */ /* The top-hits lists, with the key parameter m = length of each top-hit list */ top_hits_t *tophits = NULL; int m = 0; /* maximum length of a top-hits list */ if (tophitsMult > 0) { m = (int)(0.5 + tophitsMult*sqrt(NJ->nSeq)); if(m<4 || 2*m >= NJ->nSeq) { m=0; if(verbose>1) fprintf(stderr,"Too few leaves, turning off top-hits\n"); } else { if(verbose>2) fprintf(stderr,"Top-hit-list size = %d of %d\n", m, NJ->nSeq); } } assert(!(slow && m>0)); /* Initialize top-hits or visible set */ if (m>0) { tophits = InitTopHits(NJ, m); SetAllLeafTopHits(/*IN/UPDATE*/NJ, /*OUT*/tophits); ResetTopVisible(/*IN/UPDATE*/NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/tophits); } else if (!slow) { visible = (besthit_t*)mymalloc(sizeof(besthit_t)*NJ->maxnodes); besthitNew = (besthit_t*)mymalloc(sizeof(besthit_t)*NJ->maxnodes); for (iNode = 0; iNode < NJ->nSeq; iNode++) SetBestHit(iNode, NJ, /*nActive*/NJ->nSeq, /*OUT*/&visible[iNode], /*OUT IGNORED*/NULL); } /* Iterate over joins */ int nActiveOutProfileReset = NJ->nSeq; int nActive; for (nActive = NJ->nSeq; nActive > 3; nActive--) { int nJoinsDone = NJ->nSeq - nActive; if (nJoinsDone > 0 && (nJoinsDone % 100) == 0) ProgressReport("Joined %6d of %6d", nJoinsDone, NJ->nSeq-3, 0, 0); besthit_t join; /* the join to do */ if (slow) { ExhaustiveNJSearch(NJ,nActive,/*OUT*/&join); } else if (m>0) { TopHitNJSearch(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, /*OUT*/&join); } else { FastNJSearch(NJ, nActive, /*IN/OUT*/visible, /*OUT*/&join); } if (verbose>2) { double penalty = constraintWeight * (double)JoinConstraintPenalty(NJ, join.i, join.j); if (penalty > 0.001) { fprintf(stderr, "Constraint violation during neighbor-joining %d %d into %d penalty %.3f\n", join.i, join.j, NJ->maxnode, penalty); int iC; for (iC = 0; iC < NJ->nConstraints; iC++) { int local = JoinConstraintPenaltyPiece(NJ, join.i, join.j, iC); if (local > 0) fprintf(stderr, "Constraint %d piece %d %d/%d %d/%d %d/%d\n", iC, local, NJ->profiles[join.i]->nOn[iC], NJ->profiles[join.i]->nOff[iC], 
NJ->profiles[join.j]->nOn[iC], NJ->profiles[join.j]->nOff[iC], NJ->outprofile->nOn[iC] - NJ->profiles[join.i]->nOn[iC] - NJ->profiles[join.j]->nOn[iC], NJ->outprofile->nOff[iC] - NJ->profiles[join.i]->nOff[iC] - NJ->profiles[join.j]->nOff[iC]); } } } /* because of the stale out-distance heuristic, make sure that these are up-to-date */ SetOutDistance(NJ, join.i, nActive); SetOutDistance(NJ, join.j, nActive); /* Make sure weight is set and criterion is up to date */ SetDistCriterion(NJ, nActive, /*IN/OUT*/&join); assert(NJ->nOutDistActive[join.i] == nActive); assert(NJ->nOutDistActive[join.j] == nActive); int newnode = NJ->maxnode++; NJ->parent[join.i] = newnode; NJ->parent[join.j] = newnode; NJ->child[newnode].nChild = 2; NJ->child[newnode].child[0] = join.i < join.j ? join.i : join.j; NJ->child[newnode].child[1] = join.i > join.j ? join.i : join.j; double rawIJ = join.dist + NJ->diameter[join.i] + NJ->diameter[join.j]; double distIJ = join.dist; double deltaDist = (NJ->outDistances[join.i]-NJ->outDistances[join.j])/(double)(nActive-2); NJ->branchlength[join.i] = (distIJ + deltaDist)/2; NJ->branchlength[join.j] = (distIJ - deltaDist)/2; double bionjWeight = 0.5; /* IJ = bionjWeight*I + (1-bionjWeight)*J */ double varIJ = rawIJ - NJ->varDiameter[join.i] - NJ->varDiameter[join.j]; if (bionj && join.weight > 0.01 && varIJ > 0.001) { /* Set bionjWeight according to the BIONJ formula, where the variance matrix is approximated by Vij = ProfileVar(i,j) - varDiameter(i) - varDiameter(j) ProfileVar(i,j) = distance(i,j) = top(i,j)/weight(i,j) (The node's distance diameter does not affect the variances.) The BIONJ formula is equation 9 from Gascuel 1997: bionjWeight = 1/2 + sum(k!=i,j) (Vjk - Vik) / ((nActive-2)*Vij) sum(k!=i,j) (Vjk - Vik) = sum(k!=i,j) Vik - varDiameter(j) + varDiameter(i) = sum(k!=i,j) ProfileVar(j,k) - sum(k!=i,j) ProfileVar(i,k) + (nActive-2)*(varDiameter(i)-varDiameter(j)) sum(k!=i,j) ProfileVar(i,k) ~= (sum(k!=i,j) distance(i,k) * weight(i,k))/(mean(k!=i,j) weight(i,k)) ~= (N-2) * top(i, Out-i-j) / weight(i, Out-i-j) weight(i, Out-i-j) = N*weight(i,Out) - weight(i,i) - weight(i,j) top(i, Out-i-j) = N*top(i,Out) - top(i,i) - top(i,j) */ besthit_t outI; besthit_t outJ; ProfileDist(NJ->profiles[join.i],NJ->outprofile,NJ->nPos,NJ->distance_matrix,/*OUT*/&outI); ProfileDist(NJ->profiles[join.j],NJ->outprofile,NJ->nPos,NJ->distance_matrix,/*OUT*/&outJ); outprofileOps += 2; double varIWeight = (nActive * outI.weight - NJ->selfweight[join.i] - join.weight); double varJWeight = (nActive * outJ.weight - NJ->selfweight[join.j] - join.weight); double varITop = outI.dist * outI.weight * nActive - NJ->selfdist[join.i] * NJ->selfweight[join.i] - rawIJ * join.weight; double varJTop = outJ.dist * outJ.weight * nActive - NJ->selfdist[join.j] * NJ->selfweight[join.j] - rawIJ * join.weight; double deltaProfileVarOut = (nActive-2) * (varJTop/varJWeight - varITop/varIWeight); double deltaVarDiam = (nActive-2)*(NJ->varDiameter[join.i] - NJ->varDiameter[join.j]); if (varJWeight > 0.01 && varIWeight > 0.01) bionjWeight = 0.5 + (deltaProfileVarOut+deltaVarDiam)/(2*(nActive-2)*varIJ); if(bionjWeight<0) bionjWeight=0; if(bionjWeight>1) bionjWeight=1; if (verbose>2) fprintf(stderr,"dVarO %f dVarDiam %f varIJ %f from dist %f weight %f (pos %d) bionjWeight %f %f\n", deltaProfileVarOut, deltaVarDiam, varIJ, join.dist, join.weight, NJ->nPos, bionjWeight, 1-bionjWeight); if (verbose>3 && (newnode%5) == 0) { /* Compare weight estimated from outprofiles from weight made by summing over other nodes */ double 
deltaProfileVarTot = 0; for (iNode = 0; iNode < newnode; iNode++) { if (NJ->parent[iNode] < 0) { /* excludes join.i, join.j */ besthit_t di, dj; ProfileDist(NJ->profiles[join.i],NJ->profiles[iNode],NJ->nPos,NJ->distance_matrix,/*OUT*/&di); ProfileDist(NJ->profiles[join.j],NJ->profiles[iNode],NJ->nPos,NJ->distance_matrix,/*OUT*/&dj); deltaProfileVarTot += dj.dist - di.dist; } } double lambdaTot = 0.5 + (deltaProfileVarTot+deltaVarDiam)/(2*(nActive-2)*varIJ); if (lambdaTot < 0) lambdaTot = 0; if (lambdaTot > 1) lambdaTot = 1; if (fabs(bionjWeight-lambdaTot) > 0.01 || verbose > 4) fprintf(stderr, "deltaProfileVar actual %.6f estimated %.6f lambda actual %.3f estimated %.3f\n", deltaProfileVarTot,deltaProfileVarOut,lambdaTot,bionjWeight); } } if (verbose > 2) fprintf(stderr, "Join\t%d\t%d\t%.6f\tlambda\t%.6f\tselfw\t%.3f\t%.3f\tnew\t%d\n", join.i < join.j ? join.i : join.j, join.i < join.j ? join.j : join.i, join.criterion, bionjWeight, NJ->selfweight[join.i < join.j ? join.i : join.j], NJ->selfweight[join.i < join.j ? join.j : join.i], newnode); NJ->diameter[newnode] = bionjWeight * (NJ->branchlength[join.i] + NJ->diameter[join.i]) + (1-bionjWeight) * (NJ->branchlength[join.j] + NJ->diameter[join.j]); NJ->varDiameter[newnode] = bionjWeight * NJ->varDiameter[join.i] + (1-bionjWeight) * NJ->varDiameter[join.j] + bionjWeight * (1-bionjWeight) * varIJ; NJ->profiles[newnode] = AverageProfile(NJ->profiles[join.i],NJ->profiles[join.j], NJ->nPos, NJ->nConstraints, NJ->distance_matrix, bionj ? bionjWeight : /*noweight*/-1.0); /* Update out-distances and total diameters */ int changedActiveOutProfile = nActiveOutProfileReset - (nActive-1); if (changedActiveOutProfile >= nResetOutProfile && changedActiveOutProfile >= fResetOutProfile * nActiveOutProfileReset) { /* Recompute the outprofile from scratch to avoid roundoff error */ profile_t **activeProfiles = (profile_t**)mymalloc(sizeof(profile_t*)*(nActive-1)); int nSaved = 0; NJ->totdiam = 0; for (iNode=0;iNode<NJ->maxnode;iNode++) { if (NJ->parent[iNode]<0) { assert(nSaved < nActive-1); activeProfiles[nSaved++] = NJ->profiles[iNode]; NJ->totdiam += NJ->diameter[iNode]; } } assert(nSaved==nActive-1); FreeProfile(NJ->outprofile, NJ->nPos, NJ->nConstraints); if(verbose>2) fprintf(stderr,"Recomputing outprofile %d %d\n",nActiveOutProfileReset,nActive-1); NJ->outprofile = OutProfile(activeProfiles, nSaved, NJ->nPos, NJ->nConstraints, NJ->distance_matrix); activeProfiles = myfree(activeProfiles, sizeof(profile_t*)*(nActive-1)); nActiveOutProfileReset = nActive-1; } else { UpdateOutProfile(/*OUT*/NJ->outprofile, NJ->profiles[join.i], NJ->profiles[join.j], NJ->profiles[newnode], nActive, NJ->nPos, NJ->nConstraints, NJ->distance_matrix); NJ->totdiam += NJ->diameter[newnode] - NJ->diameter[join.i] - NJ->diameter[join.j]; } /* Store self-dist for use in other computations */ besthit_t selfdist; ProfileDist(NJ->profiles[newnode],NJ->profiles[newnode],NJ->nPos,NJ->distance_matrix,/*OUT*/&selfdist); NJ->selfdist[newnode] = selfdist.dist; NJ->selfweight[newnode] = selfdist.weight; /* Find the best hit of the joined node IJ */ if (m>0) { TopHitJoin(newnode, /*IN/UPDATE*/NJ, nActive-1, /*IN/OUT*/tophits); } else { /* Not using top-hits, so we update all out-distances */ for (iNode = 0; iNode < NJ->maxnode; iNode++) { if (NJ->parent[iNode] < 0) { /* True nActive is now nActive-1 */ SetOutDistance(/*IN/UPDATE*/NJ, iNode, nActive-1); } } if(visible != NULL) { SetBestHit(newnode, NJ, nActive-1, /*OUT*/&visible[newnode], /*OUT OPTIONAL*/besthitNew); if (verbose>2) 
fprintf(stderr,"Visible %d %d %f %f\n", visible[newnode].i, visible[newnode].j, visible[newnode].dist, visible[newnode].criterion); if (besthitNew != NULL) { /* Use distances to new node to update visible set entries that are non-optimal */ for (iNode = 0; iNode < NJ->maxnode; iNode++) { if (NJ->parent[iNode] >= 0 || iNode == newnode) continue; int iOldVisible = visible[iNode].j; assert(iOldVisible>=0); assert(visible[iNode].i == iNode); /* Update the criterion; use nActive-1 because haven't decremented nActive yet */ if (NJ->parent[iOldVisible] < 0) SetCriterion(/*IN/OUT*/NJ, nActive-1, &visible[iNode]); if (NJ->parent[iOldVisible] >= 0 || besthitNew[iNode].criterion < visible[iNode].criterion) { if(verbose>3) fprintf(stderr,"Visible %d reset from %d to %d (%f vs. %f)\n", iNode, iOldVisible, newnode, visible[iNode].criterion, besthitNew[iNode].criterion); if(NJ->parent[iOldVisible] < 0) nVisibleUpdate++; visible[iNode].j = newnode; visible[iNode].dist = besthitNew[iNode].dist; visible[iNode].criterion = besthitNew[iNode].criterion; } } /* end loop over all nodes */ } /* end if recording all hits of new node */ } /* end if keeping a visible set */ } /* end else (m==0) */ } /* end loop over nActive */ #ifdef TRACK_MEMORY if (verbose>1) { struct mallinfo mi = mallinfo(); fprintf(stderr, "Memory @ end of FastNJ(): %.2f MB (%.1f byte/pos) useful %.2f expected %.2f\n", (mi.arena+mi.hblkhd)/1.0e6, (mi.arena+mi.hblkhd)/(double)(NJ->nSeq*(double)NJ->nPos), mi.uordblks/1.0e6, mymallocUsed/1e6); } #endif /* We no longer need the tophits, visible set, etc. */ if (visible != NULL) visible = myfree(visible,sizeof(besthit_t)*NJ->maxnodes); if (besthitNew != NULL) besthitNew = myfree(besthitNew,sizeof(besthit_t)*NJ->maxnodes); tophits = FreeTopHits(tophits); /* Add a root for the 3 remaining nodes */ int top[3]; int nTop = 0; for (iNode = 0; iNode < NJ->maxnode; iNode++) { if (NJ->parent[iNode] < 0) { assert(nTop <= 2); top[nTop++] = iNode; } } assert(nTop==3); NJ->root = NJ->maxnode++; NJ->child[NJ->root].nChild = 3; for (nTop = 0; nTop < 3; nTop++) { NJ->parent[top[nTop]] = NJ->root; NJ->child[NJ->root].child[nTop] = top[nTop]; } besthit_t dist01, dist02, dist12; ProfileDist(NJ->profiles[top[0]], NJ->profiles[top[1]], NJ->nPos, NJ->distance_matrix, /*OUT*/&dist01); ProfileDist(NJ->profiles[top[0]], NJ->profiles[top[2]], NJ->nPos, NJ->distance_matrix, /*OUT*/&dist02); ProfileDist(NJ->profiles[top[1]], NJ->profiles[top[2]], NJ->nPos, NJ->distance_matrix, /*OUT*/&dist12); double d01 = dist01.dist - NJ->diameter[top[0]] - NJ->diameter[top[1]]; double d02 = dist02.dist - NJ->diameter[top[0]] - NJ->diameter[top[2]]; double d12 = dist12.dist - NJ->diameter[top[1]] - NJ->diameter[top[2]]; NJ->branchlength[top[0]] = (d01 + d02 - d12)/2; NJ->branchlength[top[1]] = (d01 + d12 - d02)/2; NJ->branchlength[top[2]] = (d02 + d12 - d01)/2; /* Check how accurate the outprofile is */ if (verbose>2) { profile_t *p[3] = {NJ->profiles[top[0]], NJ->profiles[top[1]], NJ->profiles[top[2]]}; profile_t *out = OutProfile(p, 3, NJ->nPos, NJ->nConstraints, NJ->distance_matrix); int i; double freqerror = 0; double weighterror = 0; for (i=0;i<NJ->nPos;i++) { weighterror += fabs(out->weights[i] - NJ->outprofile->weights[i]); int k; for(k=0;k<nCodes;k++) freqerror += fabs(out->vectors[nCodes*i+k] - NJ->outprofile->vectors[nCodes*i+k]); } fprintf(stderr,"Roundoff error in outprofile@end: WeightError %f FreqError %f\n", weighterror, freqerror); FreeProfile(out, NJ->nPos, NJ->nConstraints); } return; } void ExhaustiveNJSearch(NJ_t *NJ, int 
nActive, /*OUT*/besthit_t *join) { join->i = -1; join->j = -1; join->weight = 0; join->dist = 1e20; join->criterion = 1e20; double bestCriterion = 1e20; int i, j; for (i = 0; i < NJ->maxnode-1; i++) { if (NJ->parent[i] < 0) { for (j = i+1; j < NJ->maxnode; j++) { if (NJ->parent[j] < 0) { besthit_t hit; hit.i = i; hit.j = j; SetDistCriterion(NJ, nActive, /*IN/OUT*/&hit); if (hit.criterion < bestCriterion) { *join = hit; bestCriterion = hit.criterion; } } } } } assert (join->i >= 0 && join->j >= 0); } void FastNJSearch(NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *besthits, /*OUT*/besthit_t *join) { join->i = -1; join->j = -1; join->dist = 1e20; join->weight = 0; join->criterion = 1e20; int iNode; for (iNode = 0; iNode < NJ->maxnode; iNode++) { int jNode = besthits[iNode].j; if (NJ->parent[iNode] < 0 && NJ->parent[jNode] < 0) { /* both i and j still active */ /* recompute criterion to reflect the current out-distances */ SetCriterion(NJ, nActive, /*IN/OUT*/&besthits[iNode]); if (besthits[iNode].criterion < join->criterion) *join = besthits[iNode]; } } if(!fastest) { int changed; do { changed = 0; assert(join->i >= 0 && join->j >= 0); SetBestHit(join->i, NJ, nActive, /*OUT*/&besthits[join->i], /*OUT IGNORED*/NULL); if (besthits[join->i].j != join->j) { changed = 1; if (verbose>2) fprintf(stderr,"BetterI\t%d\t%d\t%d\t%d\t%f\t%f\n", join->i,join->j,besthits[join->i].i,besthits[join->i].j, join->criterion,besthits[join->i].criterion); } /* Save the best hit either way, because the out-distance has probably changed since we started the computation. */ join->j = besthits[join->i].j; join->weight = besthits[join->i].weight; join->dist = besthits[join->i].dist; join->criterion = besthits[join->i].criterion; SetBestHit(join->j, NJ, nActive, /*OUT*/&besthits[join->j], /*OUT IGNORE*/NULL); if (besthits[join->j].j != join->i) { changed = 1; if (verbose>2) fprintf(stderr,"BetterJ\t%d\t%d\t%d\t%d\t%f\t%f\n", join->i,join->j,besthits[join->j].i,besthits[join->j].j, join->criterion,besthits[join->j].criterion); join->i = besthits[join->j].j; join->weight = besthits[join->j].weight; join->dist = besthits[join->j].dist; join->criterion = besthits[join->j].criterion; } if(changed) nHillBetter++; } while(changed); } } /* A token is one of ():;, or an alphanumeric string without whitespace Any whitespace between tokens is ignored */ char *ReadTreeToken(FILE *fp) { static char buf[BUFFER_SIZE]; int len = 0; int c; for (c = fgetc(fp); c != EOF; c = fgetc(fp)) { if (c == '(' || c == ')' || c == ':' || c == ';' || c == ',') { /* standalone token */ if (len == 0) { buf[len++] = c; buf[len] = '\0'; return(buf); } else { ungetc(c, fp); buf[len] = '\0'; return(buf); } } else if (isspace(c)) { if (len > 0) { buf[len] = '\0'; return(buf); } /* else ignore whitespace at beginning of token */ } else { /* not whitespace or standalone token */ buf[len++] = c; if (len >= BUFFER_SIZE) { buf[BUFFER_SIZE-1] = '\0'; fprintf(stderr, "Token too long in tree file, token begins with\n%s\n", buf); exit(1); } } } if (len > 0) { /* return the token we have so far */ buf[len] = '\0'; return(buf); } /* else */ return(NULL); } void ReadTreeError(char *err, char *token) { fprintf(stderr, "Tree parse error: unexpected token '%s' -- %s\n", token == NULL ? 
"(End of file)" : token, err); exit(1); } void ReadTreeAddChild(int parent, int child, /*IN/OUT*/int *parents, /*IN/OUT*/children_t *children) { assert(parent >= 0); assert(child >= 0); assert(parents[child] < 0); assert(children[parent].nChild < 3); parents[child] = parent; children[parent].child[children[parent].nChild++] = child; } void ReadTreeMaybeAddLeaf(int parent, char *name, hashstrings_t *hashnames, uniquify_t *unique, /*IN/OUT*/int *parents, /*IN/OUT*/children_t *children) { hashiterator_t hi = FindMatch(hashnames,name); if (HashCount(hashnames,hi) != 1) ReadTreeError("not recognized as a sequence name", name); int iSeqNonunique = HashFirst(hashnames,hi); assert(iSeqNonunique >= 0 && iSeqNonunique < unique->nSeq); int iSeqUnique = unique->alnToUniq[iSeqNonunique]; assert(iSeqUnique >= 0 && iSeqUnique < unique->nUnique); /* Either record this leaves' parent (if it is -1) or ignore this leaf (if already seen) */ if (parents[iSeqUnique] < 0) { ReadTreeAddChild(parent, iSeqUnique, /*IN/OUT*/parents, /*IN/OUT*/children); if(verbose > 5) fprintf(stderr, "Found leaf uniq%d name %s child of %d\n", iSeqUnique, name, parent); } else { if (verbose > 5) fprintf(stderr, "Skipped redundant leaf uniq%d name %s\n", iSeqUnique, name); } } void ReadTreeRemove(/*IN/OUT*/int *parents, /*IN/OUT*/children_t *children, int node) { if(verbose > 5) fprintf(stderr,"Removing node %d parent %d\n", node, parents[node]); assert(parents[node] >= 0); int parent = parents[node]; parents[node] = -1; children_t *pc = &children[parent]; int oldn; for (oldn = 0; oldn < pc->nChild; oldn++) { if (pc->child[oldn] == node) break; } assert(oldn < pc->nChild); /* move successor nodes back in child list and shorten list */ int i; for (i = oldn; i < pc->nChild-1; i++) pc->child[i] = pc->child[i+1]; pc->nChild--; /* add its children to parent's child list */ children_t *nc = &children[node]; if (nc->nChild > 0) { assert(nc->nChild<=2); assert(pc->nChild < 3); assert(pc->nChild + nc->nChild <= 3); int j; for (j = 0; j < nc->nChild; j++) { if(verbose > 5) fprintf(stderr,"Repointing parent %d to child %d\n", parent, nc->child[j]); pc->child[pc->nChild++] = nc->child[j]; parents[nc->child[j]] = parent; } nc->nChild = 0; } } void ReadTree(/*IN/OUT*/NJ_t *NJ, /*IN*/uniquify_t *unique, /*IN*/hashstrings_t *hashnames, /*READ*/FILE *fpInTree) { assert(NJ->nSeq == unique->nUnique); /* First, do a preliminary parse of the tree to with non-unique leaves ignored We need to store this separately from NJ because it may have too many internal nodes (matching sequences show up once in the NJ but could be in multiple places in the tree) Will use iUnique as the index of nodes, as in the NJ structure */ int maxnodes = unique->nSeq*2; int maxnode = unique->nSeq; int *parent = (int*)mymalloc(sizeof(int)*maxnodes); children_t *children = (children_t *)mymalloc(sizeof(children_t)*maxnodes); int root = maxnode++; int i; for (i = 0; i < maxnodes; i++) { parent[i] = -1; children[i].nChild = 0; } /* The stack is the current path to the root, with the root at the first (top) position */ int stack_size = 1; int *stack = (int*)mymalloc(sizeof(int)*maxnodes); stack[0] = root; int nDown = 0; int nUp = 0; char *token; token = ReadTreeToken(fpInTree); if (token == NULL || *token != '(') ReadTreeError("No '(' at start", token); /* nDown is still 0 because we have created the root */ while ((token = ReadTreeToken(fpInTree)) != NULL) { if (nDown > 0) { /* In a stream of parentheses */ if (*token == '(') nDown++; else if (*token == ',' || *token == ';' || *token 
== ':' || *token == ')') ReadTreeError("while reading parentheses", token); else { /* Add intermediate nodes if nDown was > 1 (for nDown=1, the only new node is the leaf) */ while (nDown-- > 0) { int new = maxnode++; assert(new < maxnodes); ReadTreeAddChild(stack[stack_size-1], new, /*IN/OUT*/parent, /*IN/OUT*/children); if(verbose > 5) fprintf(stderr, "Added internal child %d of %d, stack size increase to %d\n", new, stack[stack_size-1],stack_size+1); stack[stack_size++] = new; assert(stack_size < maxnodes); } ReadTreeMaybeAddLeaf(stack[stack_size-1], token, hashnames, unique, /*IN/OUT*/parent, /*IN/OUT*/children); } } else if (nUp > 0) { if (*token == ';') { /* end the tree? */ if (nUp != stack_size) ReadTreeError("unbalanced parentheses", token); else break; } else if (*token == ')') nUp++; else if (*token == '(') ReadTreeError("unexpected '(' after ')'", token); else if (*token == ':') { token = ReadTreeToken(fpInTree); /* Read the branch length and ignore it */ if (token == NULL || (*token != '-' && !isdigit(*token))) ReadTreeError("not recognized as a branch length", token); } else if (*token == ',') { /* Go back up the stack the correct #times */ while (nUp-- > 0) { stack_size--; if(verbose > 5) fprintf(stderr, "Up to nUp=%d stack size %d at %d\n", nUp, stack_size, stack[stack_size-1]); if (stack_size <= 0) ReadTreeError("too many ')'", token); } nUp = 0; } else if (*token == '-' || isdigit(*token)) ; /* ignore bootstrap value */ else fprintf(stderr, "Warning while parsing tree: non-numeric label %s for internal node\n", token); } else if (*token == '(') { nDown = 1; } else if (*token == ')') { nUp = 1; } else if (*token == ':') { token = ReadTreeToken(fpInTree); if (token == NULL || (*token != '-' && !isdigit(*token))) ReadTreeError("not recognized as a branch length", token); } else if (*token == ',') { ; /* do nothing */ } else if (*token == ';') ReadTreeError("unexpected token", token); else ReadTreeMaybeAddLeaf(stack[stack_size-1], token, hashnames, unique, /*IN/OUT*/parent, /*IN/OUT*/children); } /* Verify that all sequences were seen */ for (i = 0; i < unique->nUnique; i++) { if (parent[i] < 0) { fprintf(stderr, "Alignment sequence %d (unique %d) absent from input tree\n" "The starting tree (the argument to -intree) must include all sequences in the alignment!\n", unique->uniqueFirst[i], i); exit(1); } } /* Simplify the tree -- remove all internal nodes with < 2 children Keep trying until no nodes get removed */ int nRemoved; do { nRemoved = 0; /* Here stack is the list of nodes we haven't visited yet while doing a tree traversal */ stack_size = 1; stack[0] = root; while (stack_size > 0) { int node = stack[--stack_size]; if (node >= unique->nUnique) { /* internal node */ if (children[node].nChild <= 1) { if (node != root) { ReadTreeRemove(/*IN/OUT*/parent,/*IN/OUT*/children,node); nRemoved++; } else if (node == root && children[node].nChild == 1) { int newroot = children[node].child[0]; parent[newroot] = -1; children[root].nChild = 0; nRemoved++; if(verbose > 5) fprintf(stderr,"Changed root from %d to %d\n",root,newroot); root = newroot; stack[stack_size++] = newroot; } } else { int j; for (j = 0; j < children[node].nChild; j++) { assert(stack_size < maxnodes); stack[stack_size++] = children[node].child[j]; if(verbose > 5) fprintf(stderr,"Added %d to stack\n", stack[stack_size-1]); } } } } } while (nRemoved > 0); /* Simplify the root node to 3 children if it has 2 */ if (children[root].nChild == 2) { for (i = 0; i < 2; i++) { int child = children[root].child[i]; assert(child 
>= 0 && child < maxnodes);
      if (children[child].nChild == 2) {
        ReadTreeRemove(parent,children,child); /* replace root -> child -> A,B with root->A,B */
        break;
      }
    }
  }

  for (i = 0; i < maxnodes; i++)
    if(verbose > 5)
      fprintf(stderr,"Simplified node %d has parent %d nchild %d\n",
              i, parent[i], children[i].nChild);

  /* Map the remaining internal nodes to NJ nodes */
  int *map = (int*)mymalloc(sizeof(int)*maxnodes);
  for (i = 0; i < unique->nUnique; i++) map[i] = i;
  for (i = unique->nUnique; i < maxnodes; i++) map[i] = -1;
  stack_size = 1;
  stack[0] = root;
  while (stack_size > 0) {
    int node = stack[--stack_size];
    if (node >= unique->nUnique) { /* internal node */
      assert(node == root || children[node].nChild > 1);
      map[node] = NJ->maxnode++;
      for (i = 0; i < children[node].nChild; i++) {
        assert(stack_size < maxnodes);
        stack[stack_size++] = children[node].child[i];
      }
    }
  }
  for (i = 0; i < maxnodes; i++)
    if(verbose > 5)
      fprintf(stderr,"Map %d to %d (parent %d nchild %d)\n",
              i, map[i], parent[i], children[i].nChild);

  /* Set NJ->parent, NJ->children, NJ->root */
  NJ->root = map[root];
  int node;
  for (node = 0; node < maxnodes; node++) {
    int njnode = map[node];
    if (njnode >= 0) {
      NJ->child[njnode].nChild = children[node].nChild;
      for (i = 0; i < children[node].nChild; i++) {
        assert(children[node].child[i] >= 0 && children[node].child[i] < maxnodes);
        NJ->child[njnode].child[i] = map[children[node].child[i]];
      }
      if (parent[node] >= 0)
        NJ->parent[njnode] = map[parent[node]];
    }
  }

  /* Make sure that parent/child relationships match */
  for (i = 0; i < NJ->maxnode; i++) {
    children_t *c = &NJ->child[i];
    int j;
    for (j = 0; j < c->nChild; j++)
      assert(c->child[j] >= 0 && c->child[j] < NJ->maxnode && NJ->parent[c->child[j]] == i);
  }
  assert(NJ->parent[NJ->root] < 0);

  map = myfree(map,sizeof(int)*maxnodes);
  stack = myfree(stack,sizeof(int)*maxnodes);
  children = myfree(children,sizeof(children_t)*maxnodes);
  parent = myfree(parent,sizeof(int)*maxnodes);

  /* Compute profiles as balanced -- the NNI stage will recompute these profiles anyway */
  traversal_t traversal = InitTraversal(NJ);
  node = NJ->root;
  while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) {
    if (node >= NJ->nSeq && node != NJ->root)
      SetProfile(/*IN/OUT*/NJ, node, /*noweight*/-1.0);
  }
  traversal = FreeTraversal(traversal,NJ);
}

/* Print topology using node indices as node names */
void PrintNJInternal(FILE *fp, NJ_t *NJ, bool useLen) {
  if (NJ->nSeq < 4) {
    return;
  }
  typedef struct { int node; int end; } stack_t;
  stack_t *stack = (stack_t *)mymalloc(sizeof(stack_t)*NJ->maxnodes);
  int stackSize = 1;
  stack[0].node = NJ->root;
  stack[0].end = 0;
  while(stackSize>0) {
    stack_t *last = &stack[stackSize-1];
    stackSize--; /* Save last, as we are about to overwrite it */
    int node = last->node;
    int end = last->end;
    if (node < NJ->nSeq) {
      if (NJ->child[NJ->parent[node]].child[0] != node)
        fputs(",",fp);
      fprintf(fp, "%d", node);
      if (useLen)
        fprintf(fp, ":%.4f", NJ->branchlength[node]);
    } else if (end) {
      fprintf(fp, ")%d", node);
      if (useLen)
        fprintf(fp, ":%.4f", NJ->branchlength[node]);
    } else {
      if (node != NJ->root && NJ->child[NJ->parent[node]].child[0] != node)
        fprintf(fp, ",");
      fprintf(fp, "(");
      stackSize++;
      stack[stackSize-1].node = node;
      stack[stackSize-1].end = 1;
      children_t *c = &NJ->child[node];
      /* put children on in reverse order because we use the last one first */
      int i;
      for (i = c->nChild-1; i >= 0; i--) {
        stackSize++;
        stack[stackSize-1].node = c->child[i];
        stack[stackSize-1].end = 0;
      }
    }
  }
  fprintf(fp, ";\n");
  stack = myfree(stack,
sizeof(stack_t)*NJ->maxnodes); } void PrintNJ(FILE *fp, NJ_t *NJ, char **names, uniquify_t *unique, bool bShowSupport, bool bQuote) { /* And print the tree: depth first search * The stack contains * list of remaining children with their depth * parent node, with a flag of -1 so I know to print right-paren */ if (NJ->nSeq==1 && unique->alnNext[unique->uniqueFirst[0]] >= 0) { /* Special case -- otherwise we end up with double parens */ int first = unique->uniqueFirst[0]; assert(first >= 0 && first < unique->nSeq); fprintf(fp, bQuote ? "('%s':0.0" : "(%s:0.0", names[first]); int iName = unique->alnNext[first]; while (iName >= 0) { assert(iName < unique->nSeq); fprintf(fp, bQuote ? ",'%s':0.0" : ",%s:0.0", names[iName]); iName = unique->alnNext[iName]; } fprintf(fp,");\n"); return; } typedef struct { int node; int end; } stack_t; stack_t *stack = (stack_t *)mymalloc(sizeof(stack_t)*NJ->maxnodes); int stackSize = 1; stack[0].node = NJ->root; stack[0].end = 0; while(stackSize>0) { stack_t *last = &stack[stackSize-1]; stackSize--; /* Save last, as we are about to overwrite it */ int node = last->node; int end = last->end; if (node < NJ->nSeq) { if (NJ->child[NJ->parent[node]].child[0] != node) fputs(",",fp); int first = unique->uniqueFirst[node]; assert(first >= 0 && first < unique->nSeq); /* Print the name, or the subtree of duplicate names */ if (unique->alnNext[first] == -1) { fprintf(fp, bQuote ? "'%s'" : "%s", names[first]); } else { fprintf(fp, bQuote ? "('%s':0.0" : "(%s:0.0", names[first]); int iName = unique->alnNext[first]; while (iName >= 0) { assert(iName < unique->nSeq); fprintf(fp, bQuote ? ",'%s':0.0" : ",%s:0.0", names[iName]); iName = unique->alnNext[iName]; } fprintf(fp,")"); } /* Print the branch length */ fprintf(fp, ":%.5f", NJ->branchlength[node]); } else if (end) { if (node == NJ->root) fprintf(fp, ")"); else if (bShowSupport) fprintf(fp, ")%.3f:%.5f", NJ->support[node], NJ->branchlength[node]); else fprintf(fp, "):%.5f", NJ->branchlength[node]); } else { if (node != NJ->root && NJ->child[NJ->parent[node]].child[0] != node) fprintf(fp, ","); fprintf(fp, "("); stackSize++; stack[stackSize-1].node = node; stack[stackSize-1].end = 1; children_t *c = &NJ->child[node]; /* put children on in reverse order because we use the last one first */ int i; for (i = c->nChild-1; i >=0; i--) { stackSize++; stack[stackSize-1].node = c->child[i]; stack[stackSize-1].end = 0; } } } fprintf(fp, ";\n"); stack = myfree(stack, sizeof(stack_t)*NJ->maxnodes); } alignment_t *ReadAlignment(/*IN*/FILE *fp, bool bQuote) { /* bQuote supports the -quote option */ int nSeq = 0; int nPos = 0; char **names = NULL; char **seqs = NULL; char buf[BUFFER_SIZE] = ""; if (fgets(buf,sizeof(buf),fp) == NULL) { fprintf(stderr, "Error reading header line\n"); exit(1); } int nSaved = 100; if (buf[0] == '>') { /* FASTA, truncate names at any of these */ char *nameStop = bQuote ? 
"'\t\r\n" : "(),: \t\r\n"; char *seqSkip = " \t\r\n"; /* skip these characters in the sequence */ seqs = (char**)mymalloc(sizeof(char*) * nSaved); names = (char**)mymalloc(sizeof(char*) * nSaved); do { /* loop over lines */ if (buf[0] == '>') { /* truncate the name */ char *p, *q; for (p = buf+1; *p != '\0'; p++) { for (q = nameStop; *q != '\0'; q++) { if (*p == *q) { *p = '\0'; break; } } if (*p == '\0') break; } /* allocate space for another sequence */ nSeq++; if (nSeq > nSaved) { int nNewSaved = nSaved*2; seqs = myrealloc(seqs,sizeof(char*)*nSaved,sizeof(char*)*nNewSaved, /*copy*/false); names = myrealloc(names,sizeof(char*)*nSaved,sizeof(char*)*nNewSaved, /*copy*/false); nSaved = nNewSaved; } names[nSeq-1] = (char*)mymemdup(buf+1,strlen(buf)); seqs[nSeq-1] = NULL; } else { /* count non-space characters and append to sequence */ int nKeep = 0; char *p, *q; for (p=buf; *p != '\0'; p++) { for (q=seqSkip; *q != '\0'; q++) { if (*p == *q) break; } if (*p != *q) nKeep++; } int nOld = (seqs[nSeq-1] == NULL) ? 0 : strlen(seqs[nSeq-1]); seqs[nSeq-1] = (char*)myrealloc(seqs[nSeq-1], nOld, nOld+nKeep+1, /*copy*/false); if (nOld+nKeep > nPos) nPos = nOld + nKeep; char *out = seqs[nSeq-1] + nOld; for (p=buf; *p != '\0'; p++) { for (q=seqSkip; *q != '\0'; q++) { if (*p == *q) break; } if (*p != *q) { *out = *p; out++; } } assert(out-seqs[nSeq-1] == nKeep + nOld); *out = '\0'; } } while(fgets(buf,sizeof(buf),fp) != NULL); if (seqs[nSeq-1] == NULL) { fprintf(stderr, "No sequence data for last entry %s\n",names[nSeq-1]); exit(1); } names = myrealloc(names,sizeof(char*)*nSaved,sizeof(char*)*nSeq, /*copy*/false); seqs = myrealloc(seqs,sizeof(char*)*nSaved,sizeof(char*)*nSeq, /*copy*/false); } else { /* PHYLIP interleaved-like format Allow arbitrary length names, require spaces between names and sequences Allow multiple alignments, either separated by a single empty line (e.g. seqboot output) or not. 
*/ if (buf[0] == '\n' || buf[0] == '\r') { if (fgets(buf,sizeof(buf),fp) == NULL) { fprintf(stderr, "Empty header line followed by EOF\n"); exit(1); } } if (sscanf(buf, "%d%d", &nSeq, &nPos) != 2 || nSeq < 1 || nPos < 1) { fprintf(stderr, "Error parsing header line:%s\n", buf); exit(1); } names = (char **)mymalloc(sizeof(char*) * nSeq); seqs = (char **)mymalloc(sizeof(char*) * nSeq); nSaved = nSeq; int i; for (i = 0; i < nSeq; i++) { names[i] = NULL; seqs[i] = (char *)mymalloc(nPos+1); /* null-terminate */ seqs[i][0] = '\0'; } int iSeq = 0; while(fgets(buf,sizeof(buf),fp)) { if ((buf[0] == '\n' || buf[0] == '\r') && (iSeq == nSeq || iSeq == 0)) { iSeq = 0; } else { int j = 0; /* character just past end of name */ if (buf[0] == ' ') { if (names[iSeq] == NULL) { fprintf(stderr, "No name in phylip line %s", buf); exit(1); } } else { while (buf[j] != '\n' && buf[j] != '\0' && buf[j] != ' ') j++; if (buf[j] != ' ' || j == 0) { fprintf(stderr, "No sequence in phylip line %s", buf); exit(1); } if (iSeq >= nSeq) { fprintf(stderr, "No empty line between sequence blocks (is the sequence count wrong?)\n"); exit(1); } if (names[iSeq] == NULL) { /* save the name */ names[iSeq] = (char *)mymalloc(j+1); int k; for (k = 0; k < j; k++) names[iSeq][k] = buf[k]; names[iSeq][j] = '\0'; } else { /* check the name */ int k; int match = 1; for (k = 0; k < j; k++) { if (names[iSeq][k] != buf[k]) { match = 0; break; } } if (!match || names[iSeq][j] != '\0') { fprintf(stderr, "Wrong name in phylip line %s\nExpected %s\n", buf, names[iSeq]); exit(1); } } } int seqlen = strlen(seqs[iSeq]); for (; buf[j] != '\n' && buf[j] != '\0'; j++) { if (buf[j] != ' ') { if (seqlen >= nPos) { fprintf(stderr, "Too many characters (expected %d) for sequence named %s\nSo far have:\n%s\n", nPos, names[iSeq], seqs[iSeq]); exit(1); } seqs[iSeq][seqlen++] = toupper(buf[j]); } } seqs[iSeq][seqlen] = '\0'; /* null-terminate */ if(verbose>10) fprintf(stderr,"Read iSeq %d name %s seqsofar %s\n", iSeq, names[iSeq], seqs[iSeq]); iSeq++; if (iSeq == nSeq && strlen(seqs[0]) == nPos) break; /* finished alignment */ } /* end else non-empty phylip line */ } if (iSeq != nSeq && iSeq != 0) { fprintf(stderr, "Wrong number of sequences: expected %d\n", nSeq); exit(1); } } /* Check lengths of sequences */ int i; for (i = 0; i < nSeq; i++) { int seqlen = strlen(seqs[i]); if (seqlen != nPos) { fprintf(stderr, "Wrong number of characters for %s: expected %d but have %d instead.\n" "This sequence may be truncated, or another sequence may be too long.\n", names[i], nPos, seqlen); exit(1); } } /* Replace "." with "-" and warn if we find any */ /* If nucleotide sequences, replace U with T and N with X */ bool findDot = false; for (i = 0; i < nSeq; i++) { char *p; for (p = seqs[i]; *p != '\0'; p++) { if (*p == '.') { findDot = true; *p = '-'; } if (nCodes == 4 && *p == 'U') *p = 'T'; if (nCodes == 4 && *p == 'N') *p = 'X'; } } if (findDot) fprintf(stderr, "Warning! Found \".\" character(s). 
These are treated as gaps\n"); if (ferror(fp)) { fprintf(stderr, "Error reading input file\n"); exit(1); } alignment_t *align = (alignment_t*)mymalloc(sizeof(alignment_t)); align->nSeq = nSeq; align->nPos = nPos; align->names = names; align->seqs = seqs; align->nSaved = nSaved; return(align); } void FreeAlignmentSeqs(/*IN/OUT*/alignment_t *aln) { assert(aln != NULL); int i; for (i = 0; i < aln->nSeq; i++) aln->seqs[i] = myfree(aln->seqs[i], aln->nPos+1); } alignment_t *FreeAlignment(alignment_t *aln) { if(aln==NULL) return(NULL); int i; for (i = 0; i < aln->nSeq; i++) { aln->names[i] = myfree(aln->names[i],strlen(aln->names[i])+1); aln->seqs[i] = myfree(aln->seqs[i], aln->nPos+1); } aln->names = myfree(aln->names, sizeof(char*)*aln->nSaved); aln->seqs = myfree(aln->seqs, sizeof(char*)*aln->nSaved); myfree(aln, sizeof(alignment_t)); return(NULL); } char **AlnToConstraints(alignment_t *constraints, uniquify_t *unique, hashstrings_t *hashnames) { /* look up constraints as names and map to unique-space */ char ** uniqConstraints = (char**)mymalloc(sizeof(char*) * unique->nUnique); int i; for (i = 0; i < unique->nUnique; i++) uniqConstraints[i] = NULL; for (i = 0; i < constraints->nSeq; i++) { char *name = constraints->names[i]; char *constraintSeq = constraints->seqs[i]; hashiterator_t hi = FindMatch(hashnames,name); if (HashCount(hashnames,hi) != 1) { fprintf(stderr, "Sequence %s from constraints file is not in the alignment\n", name); exit(1); } int iSeqNonunique = HashFirst(hashnames,hi); assert(iSeqNonunique >= 0 && iSeqNonunique < unique->nSeq); int iSeqUnique = unique->alnToUniq[iSeqNonunique]; assert(iSeqUnique >= 0 && iSeqUnique < unique->nUnique); if (uniqConstraints[iSeqUnique] != NULL) { /* Already set a constraint for this group of sequences! 
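       (This can happen because duplicate alignment sequences are collapsed into a single
       unique entry, so more than one row of the constraints file can map to the same
       iSeqUnique.)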
Warn that we are ignoring this one unless the constraints match */ if (strcmp(uniqConstraints[iSeqUnique],constraintSeq) != 0) { fprintf(stderr, "Warning: ignoring constraints for %s:\n%s\n" "Another sequence has the same sequence but different constraints\n", name, constraintSeq); } } else { uniqConstraints[iSeqUnique] = constraintSeq; } } return(uniqConstraints); } profile_t *SeqToProfile(/*IN/OUT*/NJ_t *NJ, char *seq, int nPos, /*OPTIONAL*/char *constraintSeq, int nConstraints, int iNode, unsigned long counts[256]) { static unsigned char charToCode[256]; static int codeSet = 0; int c, i; if (!codeSet) { for (c = 0; c < 256; c++) { charToCode[c] = nCodes; } for (i = 0; codesString[i]; i++) { charToCode[codesString[i]] = i; charToCode[tolower(codesString[i])] = i; } charToCode['-'] = NOCODE; codeSet=1; } assert(strlen(seq) == nPos); profile_t *profile = NewProfile(nPos,nConstraints); for (i = 0; i < nPos; i++) { unsigned int character = (unsigned int) seq[i]; counts[character]++; c = charToCode[character]; if(verbose>10 && i < 2) fprintf(stderr,"pos %d char %c code %d\n", i, seq[i], c); /* treat unknowns as gaps */ if (c == nCodes || c == NOCODE) { profile->codes[i] = NOCODE; profile->weights[i] = 0.0; } else { profile->codes[i] = c; profile->weights[i] = 1.0; } } if (nConstraints > 0) { for (i = 0; i < nConstraints; i++) { profile->nOn[i] = 0; profile->nOff[i] = 0; } bool bWarn = false; if (constraintSeq != NULL) { assert(strlen(constraintSeq) == nConstraints); for (i = 0; i < nConstraints; i++) { if (constraintSeq[i] == '1') { profile->nOn[i] = 1; } else if (constraintSeq[i] == '0') { profile->nOff[i] = 1; } else if (constraintSeq[i] != '-') { if (!bWarn) { fprintf(stderr, "Constraint characters in unique sequence %d replaced with gap:", iNode+1); bWarn = true; } fprintf(stderr, " %c%d", constraintSeq[i], i+1); /* For the benefit of ConstraintSequencePenalty -- this is a bit of a hack, as this modifies the value read from the alignment */ constraintSeq[i] = '-'; } } if (bWarn) fprintf(stderr, "\n"); } } return profile; } void SeqDist(unsigned char *codes1, unsigned char *codes2, int nPos, distance_matrix_t *dmat, /*OUT*/besthit_t *hit) { double top = 0; /* summed over positions */ int nUse = 0; int i; if (dmat==NULL) { int nDiff = 0; for (i = 0; i < nPos; i++) { if (codes1[i] != NOCODE && codes2[i] != NOCODE) { nUse++; if (codes1[i] != codes2[i]) nDiff++; } } top = (double)nDiff; } else { for (i = 0; i < nPos; i++) { if (codes1[i] != NOCODE && codes2[i] != NOCODE) { nUse++; top += dmat->distances[(unsigned int)codes1[i]][(unsigned int)codes2[i]]; } } } hit->weight = (double)nUse; hit->dist = nUse > 0 ? top/(double)nUse : 1.0; seqOps++; } void CorrectedPairDistances(profile_t **profiles, int nProfiles, /*OPTIONAL*/distance_matrix_t *distance_matrix, int nPos, /*OUT*/double *distances) { assert(distances != NULL); assert(profiles != NULL); assert(nProfiles>1 && nProfiles <= 4); besthit_t hit[6]; int iHit,i,j; for (iHit=0, i=0; i < nProfiles; i++) { for (j=i+1; j < nProfiles; j++, iHit++) { ProfileDist(profiles[i],profiles[j],nPos,distance_matrix,/*OUT*/&hit[iHit]); distances[iHit] = hit[iHit].dist; } } if (pseudoWeight > 0) { /* Estimate the prior distance */ double dTop = 0; double dBottom = 0; for (iHit=0; iHit < (nProfiles*(nProfiles-1))/2; iHit++) { dTop += hit[iHit].dist * hit[iHit].weight; dBottom += hit[iHit].weight; } double prior = (dBottom > 0.01) ? 
dTop/dBottom : 3.0;
    for (iHit=0; iHit < (nProfiles*(nProfiles-1))/2; iHit++)
      distances[iHit] = (distances[iHit] * hit[iHit].weight + prior * pseudoWeight)
        / (hit[iHit].weight + pseudoWeight);
  }
  if (logdist) {
    for (iHit=0; iHit < (nProfiles*(nProfiles-1))/2; iHit++)
      distances[iHit] = LogCorrect(distances[iHit]);
  }
}

/* During the neighbor-joining phase, a join only violates our constraints if
   node1, node2, and other are all represented in the constraint
   and if one of the 3 is split and the other two do not agree
*/
int JoinConstraintPenalty(/*IN*/NJ_t *NJ, int node1, int node2) {
  if (NJ->nConstraints == 0)
    return(0.0);
  int penalty = 0;
  int iC;
  for (iC = 0; iC < NJ->nConstraints; iC++)
    penalty += JoinConstraintPenaltyPiece(NJ, node1, node2, iC);
  return(penalty);
}

int JoinConstraintPenaltyPiece(NJ_t *NJ, int node1, int node2, int iC) {
  profile_t *pOut = NJ->outprofile;
  profile_t *p1 = NJ->profiles[node1];
  profile_t *p2 = NJ->profiles[node2];
  int nOn1 = p1->nOn[iC];
  int nOff1 = p1->nOff[iC];
  int nOn2 = p2->nOn[iC];
  int nOff2 = p2->nOff[iC];
  int nOnOut = pOut->nOn[iC] - nOn1 - nOn2;
  int nOffOut = pOut->nOff[iC] - nOff1 - nOff2;
  if ((nOn1+nOff1) > 0 && (nOn2+nOff2) > 0 && (nOnOut+nOffOut) > 0) {
    /* code is -1 for split, 0 for off, 1 for on */
    int code1 = (nOn1 > 0 && nOff1 > 0) ? -1 : (nOn1 > 0 ? 1 : 0);
    int code2 = (nOn2 > 0 && nOff2 > 0) ? -1 : (nOn2 > 0 ? 1 : 0);
    int code3 = (nOnOut > 0 && nOffOut > 0) ? -1 : (nOnOut > 0 ? 1 : 0);
    int nSplit = (code1 == -1 ? 1 : 0) + (code2 == -1 ? 1 : 0) + (code3 == -1 ? 1 : 0);
    int nOn = (code1 == 1 ? 1 : 0) + (code2 == 1 ? 1 : 0) + (code3 == 1 ? 1 : 0);
    if (nSplit == 1 && nOn == 1)
      return(SplitConstraintPenalty(nOn1+nOn2, nOff1+nOff2, nOnOut, nOffOut));
  }
  /* else */
  return(0);
}

void QuartetConstraintPenalties(profile_t *profiles[4], int nConstraints, /*OUT*/double penalty[3]) {
  int i;
  for (i=0; i < 3; i++)
    penalty[i] = 0.0;
  if(nConstraints == 0)
    return;
  int iC;
  for (iC = 0; iC < nConstraints; iC++) {
    double part[3];
    if (QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/part)) {
      for (i=0;i<3;i++)
        penalty[i] += part[i];
      if (verbose>2
          && (fabs(part[ABvsCD]-part[ACvsBD]) > 0.001 || fabs(part[ABvsCD]-part[ADvsBC]) > 0.001))
        fprintf(stderr, "Constraint Penalties at %d: ABvsCD %.3f ACvsBD %.3f ADvsBC %.3f %d/%d %d/%d %d/%d %d/%d\n",
                iC, part[ABvsCD], part[ACvsBD], part[ADvsBC],
                profiles[0]->nOn[iC], profiles[0]->nOff[iC],
                profiles[1]->nOn[iC], profiles[1]->nOff[iC],
                profiles[2]->nOn[iC], profiles[2]->nOff[iC],
                profiles[3]->nOn[iC], profiles[3]->nOff[iC]);
    }
  }
  if (verbose>2)
    fprintf(stderr, "Total Constraint Penalties: ABvsCD %.3f ACvsBD %.3f ADvsBC %.3f\n",
            penalty[ABvsCD], penalty[ACvsBD], penalty[ADvsBC]);
}

double PairConstraintDistance(int nOn1, int nOff1, int nOn2, int nOff2) {
  double f1 = nOn1/(double)(nOn1+nOff1);
  double f2 = nOn2/(double)(nOn2+nOff2);
  /* 1 - f1 * f2 - (1-f1)*(1-f2) = 1 - f1 * f2 - 1 + f1 + f2 - f1 * f2 */
  return(f1 + f2 - 2.0 * f1 * f2);
}

bool QuartetConstraintPenaltiesPiece(profile_t *profiles[4], int iC, /*OUT*/double piece[3]) {
  int nOn[4];
  int nOff[4];
  int i;
  int nSplit = 0;
  int nPlus = 0;
  int nMinus = 0;

  for (i=0; i < 4; i++) {
    nOn[i] = profiles[i]->nOn[iC];
    nOff[i] = profiles[i]->nOff[iC];
    if (nOn[i] + nOff[i] == 0)
      return(false);            /* ignore */
    else if (nOn[i] > 0 && nOff[i] > 0)
      nSplit++;
    else if (nOn[i] > 0)
      nPlus++;
    else
      nMinus++;
  }
  /* If just one of them is split or on the other side and the others all agree, also ignore */
  if (nPlus >= 3 || nMinus >= 3)
    return(false);
  piece[ABvsCD] = constraintWeight *
(PairConstraintDistance(nOn[0],nOff[0],nOn[1],nOff[1]) + PairConstraintDistance(nOn[2],nOff[2],nOn[3],nOff[3])); piece[ACvsBD] = constraintWeight * (PairConstraintDistance(nOn[0],nOff[0],nOn[2],nOff[2]) + PairConstraintDistance(nOn[1],nOff[1],nOn[3],nOff[3])); piece[ADvsBC] = constraintWeight * (PairConstraintDistance(nOn[0],nOff[0],nOn[3],nOff[3]) + PairConstraintDistance(nOn[2],nOff[2],nOn[1],nOff[1])); return(true); } /* Minimum number of constrained leaves that need to be moved to satisfy the constraint (or 0 if constraint is satisfied) Defining it this way should ensure that SPR moves that break constraints get a penalty */ int SplitConstraintPenalty(int nOn1, int nOff1, int nOn2, int nOff2) { return(nOn1 + nOff2 < nOn2 + nOff1 ? (nOn1 < nOff2 ? nOn1 : nOff2) : (nOn2 < nOff1 ? nOn2 : nOff1)); } bool SplitViolatesConstraint(profile_t *profiles[4], int iConstraint) { int i; int codes[4]; /* 0 for off, 1 for on, -1 for split (quit if not constrained at all) */ for (i = 0; i < 4; i++) { if (profiles[i]->nOn[iConstraint] + profiles[i]->nOff[iConstraint] == 0) return(false); else if (profiles[i]->nOn[iConstraint] > 0 && profiles[i]->nOff[iConstraint] == 0) codes[i] = 1; else if (profiles[i]->nOn[iConstraint] == 0 && profiles[i]->nOff[iConstraint] > 0) codes[i] = 0; else codes[i] = -1; } int n0 = 0; int n1 = 0; for (i = 0; i < 4; i++) { if (codes[i] == 0) n0++; else if (codes[i] == 1) n1++; } /* 3 on one side means no violation, even if other is code -1 otherwise must have code != -1 and agreement on the split */ if (n0 >= 3 || n1 >= 3) return(false); if (n0==2 && n1==2 && codes[0] == codes[1] && codes[2] == codes[3]) return(false); return(true); } double LogCorrect(double dist) { const double maxscore = 3.0; if (nCodes == 4 && !useMatrix) { /* Jukes-Cantor */ dist = dist < 0.74 ? -0.75*log(1.0 - dist * 4.0/3.0) : maxscore; } else { /* scoredist-like */ dist = dist < 0.99 ? -1.3*log(1.0 - dist) : maxscore; } return (dist < maxscore ? dist : maxscore); } /* A helper function -- f1 and f2 can be NULL if the corresponding code != NOCODE */ double ProfileDistPiece(unsigned int code1, unsigned int code2, numeric_t *f1, numeric_t *f2, /*OPTIONAL*/distance_matrix_t *dmat, /*OPTIONAL*/numeric_t *codeDist2) { if (dmat) { if (code1 != NOCODE && code2 != NOCODE) { /* code1 vs code2 */ return(dmat->distances[code1][code2]); } else if (codeDist2 != NULL && code1 != NOCODE) { /* code1 vs. codeDist2 */ return(codeDist2[code1]); } else { /* f1 vs f2 */ if (f1 == NULL) { if(code1 == NOCODE) return(10.0); f1 = &dmat->codeFreq[code1][0]; } if (f2 == NULL) { if(code2 == NOCODE) return(10.0); f2 = &dmat->codeFreq[code2][0]; } return(vector_multiply3_sum(f1,f2,dmat->eigenval,nCodes)); } } else { /* no matrix */ if (code1 != NOCODE) { if (code2 != NOCODE) { return(code1 == code2 ? 0.0 : 1.0); /* code1 vs code2 */ } else { if(f2 == NULL) return(10.0); return(1.0 - f2[code1]); /* code1 vs. f2 */ } } else { if (code2 != NOCODE) { if(f1 == NULL) return(10.0); return(1.0 - f1[code2]); /* f1 vs code2 */ } else { /* f1 vs. f2 */ if (f1 == NULL || f2 == NULL) return(10.0); double piece = 1.0; int k; for (k = 0; k < nCodes; k++) { piece -= f1[k] * f2[k]; } return(piece); } } } assert(0); } /* E.g. GET_FREQ(profile,iPos,iVector) Gets the next element of the vectors (and updates iVector), or returns NULL if we didn't store a vector */ #define GET_FREQ(P,I,IVECTOR) \ (P->weights[I] > 0 && P->codes[I] == NOCODE ? 
&P->vectors[nCodes*(IVECTOR++)] : NULL) void ProfileDist(profile_t *profile1, profile_t *profile2, int nPos, /*OPTIONAL*/distance_matrix_t *dmat, /*OUT*/besthit_t *hit) { double top = 0; double denom = 0; int iFreq1 = 0; int iFreq2 = 0; int i = 0; for (i = 0; i < nPos; i++) { numeric_t *f1 = GET_FREQ(profile1,i,/*IN/OUT*/iFreq1); numeric_t *f2 = GET_FREQ(profile2,i,/*IN/OUT*/iFreq2); if (profile1->weights[i] > 0 && profile2->weights[i] > 0) { double weight = profile1->weights[i] * profile2->weights[i]; denom += weight; double piece = ProfileDistPiece(profile1->codes[i],profile2->codes[i],f1,f2,dmat, profile2->codeDist ? &profile2->codeDist[i*nCodes] : NULL); top += weight * piece; } } assert(iFreq1 == profile1->nVectors); assert(iFreq2 == profile2->nVectors); hit->weight = denom > 0 ? denom : 0.01; /* 0.01 is an arbitrarily low value of weight (normally >>1) */ hit->dist = denom > 0 ? top/denom : 1; profileOps++; } /* This should not be called if the update weight is 0, as in that case code==NOCODE and in=NULL is possible, and then it will fail. */ void AddToFreq(/*IN/OUT*/numeric_t *fOut, double weight, unsigned int codeIn, /*OPTIONAL*/numeric_t *fIn, /*OPTIONAL*/distance_matrix_t *dmat) { assert(fOut != NULL); if (fIn != NULL) { vector_add_mult(fOut, fIn, weight, nCodes); } else if (dmat) { assert(codeIn != NOCODE); vector_add_mult(fOut, dmat->codeFreq[codeIn], weight, nCodes); } else { assert(codeIn != NOCODE); fOut[codeIn] += weight; } } void SetProfile(/*IN/OUT*/NJ_t *NJ, int node, double weight1) { children_t *c = &NJ->child[node]; assert(c->nChild == 2); assert(NJ->profiles[c->child[0]] != NULL); assert(NJ->profiles[c->child[1]] != NULL); if (NJ->profiles[node] != NULL) FreeProfile(NJ->profiles[node], NJ->nPos, NJ->nConstraints); NJ->profiles[node] = AverageProfile(NJ->profiles[c->child[0]], NJ->profiles[c->child[1]], NJ->nPos, NJ->nConstraints, NJ->distance_matrix, weight1); } /* bionjWeight is the weight of the first sequence (between 0 and 1), or -1 to do the average. 
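   Roughly speaking, the result is bionjWeight * profile1 + (1-bionjWeight) * profile2:
   the per-position weights are mixed with those coefficients, and the frequency vectors
   are mixed the same way (each also scaled by its profile's own position weight) and then
   renormalized by NormalizeFreq(). As a made-up example, bionjWeight = 0.25 pulls the
   averaged profile three quarters of the way toward profile2.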
*/ profile_t *AverageProfile(profile_t *profile1, profile_t *profile2, int nPos, int nConstraints, distance_matrix_t *dmat, double bionjWeight) { int i; if (bionjWeight < 0) { bionjWeight = 0.5; } /* First, set codes and weights and see how big vectors will be */ profile_t *out = NewProfile(nPos, nConstraints); for (i = 0; i < nPos; i++) { out->weights[i] = bionjWeight * profile1->weights[i] + (1-bionjWeight) * profile2->weights[i]; out->codes[i] = NOCODE; if (out->weights[i] > 0) { if (profile1->weights[i] > 0 && profile1->codes[i] != NOCODE && (profile2->weights[i] <= 0 || profile1->codes[i] == profile2->codes[i])) { out->codes[i] = profile1->codes[i]; } else if (profile1->weights[i] <= 0 && profile2->weights[i] > 0 && profile2->codes[i] != NOCODE) { out->codes[i] = profile2->codes[i]; } if (out->codes[i] == NOCODE) out->nVectors++; } } /* Allocate and set the vectors */ out->vectors = (numeric_t*)mymalloc(sizeof(numeric_t)*nCodes*out->nVectors); for (i = 0; i < nCodes * out->nVectors; i++) out->vectors[i] = 0; nProfileFreqAlloc += out->nVectors; nProfileFreqAvoid += nPos - out->nVectors; int iFreqOut = 0; int iFreq1 = 0; int iFreq2 = 0; for (i=0; i < nPos; i++) { numeric_t *f = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); numeric_t *f1 = GET_FREQ(profile1,i,/*IN/OUT*/iFreq1); numeric_t *f2 = GET_FREQ(profile2,i,/*IN/OUT*/iFreq2); if (f != NULL) { if (profile1->weights[i] > 0) AddToFreq(/*IN/OUT*/f, profile1->weights[i] * bionjWeight, profile1->codes[i], f1, dmat); if (profile2->weights[i] > 0) AddToFreq(/*IN/OUT*/f, profile2->weights[i] * (1.0-bionjWeight), profile2->codes[i], f2, dmat); NormalizeFreq(/*IN/OUT*/f, dmat); } /* end if computing f */ if (verbose > 10 && i < 5) { fprintf(stderr,"Average profiles: pos %d in-w1 %f in-w2 %f bionjWeight %f to weight %f code %d\n", i, profile1->weights[i], profile2->weights[i], bionjWeight, out->weights[i], out->codes[i]); if (f!= NULL) { int k; for (k = 0; k < nCodes; k++) fprintf(stderr, "\t%c:%f", codesString[k], f ? f[k] : -1.0); fprintf(stderr,"\n"); } } } /* end loop over positions */ assert(iFreq1 == profile1->nVectors); assert(iFreq2 == profile2->nVectors); assert(iFreqOut == out->nVectors); /* compute total constraints */ for (i = 0; i < nConstraints; i++) { out->nOn[i] = profile1->nOn[i] + profile2->nOn[i]; out->nOff[i] = profile1->nOff[i] + profile2->nOff[i]; } profileAvgOps++; return(out); } /* Make the (unrotated) frequencies sum to 1 Simply dividing by total_weight is not ideal because of roundoff error So compute total_freq instead */ void NormalizeFreq(/*IN/OUT*/numeric_t *freq, distance_matrix_t *dmat) { double total_freq = 0; int k; if (dmat != NULL) { /* The total frequency is dot_product(true_frequencies, 1) So we rotate the 1 vector by eigeninv (stored in eigentot) */ total_freq = vector_multiply_sum(freq, dmat->eigentot, nCodes); } else { for (k = 0; k < nCodes; k++) total_freq += freq[k]; } if (total_freq > 1e-10) { numeric_t inverse_weight = 1.0/total_freq; vector_multiply_by(/*IN/OUT*/freq, inverse_weight, nCodes); } else { /* This can happen if we are in a very low-weight region, e.g. 
if a mostly-gap position gets weighted down repeatedly; just set them all to arbitrary but legal values */ if (dmat == NULL) { for (k = 0; k < nCodes; k++) freq[k] = 1.0/nCodes; } else { for (k = 0; k < nCodes; k++) freq[k] = dmat->codeFreq[0][k];/*XXX gapFreq[k];*/ } } } /* OutProfile() computes the out-profile */ profile_t *OutProfile(profile_t **profiles, int nProfiles, int nPos, int nConstraints, distance_matrix_t *dmat) { int i; /* position */ int in; /* profile */ profile_t *out = NewProfile(nPos, nConstraints); double inweight = 1.0/(double)nProfiles; /* The maximal output weight is 1.0 */ /* First, set weights -- code is always NOCODE, prevent weight=0 */ for (i = 0; i < nPos; i++) { out->weights[i] = 0; for (in = 0; in < nProfiles; in++) out->weights[i] += profiles[in]->weights[i] * inweight; if (out->weights[i] <= 0) out->weights[i] = 1e-20; /* always store a vector */ out->nVectors++; out->codes[i] = NOCODE; /* outprofile is normally complicated */ } /* Initialize the frequencies to 0 */ out->vectors = (numeric_t*)mymalloc(sizeof(numeric_t)*nCodes*out->nVectors); for (i = 0; i < nCodes*out->nVectors; i++) out->vectors[i] = 0; /* Add up the weights, going through each sequence in turn */ for (in = 0; in < nProfiles; in++) { int iFreqOut = 0; int iFreqIn = 0; for (i = 0; i < nPos; i++) { numeric_t *fIn = GET_FREQ(profiles[in],i,/*IN/OUT*/iFreqIn); numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); if (profiles[in]->weights[i] > 0) AddToFreq(/*IN/OUT*/fOut, profiles[in]->weights[i], profiles[in]->codes[i], fIn, dmat); } assert(iFreqOut == out->nVectors); assert(iFreqIn == profiles[in]->nVectors); } /* And normalize the frequencies to sum to 1 */ int iFreqOut = 0; for (i = 0; i < nPos; i++) { numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); if (fOut) NormalizeFreq(/*IN/OUT*/fOut, dmat); } assert(iFreqOut == out->nVectors); if (verbose > 10) fprintf(stderr,"Average %d profiles\n", nProfiles); if(dmat) SetCodeDist(/*IN/OUT*/out, nPos, dmat); /* Compute constraints */ for (i = 0; i < nConstraints; i++) { out->nOn[i] = 0; out->nOff[i] = 0; for (in = 0; in < nProfiles; in++) { out->nOn[i] += profiles[in]->nOn[i]; out->nOff[i] += profiles[in]->nOff[i]; } } return(out); } void UpdateOutProfile(/*IN/OUT*/profile_t *out, profile_t *old1, profile_t *old2, profile_t *new, int nActiveOld, int nPos, int nConstraints, distance_matrix_t *dmat) { int i, k; int iFreqOut = 0; int iFreq1 = 0; int iFreq2 = 0; int iFreqNew = 0; assert(nActiveOld > 0); for (i = 0; i < nPos; i++) { numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); numeric_t *fOld1 = GET_FREQ(old1,i,/*IN/OUT*/iFreq1); numeric_t *fOld2 = GET_FREQ(old2,i,/*IN/OUT*/iFreq2); numeric_t *fNew = GET_FREQ(new,i,/*IN/OUT*/iFreqNew); assert(out->codes[i] == NOCODE && fOut != NULL); /* No no-vector optimization for outprofiles */ if (verbose > 3 && i < 3) { fprintf(stderr,"Updating out-profile position %d weight %f (mult %f)\n", i, out->weights[i], out->weights[i]*nActiveOld); } double originalMult = out->weights[i]*nActiveOld; double newMult = originalMult + new->weights[i] - old1->weights[i] - old2->weights[i]; out->weights[i] = newMult/(nActiveOld-1); if (out->weights[i] <= 0) out->weights[i] = 1e-20; /* always use the vector */ for (k = 0; k < nCodes; k++) fOut[k] *= originalMult; if (old1->weights[i] > 0) AddToFreq(/*IN/OUT*/fOut, -old1->weights[i], old1->codes[i], fOld1, dmat); if (old2->weights[i] > 0) AddToFreq(/*IN/OUT*/fOut, -old2->weights[i], old2->codes[i], fOld2, dmat); if (new->weights[i] > 0) AddToFreq(/*IN/OUT*/fOut, 
new->weights[i], new->codes[i], fNew, dmat); /* And renormalize */ NormalizeFreq(/*IN/OUT*/fOut, dmat); if (verbose > 2 && i < 3) { fprintf(stderr,"Updated out-profile position %d weight %f (mult %f)", i, out->weights[i], out->weights[i]*nActiveOld); if(out->weights[i] > 0) for (k=0;k<nCodes;k++) fprintf(stderr, " %c:%f", dmat?'?':codesString[k], fOut[k]); fprintf(stderr,"\n"); } } assert(iFreqOut == out->nVectors); assert(iFreq1 == old1->nVectors); assert(iFreq2 == old2->nVectors); assert(iFreqNew == new->nVectors); if(dmat) SetCodeDist(/*IN/OUT*/out,nPos,dmat); /* update constraints -- note in practice this should be a no-op */ for (i = 0; i < nConstraints; i++) { out->nOn[i] += new->nOn[i] - old1->nOn[i] - old2->nOn[i]; out->nOff[i] += new->nOff[i] - old1->nOff[i] - old2->nOff[i]; } } void SetCodeDist(/*IN/OUT*/profile_t *profile, int nPos, distance_matrix_t *dmat) { if (profile->codeDist == NULL) profile->codeDist = (numeric_t*)mymalloc(sizeof(numeric_t)*nPos*nCodes); int i; int iFreq = 0; for (i = 0; i < nPos; i++) { numeric_t *f = GET_FREQ(profile,i,/*IN/OUT*/iFreq); int k; for (k = 0; k < nCodes; k++) profile->codeDist[i*nCodes+k] = ProfileDistPiece(/*code1*/profile->codes[i], /*code2*/k, /*f1*/f, /*f2*/NULL, dmat, NULL); } assert(iFreq==profile->nVectors); } void SetBestHit(int node, NJ_t *NJ, int nActive, /*OUT*/besthit_t *bestjoin, /*OUT OPTIONAL*/besthit_t *allhits) { assert(NJ->parent[node] < 0); bestjoin->i = node; bestjoin->j = -1; bestjoin->dist = 1e20; bestjoin->criterion = 1e20; int j; besthit_t tmp; #ifdef OPENMP /* Note -- if we are already in a parallel region, this will be ignored */ #pragma omp parallel for schedule(dynamic, 50) #endif for (j = 0; j < NJ->maxnode; j++) { besthit_t *sv = allhits != NULL ? &allhits[j] : &tmp; sv->i = node; sv->j = j; if (NJ->parent[j] >= 0) { sv->i = -1; /* illegal/empty join */ sv->weight = 0.0; sv->criterion = sv->dist = 1e20; continue; } /* Note that we compute self-distances (allow j==node) because the top-hit heuristic expects self to be within its top hits, but we exclude those from the bestjoin that we return... 
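     The loop keeps, in *bestjoin, the candidate with the smallest criterion over
     all j != node; already-joined nodes (parent[j] >= 0) were marked above with
     i = -1 and an effectively infinite distance and criterion, so they can never
     be selected.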
*/ SetDistCriterion(NJ, nActive, /*IN/OUT*/sv); if (sv->criterion < bestjoin->criterion && node != j) *bestjoin = *sv; } if (verbose>5) { fprintf(stderr, "SetBestHit %d %d %f %f\n", bestjoin->i, bestjoin->j, bestjoin->dist, bestjoin->criterion); } } void ReadMatrix(char *filename, /*OUT*/numeric_t codes[MAXCODES][MAXCODES], bool checkCodes) { char buf[BUFFER_SIZE] = ""; FILE *fp = fopen(filename, "r"); if (fp == NULL) { fprintf(stderr, "Cannot read %s\n",filename); exit(1); } if (fgets(buf,sizeof(buf),fp) == NULL) { fprintf(stderr, "Error reading header line for %s:\n%s\n", filename, buf); exit(1); } if (checkCodes) { int i; int iBufPos; for (iBufPos=0,i=0;i<nCodes;i++,iBufPos++) { if(buf[iBufPos] != codesString[i]) { fprintf(stderr,"Header line\n%s\nin file %s does not have expected code %c # %d in %s\n", buf, filename, codesString[i], i, codesString); exit(1); } iBufPos++; if(buf[iBufPos] != '\n' && buf[iBufPos] != '\r' && buf[iBufPos] != '\0' && buf[iBufPos] != '\t') { fprintf(stderr, "Header line in %s should be tab-delimited\n", filename); exit(1); } if (buf[iBufPos] == '\0' && i < nCodes-1) { fprintf(stderr, "Header line in %s ends prematurely\n",filename); exit(1); } } /* end loop over codes */ /* Should be at end, but allow \n because of potential DOS \r\n */ if(buf[iBufPos] != '\0' && buf[iBufPos] != '\n' && buf[iBufPos] != '\r') { fprintf(stderr, "Header line in %s has too many entries\n", filename); exit(1); } } int iLine; for (iLine = 0; iLine < nCodes; iLine++) { buf[0] = '\0'; if (fgets(buf,sizeof(buf),fp) == NULL) { fprintf(stderr, "Cannot read line %d from file %s\n", iLine+2, filename); exit(1); } char *field = strtok(buf,"\t\r\n"); field = strtok(NULL, "\t"); /* ignore first column */ int iColumn; for (iColumn = 0; iColumn < nCodes && field != NULL; iColumn++, field = strtok(NULL,"\t")) { if(sscanf(field,ScanNumericSpec,&codes[iLine][iColumn]) != 1) { fprintf(stderr,"Cannot parse field %s in file %s\n", field, filename); exit(1); } } } } void ReadVector(char *filename, /*OUT*/numeric_t codes[MAXCODES]) { FILE *fp = fopen(filename,"r"); if (fp == NULL) { fprintf(stderr, "Cannot read %s\n",filename); exit(1); } int i; for (i = 0; i < nCodes; i++) { if (fscanf(fp,ScanNumericSpec,&codes[i]) != 1) { fprintf(stderr,"Cannot read %d entry of %s\n",i+1,filename); exit(1); } } if (fclose(fp) != 0) { fprintf(stderr, "Error reading %s\n",filename); exit(1); } } distance_matrix_t *ReadDistanceMatrix(char *prefix) { char buffer[BUFFER_SIZE]; distance_matrix_t *dmat = (distance_matrix_t*)mymalloc(sizeof(distance_matrix_t)); if(strlen(prefix) > BUFFER_SIZE-20) { fprintf(stderr,"Filename %s too long\n", prefix); exit(1); } strcpy(buffer, prefix); strcat(buffer, ".distances"); ReadMatrix(buffer, /*OUT*/dmat->distances, /*checkCodes*/true); strcpy(buffer, prefix); strcat(buffer, ".inverses"); ReadMatrix(buffer, /*OUT*/dmat->eigeninv, /*checkCodes*/false); strcpy(buffer, prefix); strcat(buffer, ".eigenvalues"); ReadVector(buffer, /*OUT*/dmat->eigenval); if(verbose>1) fprintf(stderr, "Read distance matrix from %s\n",prefix); SetupDistanceMatrix(/*IN/OUT*/dmat); return(dmat); } void SetupDistanceMatrix(/*IN/OUT*/distance_matrix_t *dmat) { /* Check that the eigenvalues and eigen-inverse are consistent with the distance matrix and that the matrix is symmetric */ int i,j,k; for (i = 0; i < nCodes; i++) { for (j = 0; j < nCodes; j++) { if(fabs(dmat->distances[i][j]-dmat->distances[j][i]) > 1e-6) { fprintf(stderr,"Distance matrix not symmetric for %d,%d: %f vs %f\n", i+1,j+1, dmat->distances[i][j], 
dmat->distances[j][i]); exit(1); } double total = 0.0; for (k = 0; k < nCodes; k++) total += dmat->eigenval[k] * dmat->eigeninv[k][i] * dmat->eigeninv[k][j]; if(fabs(total - dmat->distances[i][j]) > 1e-6) { fprintf(stderr,"Distance matrix entry %d,%d should be %f but eigen-representation gives %f\n", i+1,j+1,dmat->distances[i][j],total); exit(1); } } } /* And compute eigentot */ for (k = 0; k < nCodes; k++) { dmat->eigentot[k] = 0.; int j; for (j = 0; j < nCodes; j++) dmat->eigentot[k] += dmat->eigeninv[k][j]; } /* And compute codeFreq */ int code; for(code = 0; code < nCodes; code++) { for (k = 0; k < nCodes; k++) { dmat->codeFreq[code][k] = dmat->eigeninv[k][code]; } } /* And gapFreq */ for(code = 0; code < nCodes; code++) { double gapFreq = 0.0; for (k = 0; k < nCodes; k++) gapFreq += dmat->codeFreq[k][code]; dmat->gapFreq[code] = gapFreq / nCodes; } if(verbose>10) fprintf(stderr, "Made codeFreq\n"); } nni_t ChooseNNI(profile_t *profiles[4], /*OPTIONAL*/distance_matrix_t *dmat, int nPos, int nConstraints, /*OUT*/double criteria[3]) { double d[6]; CorrectedPairDistances(profiles, 4, dmat, nPos, /*OUT*/d); double penalty[3]; /* indexed as nni_t */ QuartetConstraintPenalties(profiles, nConstraints, /*OUT*/penalty); criteria[ABvsCD] = d[qAB] + d[qCD] + penalty[ABvsCD]; criteria[ACvsBD] = d[qAC] + d[qBD] + penalty[ACvsBD]; criteria[ADvsBC] = d[qAD] + d[qBC] + penalty[ADvsBC]; nni_t choice = ABvsCD; if (criteria[ACvsBD] < criteria[ABvsCD] && criteria[ACvsBD] <= criteria[ADvsBC]) { choice = ACvsBD; } else if (criteria[ADvsBC] < criteria[ABvsCD] && criteria[ADvsBC] <= criteria[ACvsBD]) { choice = ADvsBC; } if (verbose > 1 && penalty[choice] > penalty[ABvsCD] + 1e-6) { fprintf(stderr, "Worsen constraint: from %.3f to %.3f distance %.3f to %.3f: ", penalty[ABvsCD], penalty[choice], criteria[ABvsCD], choice == ACvsBD ? criteria[ACvsBD] : criteria[ADvsBC]); int iC; for (iC = 0; iC < nConstraints; iC++) { double ppart[3]; if (QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/ppart)) { double old_penalty = ppart[ABvsCD]; double new_penalty = ppart[choice]; if (new_penalty > old_penalty + 1e-6) fprintf(stderr, " %d (%d/%d %d/%d %d/%d %d/%d)", iC, profiles[0]->nOn[iC], profiles[0]->nOff[iC], profiles[1]->nOn[iC], profiles[1]->nOff[iC], profiles[2]->nOn[iC], profiles[2]->nOff[iC], profiles[3]->nOn[iC], profiles[3]->nOff[iC]); } } fprintf(stderr,"\n"); } if (verbose > 3) fprintf(stderr, "NNI scores ABvsCD %.5f ACvsBD %.5f ADvsBC %.5f choice %s\n", criteria[ABvsCD], criteria[ACvsBD], criteria[ADvsBC], choice == ABvsCD ? "AB|CD" : (choice == ACvsBD ? 
"AC|BD" : "AD|BC")); return(choice); } profile_t *PosteriorProfile(profile_t *p1, profile_t *p2, double len1, double len2, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, int nPos, int nConstraints) { if (len1 < MLMinBranchLength) len1 = MLMinBranchLength; if (len2 < MLMinBranchLength) len2 = MLMinBranchLength; int i,j,k; profile_t *out = NewProfile(nPos, nConstraints); for (i = 0; i < nPos; i++) { out->codes[i] = NOCODE; out->weights[i] = 1.0; } out->nVectors = nPos; out->vectors = (numeric_t*)mymalloc(sizeof(numeric_t)*nCodes*out->nVectors); for (i = 0; i < nCodes * out->nVectors; i++) out->vectors[i] = 0; int iFreqOut = 0; int iFreq1 = 0; int iFreq2 = 0; numeric_t *expeigenRates1 = NULL, *expeigenRates2 = NULL; if (transmat != NULL) { expeigenRates1 = ExpEigenRates(len1, transmat, rates); expeigenRates2 = ExpEigenRates(len2, transmat, rates); } if (transmat == NULL) { /* Jukes-Cantor */ assert(nCodes == 4); numeric_t fAll[128][4]; for (j = 0; j < 4; j++) for (k = 0; k < 4; k++) fAll[j][k] = (j==k) ? 1.0 : 0.0; for (k = 0; k < 4; k++) fAll[NOCODE][k] = 0.25; double *PSame1 = PSameVector(len1, rates); double *PDiff1 = PDiffVector(PSame1, rates); double *PSame2 = PSameVector(len2, rates); double *PDiff2 = PDiffVector(PSame2, rates); numeric_t mix1[4], mix2[4]; for (i=0; i < nPos; i++) { int iRate = rates->ratecat[i]; double w1 = p1->weights[i]; double w2 = p2->weights[i]; int code1 = p1->codes[i]; int code2 = p2->codes[i]; numeric_t *f1 = GET_FREQ(p1,i,/*IN/OUT*/iFreq1); numeric_t *f2 = GET_FREQ(p2,i,/*IN/OUT*/iFreq2); /* First try to store a simple profile */ if (f1 == NULL && f2 == NULL) { if (code1 == NOCODE && code2 == NOCODE) { out->codes[i] = NOCODE; out->weights[i] = 0.0; continue; } else if (code1 == NOCODE) { /* Posterior(parent | character & gap, len1, len2) = Posterior(parent | character, len1) = PSame() for matching characters and 1-PSame() for the rest = (pSame - pDiff) * character + (1-(pSame-pDiff)) * gap */ out->codes[i] = code2; out->weights[i] = w2 * (PSame2[iRate] - PDiff2[iRate]); continue; } else if (code2 == NOCODE) { out->codes[i] = code1; out->weights[i] = w1 * (PSame1[iRate] - PDiff1[iRate]); continue; } else if (code1 == code2) { out->codes[i] = code1; double f12code = (w1*PSame1[iRate] + (1-w1)*0.25) * (w2*PSame2[iRate] + (1-w2)*0.25); double f12other = (w1*PDiff1[iRate] + (1-w1)*0.25) * (w2*PDiff2[iRate] + (1-w2)*0.25); /* posterior probability of code1/code2 after scaling */ double pcode = f12code/(f12code+3*f12other); /* Now f = w * (code ? 
1 : 0) + (1-w) * 0.25, so to get pcode we need fcode = 1/4 + w1*3/4 or w = (f-1/4)*4/3 */ out->weights[i] = (pcode - 0.25) * 4.0/3.0; /* This can be zero because of numerical problems, I think */ if (out->weights[i] < 1e-6) { if (verbose > 1) fprintf(stderr, "Replaced weight %f with %f from w1 %f w2 %f PSame %f %f f12code %f f12other %f\n", out->weights[i], 1e-6, w1, w2, PSame1[iRate], PSame2[iRate], f12code, f12other); out->weights[i] = 1e-6; } continue; } } /* if we did not compute a simple profile, then do the full computation and store the full vector */ if (f1 == NULL) { for (j = 0; j < 4; j++) mix1[j] = (1-w1)*0.25; if(code1 != NOCODE) mix1[code1] += w1; f1 = mix1; } if (f2 == NULL) { for (j = 0; j < 4; j++) mix2[j] = (1-w2)*0.25; if(code2 != NOCODE) mix2[code2] += w2; f2 = mix2; } out->codes[i] = NOCODE; out->weights[i] = 1.0; numeric_t *f = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); double lkAB = 0; for (j = 0; j < 4; j++) { f[j] = (f1[j] * PSame1[iRate] + (1.0-f1[j]) * PDiff1[iRate]) * (f2[j] * PSame2[iRate] + (1.0-f2[j]) * PDiff2[iRate]); lkAB += f[j]; } double lkABInv = 1.0/lkAB; for (j = 0; j < 4; j++) f[j] *= lkABInv; } PSame1 = myfree(PSame1, sizeof(double) * rates->nRateCategories); PSame2 = myfree(PSame2, sizeof(double) * rates->nRateCategories); PDiff1 = myfree(PDiff1, sizeof(double) * rates->nRateCategories); PDiff2 = myfree(PDiff2, sizeof(double) * rates->nRateCategories); } else if (nCodes == 4) { /* matrix model on nucleotides */ numeric_t *fGap = &transmat->codeFreq[NOCODE][0]; numeric_t f1mix[4], f2mix[4]; for (i=0; i < nPos; i++) { if (p1->codes[i] == NOCODE && p2->codes[i] == NOCODE && p1->weights[i] == 0 && p2->weights[i] == 0) { /* aligning gap with gap -- just output a gap out->codes[i] is already set to NOCODE so need not set that */ out->weights[i] = 0; continue; } int iRate = rates->ratecat[i]; numeric_t *expeigen1 = &expeigenRates1[iRate*4]; numeric_t *expeigen2 = &expeigenRates2[iRate*4]; numeric_t *f1 = GET_FREQ(p1,i,/*IN/OUT*/iFreq1); numeric_t *f2 = GET_FREQ(p2,i,/*IN/OUT*/iFreq2); numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); assert(fOut != NULL); if (f1 == NULL) { f1 = &transmat->codeFreq[p1->codes[i]][0]; /* codeFreq includes an entry for NOCODE */ double w = p1->weights[i]; if (w > 0.0 && w < 1.0) { for (j = 0; j < 4; j++) f1mix[j] = w * f1[j] + (1.0-w) * fGap[j]; f1 = f1mix; } } if (f2 == NULL) { f2 = &transmat->codeFreq[p2->codes[i]][0]; double w = p2->weights[i]; if (w > 0.0 && w < 1.0) { for (j = 0; j < 4; j++) f2mix[j] = w * f2[j] + (1.0-w) * fGap[j]; f2 = f2mix; } } numeric_t fMult1[4] ALIGNED; /* rotated1 * expeigen1 */ numeric_t fMult2[4] ALIGNED; /* rotated2 * expeigen2 */ #if 0 /* SSE3 is slower */ vector_multiply(f1, expeigen1, 4, /*OUT*/fMult1); vector_multiply(f2, expeigen2, 4, /*OUT*/fMult2); #else for (j = 0; j < 4; j++) { fMult1[j] = f1[j]*expeigen1[j]; fMult2[j] = f2[j]*expeigen2[j]; } #endif numeric_t fPost[4] ALIGNED; /* in unrotated space */ for (j = 0; j < 4; j++) { #if 0 /* SSE3 is slower */ fPost[j] = vector_dot_product_rot(fMult1, fMult2, &transmat->codeFreq[j][0], 4) * transmat->statinv[j]; */ #else double out1 = 0; double out2 = 0; for (k = 0; k < 4; k++) { out1 += fMult1[k] * transmat->codeFreq[j][k]; out2 += fMult2[k] * transmat->codeFreq[j][k]; } fPost[j] = out1*out2*transmat->statinv[j]; #endif } double fPostTot = 0; for (j = 0; j < 4; j++) fPostTot += fPost[j]; assert(fPostTot > 1e-10); double fPostInv = 1.0/fPostTot; #if 0 /* SSE3 is slower */ vector_multiply_by(fPost, fPostInv, 4); #else for (j = 0; j < 4; j++) 
fPost[j] *= fPostInv; #endif /* and finally, divide by stat again & rotate to give the new frequencies */ matrixt_by_vector4(transmat->eigeninvT, fPost, /*OUT*/fOut); } /* end loop over position i */ } else if (nCodes == 20) { /* matrix model on amino acids */ numeric_t *fGap = &transmat->codeFreq[NOCODE][0]; numeric_t f1mix[20] ALIGNED; numeric_t f2mix[20] ALIGNED; for (i=0; i < nPos; i++) { if (p1->codes[i] == NOCODE && p2->codes[i] == NOCODE && p1->weights[i] == 0 && p2->weights[i] == 0) { /* aligning gap with gap -- just output a gap out->codes[i] is already set to NOCODE so need not set that */ out->weights[i] = 0; continue; } int iRate = rates->ratecat[i]; numeric_t *expeigen1 = &expeigenRates1[iRate*20]; numeric_t *expeigen2 = &expeigenRates2[iRate*20]; numeric_t *f1 = GET_FREQ(p1,i,/*IN/OUT*/iFreq1); numeric_t *f2 = GET_FREQ(p2,i,/*IN/OUT*/iFreq2); numeric_t *fOut = GET_FREQ(out,i,/*IN/OUT*/iFreqOut); assert(fOut != NULL); if (f1 == NULL) { f1 = &transmat->codeFreq[p1->codes[i]][0]; /* codeFreq includes an entry for NOCODE */ double w = p1->weights[i]; if (w > 0.0 && w < 1.0) { for (j = 0; j < 20; j++) f1mix[j] = w * f1[j] + (1.0-w) * fGap[j]; f1 = f1mix; } } if (f2 == NULL) { f2 = &transmat->codeFreq[p2->codes[i]][0]; double w = p2->weights[i]; if (w > 0.0 && w < 1.0) { for (j = 0; j < 20; j++) f2mix[j] = w * f2[j] + (1.0-w) * fGap[j]; f2 = f2mix; } } numeric_t fMult1[20] ALIGNED; /* rotated1 * expeigen1 */ numeric_t fMult2[20] ALIGNED; /* rotated2 * expeigen2 */ vector_multiply(f1, expeigen1, 20, /*OUT*/fMult1); vector_multiply(f2, expeigen2, 20, /*OUT*/fMult2); numeric_t fPost[20] ALIGNED; /* in unrotated space */ for (j = 0; j < 20; j++) { numeric_t value = vector_dot_product_rot(fMult1, fMult2, &transmat->codeFreq[j][0], 20) * transmat->statinv[j]; /* Added this logic try to avoid rare numerical problems */ fPost[j] = value >= 0 ? 
value : 0; } double fPostTot = vector_sum(fPost, 20); assert(fPostTot > 1e-10); double fPostInv = 1.0/fPostTot; vector_multiply_by(/*IN/OUT*/fPost, fPostInv, 20); int ch = -1; /* the dominant character, if any */ if (!exactML) { for (j = 0; j < 20; j++) { if (fPost[j] >= approxMLminf) { ch = j; break; } } } /* now, see if we can use the approximation fPost ~= (1 or 0) * w + nearP * (1-w) to avoid rotating */ double w = 0; if (ch >= 0) { w = (fPost[ch] - transmat->nearP[ch][ch]) / (1.0 - transmat->nearP[ch][ch]); for (j = 0; j < 20; j++) { if (j != ch) { double fRough = (1.0-w) * transmat->nearP[ch][j]; if (fRough < fPost[j] * approxMLminratio) { ch = -1; /* give up on the approximation */ break; } } } } if (ch >= 0) { nAAPosteriorRough++; double wInvStat = w * transmat->statinv[ch]; for (j = 0; j < 20; j++) fOut[j] = wInvStat * transmat->codeFreq[ch][j] + (1.0-w) * transmat->nearFreq[ch][j]; } else { /* and finally, divide by stat again & rotate to give the new frequencies */ nAAPosteriorExact++; for (j = 0; j < 20; j++) fOut[j] = vector_multiply_sum(fPost, &transmat->eigeninv[j][0], 20); } } /* end loop over position i */ } else { assert(0); /* illegal nCodes */ } if (transmat != NULL) { expeigenRates1 = myfree(expeigenRates1, sizeof(numeric_t) * rates->nRateCategories * nCodes); expeigenRates2 = myfree(expeigenRates2, sizeof(numeric_t) * rates->nRateCategories * nCodes); } /* Reallocate out->vectors to be the right size */ out->nVectors = iFreqOut; if (out->nVectors == 0) out->vectors = (numeric_t*)myfree(out->vectors, sizeof(numeric_t)*nCodes*nPos); else out->vectors = (numeric_t*)myrealloc(out->vectors, /*OLDSIZE*/sizeof(numeric_t)*nCodes*nPos, /*NEWSIZE*/sizeof(numeric_t)*nCodes*out->nVectors, /*copy*/true); /* try to save space */ nProfileFreqAlloc += out->nVectors; nProfileFreqAvoid += nPos - out->nVectors; /* compute total constraints */ for (i = 0; i < nConstraints; i++) { out->nOn[i] = p1->nOn[i] + p2->nOn[i]; out->nOff[i] = p1->nOff[i] + p2->nOff[i]; } nPosteriorCompute++; return(out); } double *PSameVector(double length, rates_t *rates) { double *pSame = mymalloc(sizeof(double) * rates->nRateCategories); int iRate; for (iRate = 0; iRate < rates->nRateCategories; iRate++) pSame[iRate] = 0.25 + 0.75 * exp((-4.0/3.0) * fabs(length*rates->rates[iRate])); return(pSame); } double *PDiffVector(double *pSame, rates_t *rates) { double *pDiff = mymalloc(sizeof(double) * rates->nRateCategories); int iRate; for (iRate = 0; iRate < rates->nRateCategories; iRate++) pDiff[iRate] = (1.0 - pSame[iRate])/3.0; return(pDiff); } numeric_t *ExpEigenRates(double length, transition_matrix_t *transmat, rates_t *rates) { numeric_t *expeigen = mymalloc(sizeof(numeric_t) * nCodes * rates->nRateCategories); int iRate, j; for (iRate = 0; iRate < rates->nRateCategories; iRate++) { for (j = 0; j < nCodes; j++) { double relLen = length * rates->rates[iRate]; /* very short branch lengths lead to numerical problems so prevent them */ if (relLen < MLMinRelBranchLength) relLen = MLMinRelBranchLength; expeigen[iRate*nCodes + j] = exp(relLen * transmat->eigenval[j]); } } return(expeigen); } double PairLogLk(profile_t *pA, profile_t *pB, double length, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*OPTIONAL IN/OUT*/double *site_likelihoods) { double lk = 1.0; double loglk = 0.0; /* stores underflow of lk during the loop over positions */ int i,j,k; assert(rates != NULL && rates->nRateCategories > 0); numeric_t *expeigenRates = NULL; if (transmat != NULL) expeigenRates = ExpEigenRates(length, 
transmat, rates); if (transmat == NULL) { /* Jukes-Cantor */ assert (nCodes == 4); double *pSame = PSameVector(length, rates); double *pDiff = PDiffVector(pSame, rates); numeric_t fAll[128][4]; for (j = 0; j < 4; j++) for (k = 0; k < 4; k++) fAll[j][k] = (j==k) ? 1.0 : 0.0; for (k = 0; k < 4; k++) fAll[NOCODE][k] = 0.25; int iFreqA = 0; int iFreqB = 0; for (i = 0; i < nPos; i++) { int iRate = rates->ratecat[i]; double wA = pA->weights[i]; double wB = pB->weights[i]; int codeA = pA->codes[i]; int codeB = pB->codes[i]; numeric_t *fA = GET_FREQ(pA,i,/*IN/OUT*/iFreqA); numeric_t *fB = GET_FREQ(pB,i,/*IN/OUT*/iFreqB); double lkAB = 0; if (fA == NULL && fB == NULL) { if (codeA == NOCODE) { /* A is all gaps */ /* gap to gap is sum(j) 0.25 * (0.25 * pSame + 0.75 * pDiff) = sum(i) 0.25*0.25 = 0.25 gap to any character gives the same result */ lkAB = 0.25; } else if (codeB == NOCODE) { /* B is all gaps */ lkAB = 0.25; } else if (codeA == codeB) { /* A and B match */ lkAB = pSame[iRate] * wA*wB + 0.25 * (1-wA*wB); } else { /* codeA != codeB */ lkAB = pDiff[iRate] * wA*wB + 0.25 * (1-wA*wB); } } else if (fA == NULL) { /* Compare codeA to profile of B */ if (codeA == NOCODE) lkAB = 0.25; else lkAB = wA * (pDiff[iRate] + fB[codeA] * (pSame[iRate]-pDiff[iRate])) + (1.0-wA) * 0.25; /* because lkAB = wA * P(codeA->B) + (1-wA) * 0.25 P(codeA -> B) = sum(j) P(B==j) * (j==codeA ? pSame : pDiff) = sum(j) P(B==j) * pDiff + = pDiff + P(B==codeA) * (pSame-pDiff) */ } else if (fB == NULL) { /* Compare codeB to profile of A */ if (codeB == NOCODE) lkAB = 0.25; else lkAB = wB * (pDiff[iRate] + fA[codeB] * (pSame[iRate]-pDiff[iRate])) + (1.0-wB) * 0.25; } else { /* both are full profiles */ for (j = 0; j < 4; j++) lkAB += fB[j] * (fA[j] * pSame[iRate] + (1-fA[j])* pDiff[iRate]); /* P(A|B) */ } assert(lkAB > 0); lk *= lkAB; while (lk < LkUnderflow) { lk *= LkUnderflowInv; loglk -= LogLkUnderflow; } if (site_likelihoods != NULL) site_likelihoods[i] *= lkAB; } pSame = myfree(pSame, sizeof(double) * rates->nRateCategories); pDiff = myfree(pDiff, sizeof(double) * rates->nRateCategories); } else if (nCodes == 4) { /* matrix model on nucleotides */ int iFreqA = 0; int iFreqB = 0; numeric_t fAmix[4], fBmix[4]; numeric_t *fGap = &transmat->codeFreq[NOCODE][0]; for (i = 0; i < nPos; i++) { int iRate = rates->ratecat[i]; numeric_t *expeigen = &expeigenRates[iRate*4]; double wA = pA->weights[i]; double wB = pB->weights[i]; if (wA == 0 && wB == 0 && pA->codes[i] == NOCODE && pB->codes[i] == NOCODE) { /* Likelihood of A vs B is 1, so nothing changes Do not need to advance iFreqA or iFreqB */ continue; } numeric_t *fA = GET_FREQ(pA,i,/*IN/OUT*/iFreqA); numeric_t *fB = GET_FREQ(pB,i,/*IN/OUT*/iFreqB); if (fA == NULL) fA = &transmat->codeFreq[pA->codes[i]][0]; if (wA > 0.0 && wA < 1.0) { for (j = 0; j < 4; j++) fAmix[j] = wA*fA[j] + (1.0-wA)*fGap[j]; fA = fAmix; } if (fB == NULL) fB = &transmat->codeFreq[pB->codes[i]][0]; if (wB > 0.0 && wB < 1.0) { for (j = 0; j < 4; j++) fBmix[j] = wB*fB[j] + (1.0-wB)*fGap[j]; fB = fBmix; } /* SSE3 instructions do not speed this step up: numeric_t lkAB = vector_multiply3_sum(expeigen, fA, fB); */ double lkAB = 0; for (j = 0; j < 4; j++) lkAB += expeigen[j]*fA[j]*fB[j]; assert(lkAB > 0); if (site_likelihoods != NULL) site_likelihoods[i] *= lkAB; lk *= lkAB; while (lk < LkUnderflow) { lk *= LkUnderflowInv; loglk -= LogLkUnderflow; } while (lk > LkUnderflowInv) { lk *= LkUnderflow; loglk += LogLkUnderflow; } } } else if (nCodes == 20) { /* matrix model on amino acids */ int iFreqA = 0; int iFreqB = 0; 
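    /* As in the 4-code branch above, the per-site likelihood in the eigen-rotated
       space is lkAB = sum_j expeigen[j] * fA[j] * fB[j], where expeigen[j] =
       exp(rate * length * eigenval[j]) (see ExpEigenRates) and partial gap weights
       are folded in by mixing each profile with codeFreq[NOCODE]. */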
numeric_t fAmix[20], fBmix[20]; numeric_t *fGap = &transmat->codeFreq[NOCODE][0]; for (i = 0; i < nPos; i++) { int iRate = rates->ratecat[i]; numeric_t *expeigen = &expeigenRates[iRate*20]; double wA = pA->weights[i]; double wB = pB->weights[i]; if (wA == 0 && wB == 0 && pA->codes[i] == NOCODE && pB->codes[i] == NOCODE) { /* Likelihood of A vs B is 1, so nothing changes Do not need to advance iFreqA or iFreqB */ continue; } numeric_t *fA = GET_FREQ(pA,i,/*IN/OUT*/iFreqA); numeric_t *fB = GET_FREQ(pB,i,/*IN/OUT*/iFreqB); if (fA == NULL) fA = &transmat->codeFreq[pA->codes[i]][0]; if (wA > 0.0 && wA < 1.0) { for (j = 0; j < 20; j++) fAmix[j] = wA*fA[j] + (1.0-wA)*fGap[j]; fA = fAmix; } if (fB == NULL) fB = &transmat->codeFreq[pB->codes[i]][0]; if (wB > 0.0 && wB < 1.0) { for (j = 0; j < 20; j++) fBmix[j] = wB*fB[j] + (1.0-wB)*fGap[j]; fB = fBmix; } numeric_t lkAB = vector_multiply3_sum(expeigen, fA, fB, 20); if (!(lkAB > 0)) { /* If this happens, it indicates a numerical problem that needs to be addressed elsewhere, so report all the details */ fprintf(stderr, "# FastTree.c::PairLogLk -- numerical problem!\n"); fprintf(stderr, "# This block is intended for loading into R\n"); fprintf(stderr, "lkAB = %.8g\n", lkAB); fprintf(stderr, "Branch_length= %.8g\nalignment_position=%d\nnCodes=%d\nrate_category=%d\nrate=%.8g\n", length, i, nCodes, iRate, rates->rates[iRate]); fprintf(stderr, "wA=%.8g\nwB=%.8g\n", wA, wB); fprintf(stderr, "codeA = %d\ncodeB = %d\n", pA->codes[i], pB->codes[i]); fprintf(stderr, "fA = c("); for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", fA[j]); fprintf(stderr,")\n"); fprintf(stderr, "fB = c("); for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", fB[j]); fprintf(stderr,")\n"); fprintf(stderr, "stat = c("); for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", transmat->stat[j]); fprintf(stderr,")\n"); fprintf(stderr, "eigenval = c("); for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", transmat->eigenval[j]); fprintf(stderr,")\n"); fprintf(stderr, "expeigen = c("); for (j = 0; j < nCodes; j++) fprintf(stderr, "%s %.8g", j==0?"":",", expeigen[j]); fprintf(stderr,")\n"); int k; fprintf(stderr, "codeFreq = c("); for (j = 0; j < nCodes; j++) for(k = 0; k < nCodes; k++) fprintf(stderr, "%s %.8g", j==0 && k==0?"":",", transmat->codeFreq[j][k]); fprintf(stderr,")\n"); fprintf(stderr, "eigeninv = c("); for (j = 0; j < nCodes; j++) for(k = 0; k < nCodes; k++) fprintf(stderr, "%s %.8g", j==0 && k==0?"":",", transmat->eigeninv[j][k]); fprintf(stderr,")\n"); fprintf(stderr, "# Transform into matrices and compute un-rotated vectors for profiles A and B\n"); fprintf(stderr, "codeFreq = matrix(codeFreq,nrow=20);\n"); fprintf(stderr, "eigeninv = matrix(eigeninv,nrow=20);\n"); fputs("unrotA = stat * (eigeninv %*% fA)\n", stderr); fputs("unrotB = stat * (eigeninv %*% fB)\n", stderr); fprintf(stderr,"# End of R block\n"); } assert(lkAB > 0); if (site_likelihoods != NULL) site_likelihoods[i] *= lkAB; lk *= lkAB; while (lk < LkUnderflow) { lk *= LkUnderflowInv; loglk -= LogLkUnderflow; } while (lk > LkUnderflowInv) { lk *= LkUnderflow; loglk += LogLkUnderflow; } } } else { assert(0); /* illegal nCodes */ } if (transmat != NULL) expeigenRates = myfree(expeigenRates, sizeof(numeric_t) * rates->nRateCategories * 20); loglk += log(lk); nLkCompute++; return(loglk); } double MLQuartetLogLk(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*IN*/double 
branch_lengths[5], /*OPTIONAL OUT*/double *site_likelihoods) { profile_t *pAB = PosteriorProfile(pA, pB, branch_lengths[0], branch_lengths[1], transmat, rates, nPos, /*nConstraints*/0); profile_t *pCD = PosteriorProfile(pC, pD, branch_lengths[2], branch_lengths[3], transmat, rates, nPos, /*nConstraints*/0); if (site_likelihoods != NULL) { int i; for (i = 0; i < nPos; i++) site_likelihoods[i] = 1.0; } /* Roughly, P(A,B,C,D) = P(A) P(B|A) P(D|C) P(AB | CD) */ double loglk = PairLogLk(pA, pB, branch_lengths[0]+branch_lengths[1], nPos, transmat, rates, /*OPTIONAL IN/OUT*/site_likelihoods) + PairLogLk(pC, pD, branch_lengths[2]+branch_lengths[3], nPos, transmat, rates, /*OPTIONAL IN/OUT*/site_likelihoods) + PairLogLk(pAB, pCD, branch_lengths[4], nPos, transmat, rates, /*OPTIONAL IN/OUT*/site_likelihoods); pAB = FreeProfile(pAB, nPos, /*nConstraints*/0); pCD = FreeProfile(pCD, nPos, /*nConstraints*/0); return(loglk); } double PairNegLogLk(double x, void *data) { quartet_opt_t *qo = (quartet_opt_t *)data; assert(qo != NULL); assert(qo->pair1 != NULL && qo->pair2 != NULL); qo->nEval++; double loglk = PairLogLk(qo->pair1, qo->pair2, x, qo->nPos, qo->transmat, qo->rates, /*site_lk*/NULL); assert(loglk < 1e100); if (verbose > 5) fprintf(stderr, "PairLogLk(%.4f) = %.4f\n", x, loglk); return(-loglk); } double MLQuartetOptimize(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*IN/OUT*/double branch_lengths[5], /*OPTIONAL OUT*/bool *pStarTest, /*OPTIONAL OUT*/double *site_likelihoods) { int j; double start_length[5]; for (j = 0; j < 5; j++) { start_length[j] = branch_lengths[j]; if (branch_lengths[j] < MLMinBranchLength) branch_lengths[j] = MLMinBranchLength; } quartet_opt_t qopt = { nPos, transmat, rates, /*nEval*/0, /*pair1*/NULL, /*pair2*/NULL }; double f2x, negloglk; if (pStarTest != NULL) *pStarTest = false; /* First optimize internal branch, then branch to A, B, C, D, in turn May use star test to quit after internal branch */ profile_t *pAB = PosteriorProfile(pA, pB, branch_lengths[LEN_A], branch_lengths[LEN_B], transmat, rates, nPos, /*nConstraints*/0); profile_t *pCD = PosteriorProfile(pC, pD, branch_lengths[LEN_C], branch_lengths[LEN_D], transmat, rates, nPos, /*nConstraints*/0); qopt.pair1 = pAB; qopt.pair2 = pCD; branch_lengths[LEN_I] = onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/branch_lengths[LEN_I], /*xmax*/6.0, PairNegLogLk, /*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, /*OUT*/&negloglk, /*OUT*/&f2x); if (pStarTest != NULL) { assert(site_likelihoods == NULL); double loglkStar = -PairNegLogLk(MLMinBranchLength, &qopt); if (loglkStar < -negloglk - closeLogLkLimit) { *pStarTest = true; double off = PairLogLk(pA, pB, branch_lengths[LEN_A] + branch_lengths[LEN_B], qopt.nPos, qopt.transmat, qopt.rates, /*site_lk*/NULL) + PairLogLk(pC, pD, branch_lengths[LEN_C] + branch_lengths[LEN_D], qopt.nPos, qopt.transmat, qopt.rates, /*site_lk*/NULL); pAB = FreeProfile(pAB, nPos, /*nConstraints*/0); pCD = FreeProfile(pCD, nPos, /*nConstraints*/0); return (-negloglk + off); } } pAB = FreeProfile(pAB, nPos, /*nConstraints*/0); profile_t *pBCD = PosteriorProfile(pB, pCD, branch_lengths[LEN_B], branch_lengths[LEN_I], transmat, rates, nPos, /*nConstraints*/0); qopt.pair1 = pA; qopt.pair2 = pBCD; branch_lengths[LEN_A] = onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/branch_lengths[LEN_A], /*xmax*/6.0, PairNegLogLk, /*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, 
/*OUT*/&negloglk, /*OUT*/&f2x); pBCD = FreeProfile(pBCD, nPos, /*nConstraints*/0); profile_t *pACD = PosteriorProfile(pA, pCD, branch_lengths[LEN_A], branch_lengths[LEN_I], transmat, rates, nPos, /*nConstraints*/0); qopt.pair1 = pB; qopt.pair2 = pACD; branch_lengths[LEN_B] = onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/branch_lengths[LEN_B], /*xmax*/6.0, PairNegLogLk, /*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, /*OUT*/&negloglk, /*OUT*/&f2x); pACD = FreeProfile(pACD, nPos, /*nConstraints*/0); pCD = FreeProfile(pCD, nPos, /*nConstraints*/0); pAB = PosteriorProfile(pA, pB, branch_lengths[LEN_A], branch_lengths[LEN_B], transmat, rates, nPos, /*nConstraints*/0); profile_t *pABD = PosteriorProfile(pAB, pD, branch_lengths[LEN_I], branch_lengths[LEN_D], transmat, rates, nPos, /*nConstraints*/0); qopt.pair1 = pC; qopt.pair2 = pABD; branch_lengths[LEN_C] = onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/branch_lengths[LEN_C], /*xmax*/6.0, PairNegLogLk, /*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, /*OUT*/&negloglk, /*OUT*/&f2x); pABD = FreeProfile(pABD, nPos, /*nConstraints*/0); profile_t *pABC = PosteriorProfile(pAB, pC, branch_lengths[LEN_I], branch_lengths[LEN_C], transmat, rates, nPos, /*nConstraints*/0); qopt.pair1 = pD; qopt.pair2 = pABC; branch_lengths[LEN_D] = onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/branch_lengths[LEN_D], /*xmax*/6.0, PairNegLogLk, /*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, /*OUT*/&negloglk, /*OUT*/&f2x); /* Compute the total quartet likelihood PairLogLk(ABC,D) + PairLogLk(AB,C) + PairLogLk(A,B) */ double loglkABCvsD = -negloglk; if (site_likelihoods) { for (j = 0; j < nPos; j++) site_likelihoods[j] = 1.0; PairLogLk(pABC, pD, branch_lengths[LEN_D], qopt.nPos, qopt.transmat, qopt.rates, /*IN/OUT*/site_likelihoods); } double quartetloglk = loglkABCvsD + PairLogLk(pAB, pC, branch_lengths[LEN_I] + branch_lengths[LEN_C], qopt.nPos, qopt.transmat, qopt.rates, /*IN/OUT*/site_likelihoods) + PairLogLk(pA, pB, branch_lengths[LEN_A] + branch_lengths[LEN_B], qopt.nPos, qopt.transmat, qopt.rates, /*IN/OUT*/site_likelihoods); pABC = FreeProfile(pABC, nPos, /*nConstraints*/0); pAB = FreeProfile(pAB, nPos, /*nConstraints*/0); if (verbose > 3) { double loglkStart = MLQuartetLogLk(pA, pB, pC, pD, nPos, transmat, rates, start_length, /*site_lk*/NULL); fprintf(stderr, "Optimize loglk from %.5f to %.5f eval %d lengths from\n" " %.5f %.5f %.5f %.5f %.5f to\n" " %.5f %.5f %.5f %.5f %.5f\n", loglkStart, quartetloglk, qopt.nEval, start_length[0], start_length[1], start_length[2], start_length[3], start_length[4], branch_lengths[0], branch_lengths[1], branch_lengths[2], branch_lengths[3], branch_lengths[4]); } return(quartetloglk); } nni_t MLQuartetNNI(profile_t *profiles[4], /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, int nPos, int nConstraints, /*OUT*/double criteria[3], /* The three potential quartet log-likelihoods */ /*IN/OUT*/numeric_t len[5], bool bFast) { int i; double lenABvsCD[5] = {len[LEN_A], len[LEN_B], len[LEN_C], len[LEN_D], len[LEN_I]}; double lenACvsBD[5] = {len[LEN_A], len[LEN_C], len[LEN_B], len[LEN_D], len[LEN_I]}; /* Swap B & C */ double lenADvsBC[5] = {len[LEN_A], len[LEN_D], len[LEN_C], len[LEN_B], len[LEN_I]}; /* Swap B & D */ bool bConsiderAC = true; bool bConsiderAD = true; int iRound; int nRounds = mlAccuracy < 2 ? 
2 : mlAccuracy; double penalty[3]; QuartetConstraintPenalties(profiles, nConstraints, /*OUT*/penalty); if (penalty[ABvsCD] > penalty[ACvsBD] || penalty[ABvsCD] > penalty[ADvsBC]) bFast = false; #ifdef OPENMP bFast = false; /* turn off star topology test */ #endif for (iRound = 0; iRound < nRounds; iRound++) { bool bStarTest = false; { #ifdef OPENMP #pragma omp parallel #pragma omp sections #endif { #ifdef OPENMP #pragma omp section #endif { criteria[ABvsCD] = MLQuartetOptimize(profiles[0], profiles[1], profiles[2], profiles[3], nPos, transmat, rates, /*IN/OUT*/lenABvsCD, bFast ? &bStarTest : NULL, /*site_likelihoods*/NULL) - penalty[ABvsCD]; /* subtract penalty b/c we are trying to maximize log lk */ } #ifdef OPENMP #pragma omp section #else if (bStarTest) { nStarTests++; criteria[ACvsBD] = -1e20; criteria[ADvsBC] = -1e20; len[LEN_I] = lenABvsCD[LEN_I]; return(ABvsCD); } #endif { if (bConsiderAC) criteria[ACvsBD] = MLQuartetOptimize(profiles[0], profiles[2], profiles[1], profiles[3], nPos, transmat, rates, /*IN/OUT*/lenACvsBD, NULL, /*site_likelihoods*/NULL) - penalty[ACvsBD]; } #ifdef OPENMP #pragma omp section #endif { if (bConsiderAD) criteria[ADvsBC] = MLQuartetOptimize(profiles[0], profiles[3], profiles[2], profiles[1], nPos, transmat, rates, /*IN/OUT*/lenADvsBC, NULL, /*site_likelihoods*/NULL) - penalty[ADvsBC]; } } } /* end parallel sections */ if (mlAccuracy < 2) { /* If clearly worse then ABvsCD, or have short internal branch length and worse, then give up */ if (criteria[ACvsBD] < criteria[ABvsCD] - closeLogLkLimit || (lenACvsBD[LEN_I] <= 2.0*MLMinBranchLength && criteria[ACvsBD] < criteria[ABvsCD])) bConsiderAC = false; if (criteria[ADvsBC] < criteria[ABvsCD] - closeLogLkLimit || (lenADvsBC[LEN_I] <= 2.0*MLMinBranchLength && criteria[ADvsBC] < criteria[ABvsCD])) bConsiderAD = false; if (!bConsiderAC && !bConsiderAD) break; /* If clearly better than either alternative, then give up (Comparison is probably biased in favor of ABvsCD anyway) */ if (criteria[ACvsBD] > criteria[ABvsCD] + closeLogLkLimit && criteria[ACvsBD] > criteria[ADvsBC] + closeLogLkLimit) break; if (criteria[ADvsBC] > criteria[ABvsCD] + closeLogLkLimit && criteria[ADvsBC] > criteria[ACvsBD] + closeLogLkLimit) break; } } /* end loop over rounds */ if (verbose > 2) { fprintf(stderr, "Optimized quartet for %d rounds: ABvsCD %.5f ACvsBD %.5f ADvsBC %.5f\n", iRound, criteria[ABvsCD], criteria[ACvsBD], criteria[ADvsBC]); } if (criteria[ACvsBD] > criteria[ABvsCD] && criteria[ACvsBD] > criteria[ADvsBC]) { for (i = 0; i < 5; i++) len[i] = lenACvsBD[i]; return(ACvsBD); } else if (criteria[ADvsBC] > criteria[ABvsCD] && criteria[ADvsBC] > criteria[ACvsBD]) { for (i = 0; i < 5; i++) len[i] = lenADvsBC[i]; return(ADvsBC); } else { for (i = 0; i < 5; i++) len[i] = lenABvsCD[i]; return(ABvsCD); } } double TreeLength(/*IN/OUT*/NJ_t *NJ, bool recomputeProfiles) { if (recomputeProfiles) { traversal_t traversal2 = InitTraversal(NJ); int j = NJ->root; while((j = TraversePostorder(j, NJ, /*IN/OUT*/traversal2, /*pUp*/NULL)) >= 0) { /* nothing to do for leaves or root */ if (j >= NJ->nSeq && j != NJ->root) SetProfile(/*IN/OUT*/NJ, j, /*noweight*/-1.0); } traversal2 = FreeTraversal(traversal2,NJ); } UpdateBranchLengths(/*IN/OUT*/NJ); double total_len = 0; int iNode; for (iNode = 0; iNode < NJ->maxnode; iNode++) total_len += NJ->branchlength[iNode]; return(total_len); } double TreeLogLk(/*IN*/NJ_t *NJ, /*OPTIONAL OUT*/double *site_loglk) { int i; if (NJ->nSeq < 2) return(0.0); double loglk = 0.0; double *site_likelihood = NULL; if 
(site_loglk != NULL) { site_likelihood = mymalloc(sizeof(double)*NJ->nPos); for (i = 0; i < NJ->nPos; i++) { site_likelihood[i] = 1.0; site_loglk[i] = 0.0; } } traversal_t traversal = InitTraversal(NJ); int node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { int nChild = NJ->child[node].nChild; if (nChild == 0) continue; assert(nChild >= 2); int *children = NJ->child[node].child; double loglkchild = PairLogLk(NJ->profiles[children[0]], NJ->profiles[children[1]], NJ->branchlength[children[0]]+NJ->branchlength[children[1]], NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/site_likelihood); loglk += loglkchild; if (site_likelihood != NULL) { /* prevent underflows */ for (i = 0; i < NJ->nPos; i++) { while(site_likelihood[i] < LkUnderflow) { site_likelihood[i] *= LkUnderflowInv; site_loglk[i] -= LogLkUnderflow; } } } if (verbose > 2) fprintf(stderr, "At %d: LogLk(%d:%.4f,%d:%.4f) = %.3f\n", node, children[0], NJ->branchlength[children[0]], children[1], NJ->branchlength[children[1]], loglkchild); if (NJ->child[node].nChild == 3) { assert(node == NJ->root); /* Infer the common parent of the 1st two to define the third... */ profile_t *pAB = PosteriorProfile(NJ->profiles[children[0]], NJ->profiles[children[1]], NJ->branchlength[children[0]], NJ->branchlength[children[1]], NJ->transmat, &NJ->rates, NJ->nPos, /*nConstraints*/0); double loglkup = PairLogLk(pAB, NJ->profiles[children[2]], NJ->branchlength[children[2]], NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/site_likelihood); loglk += loglkup; if (verbose > 2) fprintf(stderr, "At root %d: LogLk((%d/%d),%d:%.3f) = %.3f\n", node, children[0], children[1], children[2], NJ->branchlength[children[2]], loglkup); pAB = FreeProfile(pAB, NJ->nPos, NJ->nConstraints); } } traversal = FreeTraversal(traversal,NJ); if (site_likelihood != NULL) { for (i = 0; i < NJ->nPos; i++) { site_loglk[i] += log(site_likelihood[i]); } site_likelihood = myfree(site_likelihood, sizeof(double)*NJ->nPos); } /* For Jukes-Cantor, with a tree of size 4, if the children of the root are (A,B), C, and D, then P(ABCD) = P(A) P(B|A) P(C|AB) P(D|ABC) Above we compute P(B|A) P(C|AB) P(D|ABC) -- note P(B|A) is at the child of root and P(C|AB) P(D|ABC) is at root. Similarly if the children of the root are C, D, and (A,B), then P(ABCD) = P(C|D) P(A|B) P(AB|CD) P(D), and above we compute that except for P(D) So we need to multiply by P(A) = 0.25, so we pay log(4) at each position (if ungapped). Each gapped position in any sequence reduces the payment by log(4) For JTT or GTR, we are computing P(A & B) and the posterior profiles are scaled to take the prior into account, so we do not need any correction. codeFreq[NOCODE] is scaled x higher so that P(-) = 1 not P(-)=1/nCodes, so gaps do not need to be corrected either. 
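     As a concrete check of the correction below: with nPos = 100 ungapped
     nucleotide positions, loglk is reduced by 100 * log(4) (about 138.6), and
     every gap character encountered at any position of any sequence adds one
     log(4) back.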
*/ if (nCodes == 4 && NJ->transmat == NULL) { int nGaps = 0; double logNCodes = log((double)nCodes); for (i = 0; i < NJ->nPos; i++) { int nGapsThisPos = 0; for (node = 0; node < NJ->nSeq; node++) { unsigned char *codes = NJ->profiles[node]->codes; if (codes[i] == NOCODE) nGapsThisPos++; } nGaps += nGapsThisPos; if (site_loglk != NULL) { site_loglk[i] += nGapsThisPos * logNCodes; if (nCodes == 4 && NJ->transmat == NULL) site_loglk[i] -= logNCodes; } } loglk -= NJ->nPos * logNCodes; loglk += nGaps * logNCodes; /* do not pay for gaps -- only Jukes-Cantor */ } return(loglk); } void SetMLGtr(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL IN*/double *freq_in, /*OPTIONAL WRITE*/FILE *fpLog) { int i; assert(nCodes==4); gtr_opt_t gtr; gtr.NJ = NJ; if (freq_in != NULL) { for (i=0; i<4; i++) gtr.freq[i]=freq_in[i]; } else { int n[4] = {1,1,1,1}; /* pseudocounts */ for (i=0; i<NJ->nSeq; i++) { unsigned char *codes = NJ->profiles[i]->codes; int iPos; for (iPos=0; iPos<NJ->nPos; iPos++) if (codes[iPos] < 4) n[codes[iPos]]++; } int sum = n[0]+n[1]+n[2]+n[3]; for (i=0; i<4; i++) gtr.freq[i] = n[i]/(double)sum; } for (i=0; i<6; i++) gtr.rates[i] = 1.0; int nRounds = mlAccuracy < 2 ? 2 : mlAccuracy; for (i = 0; i < nRounds; i++) { for (gtr.iRate = 0; gtr.iRate < 6; gtr.iRate++) { ProgressReport("Optimizing GTR model, step %d of %d", i*6+gtr.iRate+1, 12, 0, 0); double negloglk, f2x; gtr.rates[gtr.iRate] = onedimenmin(/*xmin*/0.05, /*xguess*/gtr.rates[gtr.iRate], /*xmax*/20.0, GTRNegLogLk, /*data*/&gtr, /*ftol*/0.001, /*atol*/0.0001, /*OUT*/&negloglk, /*OUT*/&f2x); } } /* normalize gtr so last rate is 1 -- specifying that rate separately is useful for optimization only */ for (i = 0; i < 5; i++) gtr.rates[i] /= gtr.rates[5]; gtr.rates[5] = 1.0; if (verbose) { fprintf(stderr, "GTR Frequencies: %.4f %.4f %.4f %.4f\n", gtr.freq[0], gtr.freq[1], gtr.freq[2], gtr.freq[3]); fprintf(stderr, "GTR rates(ac ag at cg ct gt) %.4f %.4f %.4f %.4f %.4f %.4f\n", gtr.rates[0],gtr.rates[1],gtr.rates[2],gtr.rates[3],gtr.rates[4],gtr.rates[5]); } if (fpLog != NULL) { fprintf(fpLog, "GTRFreq\t%.4f\t%.4f\t%.4f\t%.4f\n", gtr.freq[0], gtr.freq[1], gtr.freq[2], gtr.freq[3]); fprintf(fpLog, "GTRRates\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\t%.4f\n", gtr.rates[0],gtr.rates[1],gtr.rates[2],gtr.rates[3],gtr.rates[4],gtr.rates[5]); } myfree(NJ->transmat, sizeof(transition_matrix_t)); NJ->transmat = CreateGTR(gtr.rates, gtr.freq); RecomputeMLProfiles(/*IN/OUT*/NJ); OptimizeAllBranchLengths(/*IN/OUT*/NJ); } double GTRNegLogLk(double x, void *data) { gtr_opt_t *gtr = (gtr_opt_t*)data; assert(nCodes == 4); assert(gtr->NJ != NULL); assert(gtr->iRate >= 0 && gtr->iRate < 6); assert(x > 0); transition_matrix_t *old = gtr->NJ->transmat; double rates[6]; int i; for (i = 0; i < 6; i++) rates[i] = gtr->rates[i]; rates[gtr->iRate] = x; gtr->NJ->transmat = CreateGTR(rates, gtr->freq); RecomputeMLProfiles(/*IN/OUT*/gtr->NJ); double loglk = TreeLogLk(gtr->NJ, /*site_loglk*/NULL); myfree(gtr->NJ->transmat, sizeof(transition_matrix_t)); gtr->NJ->transmat = old; /* Do not recompute profiles -- assume the caller will do that */ if (verbose > 2) fprintf(stderr, "GTR LogLk(%.5f %.5f %.5f %.5f %.5f %.5f) = %f\n", rates[0], rates[1], rates[2], rates[3], rates[4], rates[5], loglk); return(-loglk); } /* Caller must free the resulting vector of n rates */ numeric_t *MLSiteRates(int nRateCategories) { /* Even spacing from 1/nRate to nRate */ double logNCat = log((double)nRateCategories); double logMinRate = -logNCat; double logMaxRate = logNCat; double logd = 
(logMaxRate-logMinRate)/(double)(nRateCategories-1); numeric_t *rates = mymalloc(sizeof(numeric_t)*nRateCategories); int i; for (i = 0; i < nRateCategories; i++) rates[i] = exp(logMinRate + logd*(double)i); return(rates); } double *MLSiteLikelihoodsByRate(/*IN*/NJ_t *NJ, /*IN*/numeric_t *rates, int nRateCategories) { double *site_loglk = mymalloc(sizeof(double)*NJ->nPos*nRateCategories); /* save the original rates */ assert(NJ->rates.nRateCategories > 0); numeric_t *oldRates = NJ->rates.rates; NJ->rates.rates = mymalloc(sizeof(numeric_t) * NJ->rates.nRateCategories); /* Compute site likelihood for each rate */ int iPos; int iRate; for (iRate = 0; iRate < nRateCategories; iRate++) { int i; for (i = 0; i < NJ->rates.nRateCategories; i++) NJ->rates.rates[i] = rates[iRate]; RecomputeMLProfiles(/*IN/OUT*/NJ); double loglk = TreeLogLk(NJ, /*OUT*/&site_loglk[NJ->nPos*iRate]); ProgressReport("Site likelihoods with rate category %d of %d", iRate+1, nRateCategories, 0, 0); if(verbose > 2) { fprintf(stderr, "Rate %.3f Loglk %.3f SiteLogLk", rates[iRate], loglk); for (iPos = 0; iPos < NJ->nPos; iPos++) fprintf(stderr,"\t%.3f", site_loglk[NJ->nPos*iRate + iPos]); fprintf(stderr,"\n"); } } /* restore original rates and profiles */ myfree(NJ->rates.rates, sizeof(numeric_t) * NJ->rates.nRateCategories); NJ->rates.rates = oldRates; RecomputeMLProfiles(/*IN/OUT*/NJ); return(site_loglk); } void SetMLRates(/*IN/OUT*/NJ_t *NJ, int nRateCategories) { assert(nRateCategories > 0); AllocRateCategories(/*IN/OUT*/&NJ->rates, 1, NJ->nPos); /* set to 1 category of rate 1 */ if (nRateCategories == 1) { RecomputeMLProfiles(/*IN/OUT*/NJ); return; } numeric_t *rates = MLSiteRates(nRateCategories); double *site_loglk = MLSiteLikelihoodsByRate(/*IN*/NJ, /*IN*/rates, nRateCategories); /* Select best rate for each site, correcting for the prior For a prior, use a gamma distribution with shape parameter 3, scale 1/3, so Prior(rate) ~ rate**2 * exp(-3*rate) log Prior(rate) = C + 2 * log(rate) - 3 * rate */ double sumRates = 0; int iPos; int iRate; for (iPos = 0; iPos < NJ->nPos; iPos++) { int iBest = -1; double dBest = -1e20; for (iRate = 0; iRate < nRateCategories; iRate++) { double site_loglk_with_prior = site_loglk[NJ->nPos*iRate + iPos] + 2.0 * log(rates[iRate]) - 3.0 * rates[iRate]; if (site_loglk_with_prior > dBest) { iBest = iRate; dBest = site_loglk_with_prior; } } if (verbose > 2) fprintf(stderr, "Selected rate category %d rate %.3f for position %d\n", iBest, rates[iBest], iPos+1); NJ->rates.ratecat[iPos] = iBest; sumRates += rates[iBest]; } site_loglk = myfree(site_loglk, sizeof(double)*NJ->nPos*nRateCategories); /* Force the rates to average to 1 */ double avgRate = sumRates/NJ->nPos; for (iRate = 0; iRate < nRateCategories; iRate++) rates[iRate] /= avgRate; /* Save the rates */ NJ->rates.rates = myfree(NJ->rates.rates, sizeof(numeric_t) * NJ->rates.nRateCategories); NJ->rates.rates = rates; NJ->rates.nRateCategories = nRateCategories; /* Update profiles based on rates */ RecomputeMLProfiles(/*IN/OUT*/NJ); if (verbose) { fprintf(stderr, "Switched to using %d rate categories (CAT approximation)\n", nRateCategories); fprintf(stderr, "Rate categories were divided by %.3f so that average rate = 1.0\n", avgRate); fprintf(stderr, "CAT-based log-likelihoods may not be comparable across runs\n"); if (!gammaLogLk) fprintf(stderr, "Use -gamma for approximate but comparable Gamma(20) log-likelihoods\n"); } } double GammaLogLk(/*IN*/siteratelk_t *s, /*OPTIONAL OUT*/double *gamma_loglk_sites) { int iRate, iPos; double *dRate = 
mymalloc(sizeof(double) * s->nRateCats); for (iRate = 0; iRate < s->nRateCats; iRate++) { /* The probability density for each rate is approximated by the total density between the midpoints */ double pMin = iRate == 0 ? 0.0 : PGamma(s->mult * (s->rates[iRate-1] + s->rates[iRate])/2.0, s->alpha); double pMax = iRate == s->nRateCats-1 ? 1.0 : PGamma(s->mult * (s->rates[iRate]+s->rates[iRate+1])/2.0, s->alpha); dRate[iRate] = pMax-pMin; } double loglk = 0.0; for (iPos = 0; iPos < s->nPos; iPos++) { /* Prevent underflow on large trees by comparing to maximum loglk */ double maxloglk = -1e20; for (iRate = 0; iRate < s->nRateCats; iRate++) { double site_loglk = s->site_loglk[s->nPos*iRate + iPos]; if (site_loglk > maxloglk) maxloglk = site_loglk; } double rellk = 0; /* likelihood scaled by exp(maxloglk) */ for (iRate = 0; iRate < s->nRateCats; iRate++) { double lk = exp(s->site_loglk[s->nPos*iRate + iPos] - maxloglk); rellk += lk * dRate[iRate]; } double loglk_site = maxloglk + log(rellk); loglk += loglk_site; if (gamma_loglk_sites != NULL) gamma_loglk_sites[iPos] = loglk_site; } dRate = myfree(dRate, sizeof(double)*s->nRateCats); return(loglk); } double OptAlpha(double alpha, void *data) { siteratelk_t *s = (siteratelk_t *)data; s->alpha = alpha; return(-GammaLogLk(s, NULL)); } double OptMult(double mult, void *data) { siteratelk_t *s = (siteratelk_t *)data; s->mult = mult; return(-GammaLogLk(s, NULL)); } /* Input site_loglk must be for each rate */ double RescaleGammaLogLk(int nPos, int nRateCats, /*IN*/numeric_t *rates, /*IN*/double *site_loglk, /*OPTIONAL*/FILE *fpLog) { siteratelk_t s = { /*mult*/1.0, /*alpha*/1.0, nPos, nRateCats, rates, site_loglk }; double fx, f2x; int i; fx = -GammaLogLk(&s, NULL); if (verbose>2) fprintf(stderr, "Optimizing alpha, starting at loglk %.3f\n", -fx); for (i = 0; i < 10; i++) { ProgressReport("Optimizing alpha round %d", i+1, 0, 0, 0); double start = fx; s.alpha = onedimenmin(0.01, s.alpha, 10.0, OptAlpha, &s, 0.001, 0.001, &fx, &f2x); if (verbose>2) fprintf(stderr, "Optimize alpha round %d to %.3f lk %.3f\n", i+1, s.alpha, -fx); s.mult = onedimenmin(0.01, s.mult, 10.0, OptMult, &s, 0.001, 0.001, &fx, &f2x); if (verbose>2) fprintf(stderr, "Optimize mult round %d to %.3f lk %.3f\n", i+1, s.mult, -fx); if (fx > start - 0.001) { if (verbose>2) fprintf(stderr, "Optimizing alpha & mult converged\n"); break; } } double *gamma_loglk_sites = mymalloc(sizeof(double) * nPos); double gammaLogLk = GammaLogLk(&s, /*OUT*/gamma_loglk_sites); if (verbose > 0) fprintf(stderr, "Gamma(%d) LogLk = %.3f alpha = %.3f rescaling lengths by %.3f\n", nRateCats, gammaLogLk, s.alpha, 1/s.mult); if (fpLog) { int iPos; int iRate; fprintf(fpLog, "Gamma%dLogLk\t%.3f\tApproximate\tAlpha\t%.3f\tRescale\t%.3f\n", nRateCats, gammaLogLk, s.alpha, 1/s.mult); fprintf(fpLog, "Gamma%d\tSite\tLogLk", nRateCats); for (iRate = 0; iRate < nRateCats; iRate++) fprintf(fpLog, "\tr=%.3f", rates[iRate]/s.mult); fprintf(fpLog,"\n"); for (iPos = 0; iPos < nPos; iPos++) { fprintf(fpLog, "Gamma%d\t%d\t%.3f", nRateCats, iPos, gamma_loglk_sites[iPos]); for (iRate = 0; iRate < nRateCats; iRate++) fprintf(fpLog, "\t%.3f", site_loglk[nPos*iRate + iPos]); fprintf(fpLog,"\n"); } } gamma_loglk_sites = myfree(gamma_loglk_sites, sizeof(double) * nPos); return(1.0/s.mult); } double MLPairOptimize(profile_t *pA, profile_t *pB, int nPos, /*OPTIONAL*/transition_matrix_t *transmat, rates_t *rates, /*IN/OUT*/double *branch_length) { double len5[5]; int j; for (j=0;j<5;j++) len5[j] = *branch_length; quartet_opt_t qopt = { nPos, 
transmat, rates, /*nEval*/0, /*pair1*/pA, /*pair2*/pB }; double f2x,negloglk; *branch_length = onedimenmin(/*xmin*/MLMinBranchLength, /*xguess*/*branch_length, /*xmax*/6.0, PairNegLogLk, /*data*/&qopt, /*ftol*/MLFTolBranchLength, /*atol*/MLMinBranchLengthTolerance, /*OUT*/&negloglk, /*OUT*/&f2x); return(-negloglk); /* the log likelihood */ } void OptimizeAllBranchLengths(/*IN/OUT*/NJ_t *NJ) { if (NJ->nSeq < 2) return; if (NJ->nSeq == 2) { int parent = NJ->root; assert(NJ->child[parent].nChild==2); int nodes[2] = { NJ->child[parent].child[0], NJ->child[parent].child[1] }; double length = 1.0; (void)MLPairOptimize(NJ->profiles[nodes[0]], NJ->profiles[nodes[1]], NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/&length); NJ->branchlength[nodes[0]] = length/2.0; NJ->branchlength[nodes[1]] = length/2.0; return; }; traversal_t traversal = InitTraversal(NJ); profile_t **upProfiles = UpProfiles(NJ); int node = NJ->root; int iDone = 0; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { int nChild = NJ->child[node].nChild; if (nChild > 0) { if ((iDone % 100) == 0) ProgressReport("ML Lengths %d of %d splits", iDone+1, NJ->maxnode - NJ->nSeq, 0, 0); iDone++; /* optimize the branch lengths between self, parent, and children, with two iterations */ assert(nChild == 2 || nChild == 3); int nodes[3] = { NJ->child[node].child[0], NJ->child[node].child[1], nChild == 3 ? NJ->child[node].child[2] : node }; profile_t *profiles[3] = { NJ->profiles[nodes[0]], NJ->profiles[nodes[1]], nChild == 3 ? NJ->profiles[nodes[2]] : GetUpProfile(/*IN/OUT*/upProfiles, NJ, node, /*useML*/true) }; int iter; for (iter = 0; iter < 2; iter++) { int i; for (i = 0; i < 3; i++) { profile_t *pA = profiles[i]; int b1 = (i+1) % 3; int b2 = (i+2) % 3; profile_t *pB = PosteriorProfile(profiles[b1], profiles[b2], NJ->branchlength[nodes[b1]], NJ->branchlength[nodes[b2]], NJ->transmat, &NJ->rates, NJ->nPos, /*nConstraints*/0); double len = NJ->branchlength[nodes[i]]; if (len < MLMinBranchLength) len = MLMinBranchLength; (void)MLPairOptimize(pA, pB, NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/&len); NJ->branchlength[nodes[i]] = len; pB = FreeProfile(pB, NJ->nPos, /*nConstraints*/0); if (verbose>3) fprintf(stderr, "Optimize length for %d to %.3f\n", nodes[i], NJ->branchlength[nodes[i]]); } } if (node != NJ->root) { RecomputeProfile(/*IN/OUT*/NJ, /*IN/OUT*/upProfiles, node, /*useML*/true); DeleteUpProfile(upProfiles, NJ, node); } } } traversal = FreeTraversal(traversal,NJ); upProfiles = FreeUpProfiles(upProfiles,NJ); } void RecomputeMLProfiles(/*IN/OUT*/NJ_t *NJ) { traversal_t traversal = InitTraversal(NJ); int node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { if (NJ->child[node].nChild == 2) { NJ->profiles[node] = FreeProfile(NJ->profiles[node], NJ->nPos, NJ->nConstraints); int *children = NJ->child[node].child; NJ->profiles[node] = PosteriorProfile(NJ->profiles[children[0]], NJ->profiles[children[1]], NJ->branchlength[children[0]], NJ->branchlength[children[1]], NJ->transmat, &NJ->rates, NJ->nPos, NJ->nConstraints); } } traversal = FreeTraversal(traversal, NJ); } void RecomputeProfiles(/*IN/OUT*/NJ_t *NJ, /*OPTIONAL*/distance_matrix_t *dmat) { traversal_t traversal = InitTraversal(NJ); int node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { if (NJ->child[node].nChild == 2) { int *child = NJ->child[node].child; NJ->profiles[node] = FreeProfile(NJ->profiles[node], NJ->nPos, NJ->nConstraints); NJ->profiles[node] = 
AverageProfile(NJ->profiles[child[0]], NJ->profiles[child[1]], NJ->nPos, NJ->nConstraints, dmat, /*unweighted*/-1.0); } } traversal = FreeTraversal(traversal,NJ); } int NNI(/*IN/OUT*/NJ_t *NJ, int iRound, int nRounds, bool useML, /*IN/OUT*/nni_stats_t *stats, /*OUT*/double *dMaxDelta) { /* For each non-root node N, with children A,B, sibling C, and uncle D, we compare the current topology AB|CD to the alternate topologies AC|BD and AD|BC, by using the 4 relevant profiles. If useML is true, it uses quartet maximum likelihood, and it updates branch lengths as it goes. If useML is false, it uses the minimum-evolution criterion with log-corrected distances on profiles. (If logdist is false, then the log correction is not done.) If useML is false, then NNI() does NOT modify the branch lengths. Regardless of whether it changes the topology, it recomputes the profile for the node, using the pairwise distances and BIONJ-like weightings (if bionj is set). The parent's profile has changed, but recomputing it is not necessary because we will visit it before we need it (we use postorder, so we may visit the sibling and its children before we visit the parent, but we never consider an ancestor's profile, so that is OK). When we change the parent's profile, this alters the uncle's up-profile, so we remove that. Finally, if the topology has changed, we remove the up-profiles of the nodes. If we do an NNI during post-order traversal, the result is a bit tricky. E.g. if we are at node N, and have visited its children A and B but not its uncle C, and we do an NNI that swaps B & C, then the post-order traversal will visit C, and its children, but then on the way back up, it will skip N, as it has already visited it. So, the profile of N will not be recomputed: any changes beneath C will not be reflected in the profile of N, and the profile of N will be slightly stale. This will be corrected on the next round of NNIs. */ double supportThreshold = useML ? treeLogLkDelta : MEMinDelta; int i; *dMaxDelta = 0.0; int nNNIThisRound = 0; if (NJ->nSeq <= 3) return(0); /* nothing to do */ if (verbose > 2) { fprintf(stderr, "Beginning round %d of NNIs with ml? %d\n", iRound, useML?1:0); PrintNJInternal(/*WRITE*/stderr, NJ, /*useLen*/useML && iRound > 0 ? 
1 : 0); } /* For each node the upProfile or NULL */ profile_t **upProfiles = UpProfiles(NJ); traversal_t traversal = InitTraversal(NJ); /* Identify nodes we can skip traversing into */ int node; if (fastNNI) { for (node = 0; node < NJ->maxnode; node++) { if (node != NJ->root && node >= NJ->nSeq && stats[node].age >= 2 && stats[node].subtreeAge >= 2 && stats[node].support > supportThreshold) { int nodeABCD[4]; SetupABCD(NJ, node, NULL, NULL, /*OUT*/nodeABCD, useML); for (i = 0; i < 4; i++) if (stats[nodeABCD[i]].age == 0 && stats[nodeABCD[i]].support > supportThreshold) break; if (i == 4) { SkipTraversalInto(node, /*IN/OUT*/traversal); if (verbose > 2) fprintf(stderr, "Skipping subtree at %d: child %d %d parent %d age %d subtreeAge %d support %.3f\n", node, nodeABCD[0], nodeABCD[1], NJ->parent[node], stats[node].age, stats[node].subtreeAge, stats[node].support); } } } } int iDone = 0; bool bUp; node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, &bUp)) >= 0) { if (node < NJ->nSeq || node == NJ->root) continue; /* nothing to do for leaves or root */ if (bUp) { if(verbose > 2) fprintf(stderr, "Going up back to node %d\n", node); /* No longer needed */ for (i = 0; i < NJ->child[node].nChild; i++) DeleteUpProfile(upProfiles, NJ, NJ->child[node].child[i]); DeleteUpProfile(upProfiles, NJ, node); RecomputeProfile(/*IN/OUT*/NJ, /*IN/OUT*/upProfiles, node, useML); continue; } if ((iDone % 100) == 0) { char buf[100]; sprintf(buf, "%s NNI round %%d of %%d, %%d of %%d splits", useML ? "ML" : "ME"); if (iDone > 0) sprintf(buf+strlen(buf), ", %d changes", nNNIThisRound); if (nNNIThisRound > 0) sprintf(buf+strlen(buf), " (max delta %.3f)", *dMaxDelta); ProgressReport(buf, iRound+1, nRounds, iDone+1, NJ->maxnode - NJ->nSeq); } iDone++; profile_t *profiles[4]; int nodeABCD[4]; /* Note -- during the first round of ML NNIs, we use the min-evo-based branch lengths, which may be suboptimal */ SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, useML); /* Given our 4 profiles, consider doing a swap */ int nodeA = nodeABCD[0]; int nodeB = nodeABCD[1]; int nodeC = nodeABCD[2]; int nodeD = nodeABCD[3]; nni_t choice = ABvsCD; if (verbose > 2) fprintf(stderr,"Considering NNI around %d: Swap A=%d B=%d C=%d D=up(%d) or parent %d\n", node, nodeA, nodeB, nodeC, nodeD, NJ->parent[node]); if (verbose > 3 && useML) { double len[5] = { NJ->branchlength[nodeA], NJ->branchlength[nodeB], NJ->branchlength[nodeC], NJ->branchlength[nodeD], NJ->branchlength[node] }; for (i=0; i < 5; i++) if (len[i] < MLMinBranchLength) len[i] = MLMinBranchLength; fprintf(stderr, "Starting quartet likelihood %.3f len %.3f %.3f %.3f %.3f %.3f\n", MLQuartetLogLk(profiles[0],profiles[1],profiles[2],profiles[3],NJ->nPos,NJ->transmat,&NJ->rates,len, /*site_lk*/NULL), len[0], len[1], len[2], len[3], len[4]); } numeric_t newlength[5]; double criteria[3]; if (useML) { for (i = 0; i < 4; i++) newlength[i] = NJ->branchlength[nodeABCD[i]]; newlength[4] = NJ->branchlength[node]; bool bFast = mlAccuracy < 2 && stats[node].age > 0; choice = MLQuartetNNI(profiles, NJ->transmat, &NJ->rates, NJ->nPos, NJ->nConstraints, /*OUT*/criteria, /*IN/OUT*/newlength, bFast); } else { choice = ChooseNNI(profiles, NJ->distance_matrix, NJ->nPos, NJ->nConstraints, /*OUT*/criteria); /* invert criteria so that higher is better, as in ML case, to simplify code below */ for (i = 0; i < 3; i++) criteria[i] = -criteria[i]; } if (choice == ACvsBD) { /* swap B and C */ ReplaceChild(/*IN/OUT*/NJ, node, nodeB, nodeC); 
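      /* the matching ReplaceChild just below hands nodeB to the parent in place of
         nodeC, so node ends up with children A and C: topology AB|CD becomes AC|BD */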
      ReplaceChild(/*IN/OUT*/NJ, NJ->parent[node], nodeC, nodeB);
    } else if (choice == ADvsBC) {
      /* swap A and C */
      ReplaceChild(/*IN/OUT*/NJ, node, nodeA, nodeC);
      ReplaceChild(/*IN/OUT*/NJ, NJ->parent[node], nodeC, nodeA);
    }
    if (useML) {
      /* update branch length for the internal branch, and of any branches that lead to leaves,
         b/c those are not the internal branch for an NNI and would not otherwise be set. */
      if (choice == ADvsBC) {
        /* For ADvsBC, MLQuartetNNI swaps B with D, but we swap A with C */
        double length2[5] = { newlength[LEN_C], newlength[LEN_D], newlength[LEN_A], newlength[LEN_B], newlength[LEN_I] };
        int i;
        for (i = 0; i < 5; i++) newlength[i] = length2[i];
        /* and swap A and C */
        double tmp = newlength[LEN_A];
        newlength[LEN_A] = newlength[LEN_C];
        newlength[LEN_C] = tmp;
      } else if (choice == ACvsBD) {
        /* swap B and C */
        double tmp = newlength[LEN_B];
        newlength[LEN_B] = newlength[LEN_C];
        newlength[LEN_C] = tmp;
      }
      NJ->branchlength[node] = newlength[LEN_I];
      NJ->branchlength[nodeA] = newlength[LEN_A];
      NJ->branchlength[nodeB] = newlength[LEN_B];
      NJ->branchlength[nodeC] = newlength[LEN_C];
      NJ->branchlength[nodeD] = newlength[LEN_D];
    }
    if (verbose>2 && (choice != ABvsCD || verbose > 2))
      fprintf(stderr,"NNI around %d: Swap A=%d B=%d C=%d D=out(C) -- choose %s %s %.4f\n",
              node, nodeA, nodeB, nodeC,
              choice == ACvsBD ? "AC|BD" : (choice == ABvsCD ? "AB|CD" : "AD|BC"),
              useML ? "delta-loglk" : "-deltaLen",
              criteria[choice] - criteria[ABvsCD]);
    if(verbose >= 3 && slow && useML)
      fprintf(stderr, "Old tree lk -- %.4f\n", TreeLogLk(NJ, /*site_likelihoods*/NULL));
    /* update stats, *dMaxDelta, etc. */
    if (choice == ABvsCD) {
      stats[node].age++;
    } else {
      if (useML) nML_NNI++;
      else nNNI++;
      nNNIThisRound++;
      stats[node].age = 0;
      stats[nodeA].age = 0;
      stats[nodeB].age = 0;
      stats[nodeC].age = 0;
      stats[nodeD].age = 0;
    }
    stats[node].delta = criteria[choice] - criteria[ABvsCD]; /* 0 if ABvsCD */
    if (stats[node].delta > *dMaxDelta)
      *dMaxDelta = stats[node].delta;
    /* support is improvement of score for self over better of alternatives */
    stats[node].support = 1e20;
    for (i = 0; i < 3; i++)
      if (choice != i && criteria[choice]-criteria[i] < stats[node].support)
        stats[node].support = criteria[choice]-criteria[i];
    /* subtreeAge is the number of rounds since self or descendant had a significant improvement */
    if (stats[node].delta > supportThreshold)
      stats[node].subtreeAge = 0;
    else {
      stats[node].subtreeAge++;
      for (i = 0; i < 2; i++) {
        int child = NJ->child[node].child[i];
        if (stats[node].subtreeAge > stats[child].subtreeAge)
          stats[node].subtreeAge = stats[child].subtreeAge;
      }
    }
    /* update profiles and free up unneeded up-profiles */
    if (choice == ABvsCD) {
      /* No longer needed */
      DeleteUpProfile(upProfiles, NJ, nodeA);
      DeleteUpProfile(upProfiles, NJ, nodeB);
      DeleteUpProfile(upProfiles, NJ, nodeC);
      RecomputeProfile(/*IN/OUT*/NJ, /*IN/OUT*/upProfiles, node, useML);
      if(slow && useML)
        UpdateForNNI(NJ, node, upProfiles, useML);
    } else {
      UpdateForNNI(NJ, node, upProfiles, useML);
    }
    if(verbose > 2 && slow && useML) {
      /* Note we recomputed profiles back up to root already if slow */
      PrintNJInternal(/*WRITE*/stderr, NJ, /*useLen*/true);
      fprintf(stderr, "New tree lk -- %.4f\n", TreeLogLk(NJ, /*site_likelihoods*/NULL));
    }
  } /* end postorder traversal */
  traversal = FreeTraversal(traversal,NJ);
  if (verbose>=2) {
    int nUp = 0;
    for (i = 0; i < NJ->maxnodes; i++)
      if (upProfiles[i] != NULL) nUp++;
    fprintf(stderr, "N up profiles at end of NNI: %d\n", nUp);
  }
  upProfiles = FreeUpProfiles(upProfiles,NJ);
  return(nNNIThisRound);
}
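/* Worked example (an illustrative sketch, not called anywhere by FastTree): how the
   minimum-evolution criterion above chooses among the three NNI topologies of a quartet,
   given the six corrected pairwise distances d[qAB..qCD] (as produced by
   CorrectedPairDistances). ChooseNNI() computes criteria of this form from the profiles;
   the function name and the idea of passing pre-computed distances are hypothetical here.
   For example, with d(A,B)=0.10, d(C,D)=0.12, d(A,C)=0.30, d(B,D)=0.33, d(A,D)=0.32 and
   d(B,C)=0.31, the scores are AB|CD=0.22, AC|BD=0.63 and AD|BC=0.63, so the current
   topology AB|CD is kept. */
static nni_t ExampleChooseNNIFromQuartetDistances(double d[6], /*OUT*/double criteria[3]) {
  criteria[ABvsCD] = d[qAB] + d[qCD];   /* keep the current topology */
  criteria[ACvsBD] = d[qAC] + d[qBD];   /* swap B and C */
  criteria[ADvsBC] = d[qAD] + d[qBC];   /* swap B and D */
  nni_t choice = ABvsCD;
  if (criteria[ACvsBD] < criteria[choice]) choice = ACvsBD;
  if (criteria[ADvsBC] < criteria[choice]) choice = ADvsBC;
  return(choice);
}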
nni_stats_t *InitNNIStats(NJ_t *NJ) {
  nni_stats_t *stats = mymalloc(sizeof(nni_stats_t)*NJ->maxnode);
  const int LargeAge = 1000000;
  int i;
  for (i = 0; i < NJ->maxnode; i++) {
    stats[i].delta = 0;
    stats[i].support = 0;
    if (i == NJ->root || i < NJ->nSeq) {
      stats[i].age = LargeAge;
      stats[i].subtreeAge = LargeAge;
    } else {
      stats[i].age = 0;
      stats[i].subtreeAge = 0;
    }
  }
  return(stats);
}

nni_stats_t *FreeNNIStats(nni_stats_t *stats, NJ_t *NJ) {
  return(myfree(stats, sizeof(nni_stats_t)*NJ->maxnode));
}

int FindSPRSteps(/*IN/OUT*/NJ_t *NJ,
                 int nodeMove,   /* the node to move multiple times */
                 int nodeAround, /* sibling or parent of node to NNI to start the chain */
                 /*IN/OUT*/profile_t **upProfiles,
                 /*OUT*/spr_step_t *steps,
                 int maxSteps,
                 bool bFirstAC) {
  int iStep;
  for (iStep = 0; iStep < maxSteps; iStep++) {
    if (NJ->child[nodeAround].nChild != 2)
      break; /* no further to go */
    /* Consider the NNIs around nodeAround */
    profile_t *profiles[4];
    int nodeABCD[4];
    SetupABCD(NJ, nodeAround, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false);
    double criteria[3];
    (void) ChooseNNI(profiles, NJ->distance_matrix, NJ->nPos, NJ->nConstraints, /*OUT*/criteria);
    /* Do & save the swap */
    spr_step_t *step = &steps[iStep];
    if (iStep == 0 ? bFirstAC : criteria[ACvsBD] < criteria[ADvsBC]) {
      /* swap B & C to put AC together */
      step->deltaLength = criteria[ACvsBD] - criteria[ABvsCD];
      step->nodes[0] = nodeABCD[1];
      step->nodes[1] = nodeABCD[2];
    } else {
      /* swap A & C to put AD together */
      step->deltaLength = criteria[ADvsBC] - criteria[ABvsCD];
      step->nodes[0] = nodeABCD[0];
      step->nodes[1] = nodeABCD[2];
    }
    if (verbose>3) {
      fprintf(stderr, "SPR chain step %d for %d around %d swap %d %d deltaLen %.5f\n",
              iStep+1, nodeAround, nodeMove, step->nodes[0], step->nodes[1], step->deltaLength);
      if (verbose>4)
        PrintNJInternal(stderr, NJ, /*useLen*/false);
    }
    ReplaceChild(/*IN/OUT*/NJ, nodeAround, step->nodes[0], step->nodes[1]);
    ReplaceChild(/*IN/OUT*/NJ, NJ->parent[nodeAround], step->nodes[1], step->nodes[0]);
    UpdateForNNI(/*IN/OUT*/NJ, nodeAround, /*IN/OUT*/upProfiles, /*useML*/false);
    /* set the new nodeAround -- either parent(nodeMove) or sibling(nodeMove) --
       so that it is different from the current nodeAround */
    int newAround[2] = { NJ->parent[nodeMove], Sibling(NJ, nodeMove) };
    if (NJ->parent[nodeMove] == NJ->root)
      RootSiblings(NJ, nodeMove, /*OUT*/newAround);
    assert(newAround[0] == nodeAround || newAround[1] == nodeAround);
    assert(newAround[0] != newAround[1]);
    nodeAround = newAround[newAround[0] == nodeAround ?
1 : 0]; }
  return(iStep);
}

void UnwindSPRStep(/*IN/OUT*/NJ_t *NJ, /*IN*/spr_step_t *step, /*IN/OUT*/profile_t **upProfiles) {
  int parents[2];
  int i;
  for (i = 0; i < 2; i++) {
    assert(step->nodes[i] >= 0 && step->nodes[i] < NJ->maxnodes);
    parents[i] = NJ->parent[step->nodes[i]];
    assert(parents[i] >= 0);
  }
  assert(parents[0] != parents[1]);
  ReplaceChild(/*IN/OUT*/NJ, parents[0], step->nodes[0], step->nodes[1]);
  ReplaceChild(/*IN/OUT*/NJ, parents[1], step->nodes[1], step->nodes[0]);
  int iYounger = 0;
  if (NJ->parent[parents[0]] == parents[1]) {
    iYounger = 0;
  } else {
    assert(NJ->parent[parents[1]] == parents[0]);
    iYounger = 1;
  }
  UpdateForNNI(/*IN/OUT*/NJ, parents[iYounger], /*IN/OUT*/upProfiles, /*useML*/false);
}

/* Update the profile of node and its ancestor, and delete nearby up-profiles */
void UpdateForNNI(/*IN/OUT*/NJ_t *NJ, int node, /*IN/OUT*/profile_t **upProfiles, bool useML) {
  int i;
  if (slow) {
    /* exhaustive update */
    for (i = 0; i < NJ->maxnodes; i++)
      DeleteUpProfile(upProfiles, NJ, i);
    /* update profiles back to root */
    int ancestor;
    for (ancestor = node; ancestor >= 0; ancestor = NJ->parent[ancestor])
      RecomputeProfile(/*IN/OUT*/NJ, upProfiles, ancestor, useML);
    /* remove any up-profiles made while doing that */
    for (i = 0; i < NJ->maxnodes; i++)
      DeleteUpProfile(upProfiles, NJ, i);
  } else {
    /* if fast, only update around self. Note that upProfile(parent) is still OK
       after an NNI, but up-profiles of uncles may not be */
    DeleteUpProfile(upProfiles, NJ, node);
    for (i = 0; i < NJ->child[node].nChild; i++)
      DeleteUpProfile(upProfiles, NJ, NJ->child[node].child[i]);
    assert(node != NJ->root);
    int parent = NJ->parent[node];
    int neighbors[2] = { parent, Sibling(NJ, node) };
    if (parent == NJ->root)
      RootSiblings(NJ, node, /*OUT*/neighbors);
    DeleteUpProfile(upProfiles, NJ, neighbors[0]);
    DeleteUpProfile(upProfiles, NJ, neighbors[1]);
    int uncle = Sibling(NJ, parent);
    if (uncle >= 0)
      DeleteUpProfile(upProfiles, NJ, uncle);
    RecomputeProfile(/*IN/OUT*/NJ, upProfiles, node, useML);
    RecomputeProfile(/*IN/OUT*/NJ, upProfiles, parent, useML);
  }
}

void SPR(/*IN/OUT*/NJ_t *NJ, int maxSPRLength, int iRound, int nRounds) {
  /* Given a non-root node N with children A,B, sibling C, and uncle D,
     we can try to move A by doing three types of moves (4 choices):
        "down" -- swap A with a child of B (if B is not a leaf) [2 choices]
        "over" -- swap B with C
        "up"   -- swap A with D
     We follow down moves with down moves, over moves with down moves, and up moves
     with either up or over moves. (Other choices are just backing up and hence useless.)
     As with NNIs, we keep track of up-profiles as we go. However, some of the regular
     profiles may also become "stale" so it is a bit trickier.
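     Each chain of candidate moves is recorded in steps[] by FindSPRSteps(); after scoring
     the whole chain, SPR() keeps only the prefix with the best total change in tree length
     and calls UnwindSPRStep() to undo the remaining swaps.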
We store the traversal before we do SPRs to avoid any possible infinite loop */ double last_tot_len = 0.0; if (NJ->nSeq <= 3 || maxSPRLength < 1) return; if (slow) last_tot_len = TreeLength(NJ, /*recomputeLengths*/true); int *nodeList = mymalloc(sizeof(int) * NJ->maxnodes); int nodeListLen = 0; traversal_t traversal = InitTraversal(NJ); int node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { nodeList[nodeListLen++] = node; } assert(nodeListLen == NJ->maxnode); traversal = FreeTraversal(traversal,NJ); profile_t **upProfiles = UpProfiles(NJ); spr_step_t *steps = mymalloc(sizeof(spr_step_t) * maxSPRLength); /* current chain of SPRs */ int i; for (i = 0; i < nodeListLen; i++) { node = nodeList[i]; if ((i % 100) == 0) ProgressReport("SPR round %3d of %3d, %d of %d nodes", iRound+1, nRounds, i+1, nodeListLen); if (node == NJ->root) continue; /* nothing to do for root */ /* The nodes to NNI around */ int nodeAround[2] = { NJ->parent[node], Sibling(NJ, node) }; if (NJ->parent[node] == NJ->root) { /* NNI around both siblings instead */ RootSiblings(NJ, node, /*OUT*/nodeAround); } bool bChanged = false; int iAround; for (iAround = 0; iAround < 2 && bChanged == false; iAround++) { int ACFirst; for (ACFirst = 0; ACFirst < 2 && bChanged == false; ACFirst++) { if(verbose > 3) PrintNJInternal(stderr, NJ, /*useLen*/false); int chainLength = FindSPRSteps(/*IN/OUT*/NJ, node, nodeAround[iAround], upProfiles, /*OUT*/steps, maxSPRLength, (bool)ACFirst); double dMinDelta = 0.0; int iCBest = -1; double dTotDelta = 0.0; int iC; for (iC = 0; iC < chainLength; iC++) { dTotDelta += steps[iC].deltaLength; if (dTotDelta < dMinDelta) { dMinDelta = dTotDelta; iCBest = iC; } } if (verbose>3) { fprintf(stderr, "SPR %s %d around %d chainLength %d of %d deltaLength %.5f swaps:", iCBest >= 0 ? 
"move" : "abandoned", node,nodeAround[iAround],iCBest+1,chainLength,dMinDelta); for (iC = 0; iC < chainLength; iC++) fprintf(stderr, " (%d,%d)%.4f", steps[iC].nodes[0], steps[iC].nodes[1], steps[iC].deltaLength); fprintf(stderr,"\n"); } for (iC = chainLength - 1; iC > iCBest; iC--) UnwindSPRStep(/*IN/OUT*/NJ, /*IN*/&steps[iC], /*IN/OUT*/upProfiles); if(verbose > 3) PrintNJInternal(stderr, NJ, /*useLen*/false); while (slow && iCBest >= 0) { double expected_tot_len = last_tot_len + dMinDelta; double new_tot_len = TreeLength(NJ, /*recompute*/true); if (verbose > 2) fprintf(stderr, "Total branch-length is now %.4f was %.4f expected %.4f\n", new_tot_len, last_tot_len, expected_tot_len); if (new_tot_len < last_tot_len) { last_tot_len = new_tot_len; break; /* no rewinding necessary */ } if (verbose > 2) fprintf(stderr, "Rewinding SPR to %d\n",iCBest); UnwindSPRStep(/*IN/OUT*/NJ, /*IN*/&steps[iCBest], /*IN/OUT*/upProfiles); dMinDelta -= steps[iCBest].deltaLength; iCBest--; } if (iCBest >= 0) bChanged = true; } /* loop over which step to take at 1st NNI */ } /* loop over which node to pivot around */ if (bChanged) { nSPR++; /* the SPR move is OK */ /* make sure all the profiles are OK */ int j; for (j = 0; j < NJ->maxnodes; j++) DeleteUpProfile(upProfiles, NJ, j); int ancestor; for (ancestor = NJ->parent[node]; ancestor >= 0; ancestor = NJ->parent[ancestor]) RecomputeProfile(/*IN/OUT*/NJ, upProfiles, ancestor, /*useML*/false); } } /* end loop over subtrees to prune & regraft */ steps = myfree(steps, sizeof(spr_step_t) * maxSPRLength); upProfiles = FreeUpProfiles(upProfiles,NJ); nodeList = myfree(nodeList, sizeof(int) * NJ->maxnodes); } void RecomputeProfile(/*IN/OUT*/NJ_t *NJ, /*IN/OUT*/profile_t **upProfiles, int node, bool useML) { if (node < NJ->nSeq || node == NJ->root) return; /* no profile to compute */ assert(NJ->child[node].nChild==2); profile_t *profiles[4]; double weight = 0.5; if (useML || !bionj) { profiles[0] = NJ->profiles[NJ->child[node].child[0]]; profiles[1] = NJ->profiles[NJ->child[node].child[1]]; } else { int nodeABCD[4]; SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, useML); weight = QuartetWeight(profiles, NJ->distance_matrix, NJ->nPos); } if (verbose>3) { if (useML) { fprintf(stderr, "Recompute %d from %d %d lengths %.4f %.4f\n", node, NJ->child[node].child[0], NJ->child[node].child[1], NJ->branchlength[NJ->child[node].child[0]], NJ->branchlength[NJ->child[node].child[1]]); } else { fprintf(stderr, "Recompute %d from %d %d weight %.3f\n", node, NJ->child[node].child[0], NJ->child[node].child[1], weight); } } NJ->profiles[node] = FreeProfile(NJ->profiles[node], NJ->nPos, NJ->nConstraints); if (useML) { NJ->profiles[node] = PosteriorProfile(profiles[0], profiles[1], NJ->branchlength[NJ->child[node].child[0]], NJ->branchlength[NJ->child[node].child[1]], NJ->transmat, &NJ->rates, NJ->nPos, NJ->nConstraints); } else { NJ->profiles[node] = AverageProfile(profiles[0], profiles[1], NJ->nPos, NJ->nConstraints, NJ->distance_matrix, weight); } } /* The BIONJ-like formula for the weight of A when building a profile for AB is 1/2 + (avgD(B,CD) - avgD(A,CD))/(2*d(A,B)) */ double QuartetWeight(profile_t *profiles[4], distance_matrix_t *dmat, int nPos) { if (!bionj) return(-1.0); /* even weighting */ double d[6]; CorrectedPairDistances(profiles, 4, dmat, nPos, /*OUT*/d); if (d[qAB] < 0.01) return -1.0; double weight = 0.5 + ((d[qBC]+d[qBD])-(d[qAC]+d[qAD]))/(4*d[qAB]); if (weight < 0) weight = 0; if (weight > 1) weight = 1; return (weight); } /* Resets the 
children entry of parent and also the parent entry of newchild */ void ReplaceChild(/*IN/OUT*/NJ_t *NJ, int parent, int oldchild, int newchild) { NJ->parent[newchild] = parent; int iChild; for (iChild = 0; iChild < NJ->child[parent].nChild; iChild++) { if (NJ->child[parent].child[iChild] == oldchild) { NJ->child[parent].child[iChild] = newchild; return; } } assert(0); } /* Recomputes all branch lengths For internal branches such as (A,B) vs. (C,D), uses the formula length(AB|CD) = (d(A,C)+d(A,D)+d(B,C)+d(B,D))/4 - d(A,B)/2 - d(C,D)/2 (where all distances are profile distances - diameters). For external branches (e.g. to leaves) A vs. (B,C), use the formula length(A|BC) = (d(A,B)+d(A,C)-d(B,C))/2 */ void UpdateBranchLengths(/*IN/OUT*/NJ_t *NJ) { if (NJ->nSeq < 2) return; else if (NJ->nSeq == 2) { int root = NJ->root; int nodeA = NJ->child[root].child[0]; int nodeB = NJ->child[root].child[1]; besthit_t h; ProfileDist(NJ->profiles[nodeA],NJ->profiles[nodeB], NJ->nPos, NJ->distance_matrix, /*OUT*/&h); if (logdist) h.dist = LogCorrect(h.dist); NJ->branchlength[nodeA] = h.dist/2.0; NJ->branchlength[nodeB] = h.dist/2.0; return; } profile_t **upProfiles = UpProfiles(NJ); traversal_t traversal = InitTraversal(NJ); int node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { /* reset branch length of node (distance to its parent) */ if (node == NJ->root) continue; /* no branch length to set */ if (node < NJ->nSeq) { /* a leaf */ profile_t *profileA = NJ->profiles[node]; profile_t *profileB = NULL; profile_t *profileC = NULL; int sib = Sibling(NJ,node); if (sib == -1) { /* at root, have 2 siblings */ int sibs[2]; RootSiblings(NJ, node, /*OUT*/sibs); profileB = NJ->profiles[sibs[0]]; profileC = NJ->profiles[sibs[1]]; } else { profileB = NJ->profiles[sib]; profileC = GetUpProfile(/*IN/OUT*/upProfiles, NJ, NJ->parent[node], /*useML*/false); } profile_t *profiles[3] = {profileA,profileB,profileC}; double d[3]; /*AB,AC,BC*/ CorrectedPairDistances(profiles, 3, NJ->distance_matrix, NJ->nPos, /*OUT*/d); /* d(A,BC) = (dAB+dAC-dBC)/2 */ NJ->branchlength[node] = (d[0]+d[1]-d[2])/2.0; } else { profile_t *profiles[4]; int nodeABCD[4]; SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false); double d[6]; CorrectedPairDistances(profiles, 4, NJ->distance_matrix, NJ->nPos, /*OUT*/d); NJ->branchlength[node] = (d[qAC]+d[qAD]+d[qBC]+d[qBD])/4.0 - (d[qAB]+d[qCD])/2.0; /* no longer needed */ DeleteUpProfile(upProfiles, NJ, nodeABCD[0]); DeleteUpProfile(upProfiles, NJ, nodeABCD[1]); } } traversal = FreeTraversal(traversal,NJ); upProfiles = FreeUpProfiles(upProfiles,NJ); } /* Pick columns for resampling, stored as returned_vector[iBoot*nPos + j] */ int *ResampleColumns(int nPos, int nBootstrap) { long lPos = nPos; /* to prevent overflow on very long alignments when multiplying nPos * nBootstrap */ int *col = (int*)mymalloc(sizeof(int)*lPos*(size_t)nBootstrap); int i; for (i = 0; i < nBootstrap; i++) { int j; for (j = 0; j < nPos; j++) { int pos = (int)(knuth_rand() * nPos); if (pos<0) pos = 0; else if (pos == nPos) pos = nPos-1; col[i*lPos + j] = pos; } } if (verbose > 5) { for (i=0; i < 3 && i < nBootstrap; i++) { fprintf(stderr,"Boot%d",i); int j; for (j = 0; j < nPos; j++) { fprintf(stderr,"\t%d",col[i*lPos+j]); } fprintf(stderr,"\n"); } } return(col); } void ReliabilityNJ(/*IN/OUT*/NJ_t *NJ, int nBootstrap) { /* For each non-root node N, with children A,B, parent P, sibling C, and grandparent G, we test the reliability of the split (A,B) 
versus rest by comparing the profiles of A, B, C, and the "up-profile" of P. Each node's upProfile is the average of its sibling's (down)-profile + its parent's up-profile (If node's parent is the root, then there are two siblings and we don't need an up-profile) To save memory, we do depth-first-search down from the root, and we only keep up-profiles for nodes in the active path. */ if (NJ->nSeq <= 3 || nBootstrap <= 0) return; /* nothing to do */ int *col = ResampleColumns(NJ->nPos, nBootstrap); profile_t **upProfiles = UpProfiles(NJ); traversal_t traversal = InitTraversal(NJ); int node = NJ->root; int iNodesDone = 0; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { if (node < NJ->nSeq || node == NJ->root) continue; /* nothing to do for leaves or root */ if(iNodesDone > 0 && (iNodesDone % 100) == 0) ProgressReport("Local bootstrap for %6d of %6d internal splits", iNodesDone, NJ->nSeq-3, 0, 0); iNodesDone++; profile_t *profiles[4]; int nodeABCD[4]; SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false); NJ->support[node] = SplitSupport(profiles[0], profiles[1], profiles[2], profiles[3], NJ->distance_matrix, NJ->nPos, nBootstrap, col); /* no longer needed */ DeleteUpProfile(upProfiles, NJ, nodeABCD[0]); DeleteUpProfile(upProfiles, NJ, nodeABCD[1]); DeleteUpProfile(upProfiles, NJ, nodeABCD[2]); } traversal = FreeTraversal(traversal,NJ); upProfiles = FreeUpProfiles(upProfiles,NJ); col = myfree(col, sizeof(int)*((size_t)NJ->nPos)*nBootstrap); } profile_t *NewProfile(int nPos, int nConstraints) { profile_t *profile = (profile_t *)mymalloc(sizeof(profile_t)); profile->weights = mymalloc(sizeof(numeric_t)*nPos); profile->codes = mymalloc(sizeof(unsigned char)*nPos); profile->vectors = NULL; profile->nVectors = 0; profile->codeDist = NULL; if (nConstraints == 0) { profile->nOn = NULL; profile->nOff = NULL; } else { profile->nOn = mymalloc(sizeof(int)*nConstraints); profile->nOff = mymalloc(sizeof(int)*nConstraints); } return(profile); } profile_t *FreeProfile(profile_t *profile, int nPos, int nConstraints) { if(profile==NULL) return(NULL); myfree(profile->codes, nPos); myfree(profile->weights, nPos); myfree(profile->vectors, sizeof(numeric_t)*nCodes*profile->nVectors); myfree(profile->codeDist, sizeof(numeric_t)*nCodes*nPos); if (nConstraints > 0) { myfree(profile->nOn, sizeof(int)*nConstraints); myfree(profile->nOff, sizeof(int)*nConstraints); } return(myfree(profile, sizeof(profile_t))); } void SetupABCD(NJ_t *NJ, int node, /* the 4 profiles; the last one is an outprofile */ /*OPTIONAL OUT*/profile_t *profiles[4], /*OPTIONAL IN/OUT*/profile_t **upProfiles, /*OUT*/int nodeABCD[4], bool useML) { int parent = NJ->parent[node]; assert(parent >= 0); assert(NJ->child[node].nChild == 2); nodeABCD[0] = NJ->child[node].child[0]; /*A*/ nodeABCD[1] = NJ->child[node].child[1]; /*B*/ profile_t *profile4 = NULL; if (parent == NJ->root) { int sibs[2]; RootSiblings(NJ, node, /*OUT*/sibs); nodeABCD[2] = sibs[0]; nodeABCD[3] = sibs[1]; if (profiles == NULL) return; profile4 = NJ->profiles[sibs[1]]; } else { nodeABCD[2] = Sibling(NJ,node); assert(nodeABCD[2] >= 0); nodeABCD[3] = parent; if (profiles == NULL) return; profile4 = GetUpProfile(upProfiles,NJ,parent,useML); } assert(upProfiles != NULL); int i; for (i = 0; i < 3; i++) profiles[i] = NJ->profiles[nodeABCD[i]]; profiles[3] = profile4; } int Sibling(NJ_t *NJ, int node) { int parent = NJ->parent[node]; if (parent < 0 || parent == NJ->root) return(-1); int iChild; 
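  /* parent is not the root here, so it has exactly two children; return the one that is not node */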
for(iChild=0;iChild<NJ->child[parent].nChild;iChild++) { if(NJ->child[parent].child[iChild] != node) return (NJ->child[parent].child[iChild]); } assert(0); return(-1); } void RootSiblings(NJ_t *NJ, int node, /*OUT*/int sibs[2]) { assert(NJ->parent[node] == NJ->root); assert(NJ->child[NJ->root].nChild == 3); int nSibs = 0; int iChild; for(iChild=0; iChild < NJ->child[NJ->root].nChild; iChild++) { int child = NJ->child[NJ->root].child[iChild]; if (child != node) sibs[nSibs++] = child; } assert(nSibs==2); } void TestSplitsML(/*IN/OUT*/NJ_t *NJ, /*OUT*/SplitCount_t *splitcount, int nBootstrap) { const double tolerance = 1e-6; splitcount->nBadSplits = 0; splitcount->nConstraintViolations = 0; splitcount->nBadBoth = 0; splitcount->nSplits = 0; splitcount->dWorstDeltaUnconstrained = 0; splitcount->dWorstDeltaConstrained = 0; profile_t **upProfiles = UpProfiles(NJ); traversal_t traversal = InitTraversal(NJ); int node = NJ->root; int *col = nBootstrap > 0 ? ResampleColumns(NJ->nPos, nBootstrap) : NULL; double *site_likelihoods[3]; int choice; for (choice = 0; choice < 3; choice++) site_likelihoods[choice] = mymalloc(sizeof(double)*NJ->nPos); int iNodesDone = 0; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { if (node < NJ->nSeq || node == NJ->root) continue; /* nothing to do for leaves or root */ if(iNodesDone > 0 && (iNodesDone % 100) == 0) ProgressReport("ML split tests for %6d of %6d internal splits", iNodesDone, NJ->nSeq-3, 0, 0); iNodesDone++; profile_t *profiles[4]; int nodeABCD[4]; SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/true); double loglk[3]; double len[5]; int i; for (i = 0; i < 4; i++) len[i] = NJ->branchlength[nodeABCD[i]]; len[4] = NJ->branchlength[node]; double lenABvsCD[5] = {len[LEN_A], len[LEN_B], len[LEN_C], len[LEN_D], len[LEN_I]}; double lenACvsBD[5] = {len[LEN_A], len[LEN_C], len[LEN_B], len[LEN_D], len[LEN_I]}; /* Swap B & C */ double lenADvsBC[5] = {len[LEN_A], len[LEN_D], len[LEN_C], len[LEN_B], len[LEN_I]}; /* Swap B & D */ { #ifdef OPENMP #pragma omp parallel #pragma omp sections #endif { #ifdef OPENMP #pragma omp section #endif { /* Lengths are already optimized for ABvsCD */ loglk[ABvsCD] = MLQuartetLogLk(profiles[0], profiles[1], profiles[2], profiles[3], NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/lenABvsCD, /*OUT*/site_likelihoods[ABvsCD]); } #ifdef OPENMP #pragma omp section #endif { loglk[ACvsBD] = MLQuartetOptimize(profiles[0], profiles[2], profiles[1], profiles[3], NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/lenACvsBD, /*pStarTest*/NULL, /*OUT*/site_likelihoods[ACvsBD]); } #ifdef OPENMP #pragma omp section #endif { loglk[ADvsBC] = MLQuartetOptimize(profiles[0], profiles[3], profiles[2], profiles[1], NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/lenADvsBC, /*pStarTest*/NULL, /*OUT*/site_likelihoods[ADvsBC]); } } } /* do a second pass on the better alternative if it is close */ if (loglk[ACvsBD] > loglk[ADvsBC]) { if (mlAccuracy > 1 || loglk[ACvsBD] > loglk[ABvsCD] - closeLogLkLimit) { loglk[ACvsBD] = MLQuartetOptimize(profiles[0], profiles[2], profiles[1], profiles[3], NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/lenACvsBD, /*pStarTest*/NULL, /*OUT*/site_likelihoods[ACvsBD]); } } else { if (mlAccuracy > 1 || loglk[ADvsBC] > loglk[ABvsCD] - closeLogLkLimit) { loglk[ADvsBC] = MLQuartetOptimize(profiles[0], profiles[3], profiles[2], profiles[1], NJ->nPos, NJ->transmat, &NJ->rates, /*IN/OUT*/lenADvsBC, /*pStarTest*/NULL, /*OUT*/site_likelihoods[ADvsBC]); } } if (loglk[ABvsCD] >= 
loglk[ACvsBD] && loglk[ABvsCD] >= loglk[ADvsBC]) choice = ABvsCD; else if (loglk[ACvsBD] >= loglk[ABvsCD] && loglk[ACvsBD] >= loglk[ADvsBC]) choice = ACvsBD; else choice = ADvsBC; bool badSplit = loglk[choice] > loglk[ABvsCD] + treeLogLkDelta; /* ignore small changes in likelihood */ /* constraint penalties, indexed by nni_t (lower is better) */ double p[3]; QuartetConstraintPenalties(profiles, NJ->nConstraints, /*OUT*/p); bool bBadConstr = p[ABvsCD] > p[ACvsBD] + tolerance || p[ABvsCD] > p[ADvsBC] + tolerance; bool violateConstraint = false; int iC; for (iC=0; iC < NJ->nConstraints; iC++) { if (SplitViolatesConstraint(profiles, iC)) { violateConstraint = true; break; } } splitcount->nSplits++; if (violateConstraint) splitcount->nConstraintViolations++; if (badSplit) splitcount->nBadSplits++; if (badSplit && bBadConstr) splitcount->nBadBoth++; if (badSplit) { double delta = loglk[choice] - loglk[ABvsCD]; /* If ABvsCD is favored over the more likely NNI by constraints, then this is probably a bad split because of the constraint */ if (p[choice] > p[ABvsCD] + tolerance) splitcount->dWorstDeltaConstrained = MAX(delta, splitcount->dWorstDeltaConstrained); else splitcount->dWorstDeltaUnconstrained = MAX(delta, splitcount->dWorstDeltaUnconstrained); } if (nBootstrap>0) NJ->support[node] = badSplit ? 0.0 : SHSupport(NJ->nPos, nBootstrap, col, loglk, site_likelihoods); /* No longer needed */ DeleteUpProfile(upProfiles, NJ, nodeABCD[0]); DeleteUpProfile(upProfiles, NJ, nodeABCD[1]); DeleteUpProfile(upProfiles, NJ, nodeABCD[2]); } traversal = FreeTraversal(traversal,NJ); upProfiles = FreeUpProfiles(upProfiles,NJ); if (nBootstrap>0) col = myfree(col, sizeof(int)*((size_t)NJ->nPos)*nBootstrap); for (choice = 0; choice < 3; choice++) site_likelihoods[choice] = myfree(site_likelihoods[choice], sizeof(double)*NJ->nPos); } void TestSplitsMinEvo(NJ_t *NJ, /*OUT*/SplitCount_t *splitcount) { const double tolerance = 1e-6; splitcount->nBadSplits = 0; splitcount->nConstraintViolations = 0; splitcount->nBadBoth = 0; splitcount->nSplits = 0; splitcount->dWorstDeltaUnconstrained = 0.0; splitcount->dWorstDeltaConstrained = 0.0; profile_t **upProfiles = UpProfiles(NJ); traversal_t traversal = InitTraversal(NJ); int node = NJ->root; while((node = TraversePostorder(node, NJ, /*IN/OUT*/traversal, /*pUp*/NULL)) >= 0) { if (node < NJ->nSeq || node == NJ->root) continue; /* nothing to do for leaves or root */ profile_t *profiles[4]; int nodeABCD[4]; SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, /*useML*/false); if (verbose>2) fprintf(stderr,"Testing Split around %d: A=%d B=%d C=%d D=up(%d) or node parent %d\n", node, nodeABCD[0], nodeABCD[1], nodeABCD[2], nodeABCD[3], NJ->parent[node]); double d[6]; /* distances, perhaps log-corrected distances, no constraint penalties */ CorrectedPairDistances(profiles, 4, NJ->distance_matrix, NJ->nPos, /*OUT*/d); /* alignment-based scores for each split (lower is better) */ double sABvsCD = d[qAB] + d[qCD]; double sACvsBD = d[qAC] + d[qBD]; double sADvsBC = d[qAD] + d[qBC]; /* constraint penalties, indexed by nni_t (lower is better) */ double p[3]; QuartetConstraintPenalties(profiles, NJ->nConstraints, /*OUT*/p); int nConstraintsViolated = 0; int iC; for (iC=0; iC < NJ->nConstraints; iC++) { if (SplitViolatesConstraint(profiles, iC)) { nConstraintsViolated++; if (verbose > 2) { double penalty[3] = {0.0,0.0,0.0}; (void)QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/penalty); fprintf(stderr, "Violate constraint %d at %d (children %d %d) penalties %.3f 
%.3f %.3f %d/%d %d/%d %d/%d %d/%d\n", iC, node, NJ->child[node].child[0], NJ->child[node].child[1], penalty[ABvsCD], penalty[ACvsBD], penalty[ADvsBC], profiles[0]->nOn[iC], profiles[0]->nOff[iC], profiles[1]->nOn[iC], profiles[1]->nOff[iC], profiles[2]->nOn[iC], profiles[2]->nOff[iC], profiles[3]->nOn[iC], profiles[3]->nOff[iC]); } } } double delta = sABvsCD - MIN(sACvsBD,sADvsBC); bool bBadDist = delta > tolerance; bool bBadConstr = p[ABvsCD] > p[ACvsBD] + tolerance || p[ABvsCD] > p[ADvsBC] + tolerance; splitcount->nSplits++; if (bBadDist) { nni_t choice = sACvsBD < sADvsBC ? ACvsBD : ADvsBC; /* If ABvsCD is favored over the shorter NNI by constraints, then this is probably a bad split because of the constraint */ if (p[choice] > p[ABvsCD] + tolerance) splitcount->dWorstDeltaConstrained = MAX(delta, splitcount->dWorstDeltaConstrained); else splitcount->dWorstDeltaUnconstrained = MAX(delta, splitcount->dWorstDeltaUnconstrained); } if (nConstraintsViolated > 0) splitcount->nConstraintViolations++; /* count splits with any violations, not #constraints in a splits */ if (bBadDist) splitcount->nBadSplits++; if (bBadDist && bBadConstr) splitcount->nBadBoth++; if (bBadConstr && verbose > 2) { /* Which NNI would be better */ double dist_advantage = 0; double constraint_penalty = 0; if (p[ACvsBD] < p[ADvsBC]) { dist_advantage = sACvsBD - sABvsCD; constraint_penalty = p[ABvsCD] - p[ACvsBD]; } else { dist_advantage = sADvsBC - sABvsCD; constraint_penalty = p[ABvsCD] - p[ADvsBC]; } fprintf(stderr, "Violate constraints %d distance_advantage %.3f constraint_penalty %.3f (children %d %d):", node, dist_advantage, constraint_penalty, NJ->child[node].child[0], NJ->child[node].child[1]); /* list the constraints with a penalty, meaning that ABCD all have non-zero values and that AB|CD worse than others */ for (iC = 0; iC < NJ->nConstraints; iC++) { double ppart[6]; if (QuartetConstraintPenaltiesPiece(profiles, iC, /*OUT*/ppart)) { if (ppart[qAB] + ppart[qCD] > ppart[qAD] + ppart[qBC] + tolerance || ppart[qAB] + ppart[qCD] > ppart[qAC] + ppart[qBD] + tolerance) fprintf(stderr, " %d (%d/%d %d/%d %d/%d %d/%d)", iC, profiles[0]->nOn[iC], profiles[0]->nOff[iC], profiles[1]->nOn[iC], profiles[1]->nOff[iC], profiles[2]->nOn[iC], profiles[2]->nOff[iC], profiles[3]->nOn[iC], profiles[3]->nOff[iC]); } } fprintf(stderr, "\n"); } /* no longer needed */ DeleteUpProfile(upProfiles, NJ, nodeABCD[0]); DeleteUpProfile(upProfiles, NJ, nodeABCD[1]); } traversal = FreeTraversal(traversal,NJ); upProfiles = FreeUpProfiles(upProfiles,NJ); } /* Computes support for (A,B),(C,D) compared to that for (A,C),(B,D) and (A,D),(B,C) */ double SplitSupport(profile_t *pA, profile_t *pB, profile_t *pC, profile_t *pD, /*OPTIONAL*/distance_matrix_t *dmat, int nPos, int nBootstrap, int *col) { int i,j; long lPos = nPos; /* to avoid overflow when multiplying */ /* Note distpieces are weighted */ double *distpieces[6]; double *weights[6]; for (j = 0; j < 6; j++) { distpieces[j] = (double*)mymalloc(sizeof(double)*nPos); weights[j] = (double*)mymalloc(sizeof(double)*nPos); } int iFreqA = 0; int iFreqB = 0; int iFreqC = 0; int iFreqD = 0; for (i = 0; i < nPos; i++) { numeric_t *fA = GET_FREQ(pA, i, /*IN/OUT*/iFreqA); numeric_t *fB = GET_FREQ(pB, i, /*IN/OUT*/iFreqB); numeric_t *fC = GET_FREQ(pC, i, /*IN/OUT*/iFreqC); numeric_t *fD = GET_FREQ(pD, i, /*IN/OUT*/iFreqD); weights[qAB][i] = pA->weights[i] * pB->weights[i]; weights[qAC][i] = pA->weights[i] * pC->weights[i]; weights[qAD][i] = pA->weights[i] * pD->weights[i]; weights[qBC][i] = pB->weights[i] 
* pC->weights[i]; weights[qBD][i] = pB->weights[i] * pD->weights[i]; weights[qCD][i] = pC->weights[i] * pD->weights[i]; distpieces[qAB][i] = weights[qAB][i] * ProfileDistPiece(pA->codes[i], pB->codes[i], fA, fB, dmat, NULL); distpieces[qAC][i] = weights[qAC][i] * ProfileDistPiece(pA->codes[i], pC->codes[i], fA, fC, dmat, NULL); distpieces[qAD][i] = weights[qAD][i] * ProfileDistPiece(pA->codes[i], pD->codes[i], fA, fD, dmat, NULL); distpieces[qBC][i] = weights[qBC][i] * ProfileDistPiece(pB->codes[i], pC->codes[i], fB, fC, dmat, NULL); distpieces[qBD][i] = weights[qBD][i] * ProfileDistPiece(pB->codes[i], pD->codes[i], fB, fD, dmat, NULL); distpieces[qCD][i] = weights[qCD][i] * ProfileDistPiece(pC->codes[i], pD->codes[i], fC, fD, dmat, NULL); } assert(iFreqA == pA->nVectors); assert(iFreqB == pB->nVectors); assert(iFreqC == pC->nVectors); assert(iFreqD == pD->nVectors); double totpieces[6]; double totweights[6]; double dists[6]; for (j = 0; j < 6; j++) { totpieces[j] = 0.0; totweights[j] = 0.0; for (i = 0; i < nPos; i++) { totpieces[j] += distpieces[j][i]; totweights[j] += weights[j][i]; } dists[j] = totweights[j] > 0.01 ? totpieces[j]/totweights[j] : 3.0; if (logdist) dists[j] = LogCorrect(dists[j]); } /* Support1 = Support(AB|CD over AC|BD) = d(A,C)+d(B,D)-d(A,B)-d(C,D) Support2 = Support(AB|CD over AD|BC) = d(A,D)+d(B,C)-d(A,B)-d(C,D) */ double support1 = dists[qAC] + dists[qBD] - dists[qAB] - dists[qCD]; double support2 = dists[qAD] + dists[qBC] - dists[qAB] - dists[qCD]; if (support1 < 0 || support2 < 0) { nSuboptimalSplits++; /* Another split seems superior */ } assert(nBootstrap > 0); int nSupport = 0; int iBoot; for (iBoot=0;iBoot<nBootstrap;iBoot++) { int *colw = &col[lPos*iBoot]; for (j = 0; j < 6; j++) { double totp = 0; double totw = 0; double *d = distpieces[j]; double *w = weights[j]; for (i=0; i<nPos; i++) { int c = colw[i]; totp += d[c]; totw += w[c]; } dists[j] = totw > 0.01 ? totp/totw : 3.0; if (logdist) dists[j] = LogCorrect(dists[j]); } support1 = dists[qAC] + dists[qBD] - dists[qAB] - dists[qCD]; support2 = dists[qAD] + dists[qBC] - dists[qAB] - dists[qCD]; if (support1 > 0 && support2 > 0) nSupport++; } /* end loop over bootstrap replicates */ for (j = 0; j < 6; j++) { distpieces[j] = myfree(distpieces[j], sizeof(double)*nPos); weights[j] = myfree(weights[j], sizeof(double)*nPos); } return( nSupport/(double)nBootstrap ); } double SHSupport(int nPos, int nBootstrap, int *col, double loglk[3], double *site_likelihoods[3]) { long lPos = nPos; /* to avoid overflow when multiplying */ assert(nBootstrap>0); double delta1 = loglk[0]-loglk[1]; double delta2 = loglk[0]-loglk[2]; double delta = delta1 < delta2 ? delta1 : delta2; double *siteloglk[3]; int i,j; for (i = 0; i < 3; i++) { siteloglk[i] = mymalloc(sizeof(double)*nPos); for (j = 0; j < nPos; j++) siteloglk[i][j] = log(site_likelihoods[i][j]); } int nSupport = 0; int iBoot; for (iBoot = 0; iBoot < nBootstrap; iBoot++) { double resampled[3]; for (i = 0; i < 3; i++) resampled[i] = -loglk[i]; for (j = 0; j < nPos; j++) { int pos = col[iBoot*lPos+j]; for (i = 0; i < 3; i++) resampled[i] += siteloglk[i][pos]; } int iBest = 0; for (i = 1; i < 3; i++) if (resampled[i] > resampled[iBest]) iBest = i; double resample1 = resampled[iBest] - resampled[(iBest+1)%3]; double resample2 = resampled[iBest] - resampled[(iBest+2)%3]; double resampleDelta = resample1 < resample2 ? 
resample1 : resample2; if (resampleDelta < delta) nSupport++; } for (i=0;i<3;i++) siteloglk[i] = myfree(siteloglk[i], sizeof(double)*nPos); return(nSupport/(double)nBootstrap); } void SetDistCriterion(/*IN/OUT*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *hit) { if (hit->i < NJ->nSeq && hit->j < NJ->nSeq) { SeqDist(NJ->profiles[hit->i]->codes, NJ->profiles[hit->j]->codes, NJ->nPos, NJ->distance_matrix, /*OUT*/hit); } else { ProfileDist(NJ->profiles[hit->i], NJ->profiles[hit->j], NJ->nPos, NJ->distance_matrix, /*OUT*/hit); hit->dist -= (NJ->diameter[hit->i] + NJ->diameter[hit->j]); } hit->dist += constraintWeight * (double)JoinConstraintPenalty(NJ, hit->i, hit->j); SetCriterion(NJ,nActive,/*IN/OUT*/hit); } void SetCriterion(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *join) { if(join->i < 0 || join->j < 0 || NJ->parent[join->i] >= 0 || NJ->parent[join->j] >= 0) return; assert(NJ->nOutDistActive[join->i] >= nActive); assert(NJ->nOutDistActive[join->j] >= nActive); int nDiffAllow = tophitsMult > 0 ? (int)(nActive*staleOutLimit) : 0; if (NJ->nOutDistActive[join->i] - nActive > nDiffAllow) SetOutDistance(NJ, join->i, nActive); if (NJ->nOutDistActive[join->j] - nActive > nDiffAllow) SetOutDistance(NJ, join->j, nActive); double outI = NJ->outDistances[join->i]; if (NJ->nOutDistActive[join->i] != nActive) outI *= (nActive-1)/(double)(NJ->nOutDistActive[join->i]-1); double outJ = NJ->outDistances[join->j]; if (NJ->nOutDistActive[join->j] != nActive) outJ *= (nActive-1)/(double)(NJ->nOutDistActive[join->j]-1); join->criterion = join->dist - (outI+outJ)/(double)(nActive-2); if (verbose > 2 && nActive <= 5) { fprintf(stderr, "Set Criterion to join %d %d with nActive=%d dist+penalty %.3f criterion %.3f\n", join->i, join->j, nActive, join->dist, join->criterion); } } void SetOutDistance(NJ_t *NJ, int iNode, int nActive) { if (NJ->nOutDistActive[iNode] == nActive) return; /* May be called by InitNJ before we have parents */ assert(iNode>=0 && (NJ->parent == NULL || NJ->parent[iNode]<0)); besthit_t dist; ProfileDist(NJ->profiles[iNode], NJ->outprofile, NJ->nPos, NJ->distance_matrix, &dist); outprofileOps++; /* out(A) = sum(X!=A) d(A,X) = sum(X!=A) (profiledist(A,X) - diam(A) - diam(X)) = sum(X!=A) profiledist(A,X) - (N-1)*diam(A) - (totdiam - diam(A)) in the absence of gaps: profiledist(A,out) = mean profiledist(A, all active nodes) sum(X!=A) profiledist(A,X) = N * profiledist(A,out) - profiledist(A,A) With gaps, we need to take the weights of the comparisons into account, where w(Ai) is the weight of position i in profile A: w(A,B) = sum_i w(Ai) * w(Bi) d(A,B) = sum_i w(Ai) * w(Bi) * d(Ai,Bi) / w(A,B) sum(X!=A) profiledist(A,X) ~= (N-1) * profiledist(A, Out w/o A) profiledist(A, Out w/o A) = sum_X!=A sum_i d(Ai,Xi) * w(Ai) * w(Bi) / ( sum_X!=A sum_i w(Ai) * w(Bi) ) d(A, Out) = sum_A sum_i d(Ai,Xi) * w(Ai) * w(Bi) / ( sum_X sum_i w(Ai) * w(Bi) ) and so we get profiledist(A,out w/o A) = (top of d(A,Out) - top of d(A,A)) / (weight of d(A,Out) - weight of d(A,A)) top = dist * weight with another correction of nActive because the weight of the out-profile is the average weight not the total weight. */ double top = (nActive-1) * (dist.dist * dist.weight * nActive - NJ->selfweight[iNode] * NJ->selfdist[iNode]); double bottom = (dist.weight * nActive - NJ->selfweight[iNode]); double pdistOutWithoutA = top/bottom; NJ->outDistances[iNode] = bottom > 0.01 ? 
pdistOutWithoutA - NJ->diameter[iNode] * (nActive-1) - (NJ->totdiam - NJ->diameter[iNode]) : 3.0; NJ->nOutDistActive[iNode] = nActive; if(verbose>3 && iNode < 5) fprintf(stderr,"NewOutDist for %d %f from dist %f selfd %f diam %f totdiam %f newActive %d\n", iNode, NJ->outDistances[iNode], dist.dist, NJ->selfdist[iNode], NJ->diameter[iNode], NJ->totdiam, nActive); if (verbose>6 && (iNode % 10) == 0) { /* Compute the actual out-distance and compare */ double total = 0.0; double total_pd = 0.0; int j; for (j=0;j<NJ->maxnode;j++) { if (j!=iNode && (NJ->parent==NULL || NJ->parent[j]<0)) { besthit_t bh; ProfileDist(NJ->profiles[iNode], NJ->profiles[j], NJ->nPos, NJ->distance_matrix, /*OUT*/&bh); total_pd += bh.dist; total += bh.dist - (NJ->diameter[iNode] + NJ->diameter[j]); } } fprintf(stderr,"OutDist for Node %d %f truth %f profiled %f truth %f pd_err %f\n", iNode, NJ->outDistances[iNode], total, pdistOutWithoutA, total_pd,fabs(pdistOutWithoutA-total_pd)); } } top_hits_t *FreeTopHits(top_hits_t *tophits) { if (tophits == NULL) return(NULL); int iNode; for (iNode = 0; iNode < tophits->maxnodes; iNode++) { top_hits_list_t *l = &tophits->top_hits_lists[iNode]; if (l->hits != NULL) l->hits = myfree(l->hits, sizeof(hit_t) * l->nHits); } tophits->top_hits_lists = myfree(tophits->top_hits_lists, sizeof(top_hits_list_t) * tophits->maxnodes); tophits->visible = myfree(tophits->visible, sizeof(hit_t*) * tophits->maxnodes); tophits->topvisible = myfree(tophits->topvisible, sizeof(int) * tophits->nTopVisible); #ifdef OPENMP for (iNode = 0; iNode < tophits->maxnodes; iNode++) omp_destroy_lock(&tophits->locks[iNode]); tophits->locks = myfree(tophits->locks, sizeof(omp_lock_t) * tophits->maxnodes); #endif return(myfree(tophits, sizeof(top_hits_t))); } top_hits_t *InitTopHits(NJ_t *NJ, int m) { int iNode; assert(m > 0); top_hits_t *tophits = mymalloc(sizeof(top_hits_t)); tophits->m = m; tophits->q = (int)(0.5 + tophits2Mult * sqrt(tophits->m)); if (!useTopHits2nd || tophits->q >= tophits->m) tophits->q = 0; tophits->maxnodes = NJ->maxnodes; tophits->top_hits_lists = mymalloc(sizeof(top_hits_list_t) * tophits->maxnodes); tophits->visible = mymalloc(sizeof(hit_t) * tophits->maxnodes); tophits->nTopVisible = (int)(0.5 + topvisibleMult*m); tophits->topvisible = mymalloc(sizeof(int) * tophits->nTopVisible); #ifdef OPENMP tophits->locks = mymalloc(sizeof(omp_lock_t) * tophits->maxnodes); for (iNode = 0; iNode < tophits->maxnodes; iNode++) omp_init_lock(&tophits->locks[iNode]); #endif int i; for (i = 0; i < tophits->nTopVisible; i++) tophits->topvisible[i] = -1; /* empty */ tophits->topvisibleAge = 0; for (iNode = 0; iNode < tophits->maxnodes; iNode++) { top_hits_list_t *l = &tophits->top_hits_lists[iNode]; l->nHits = 0; l->hits = NULL; l->hitSource = -1; l->age = 0; hit_t *v = &tophits->visible[iNode]; v->j = -1; v->dist = 1e20; } return(tophits); } /* Helper function for sorting in SetAllLeafTopHits, and the global variables it needs */ NJ_t *CompareSeedNJ = NULL; int *CompareSeedGaps = NULL; int CompareSeeds(const void *c1, const void *c2) { int seed1 = *(int *)c1; int seed2 = *(int *)c2; int gapdiff = CompareSeedGaps[seed1] - CompareSeedGaps[seed2]; if (gapdiff != 0) return(gapdiff); /* fewer gaps is better */ double outdiff = CompareSeedNJ->outDistances[seed1] - CompareSeedNJ->outDistances[seed2]; if(outdiff < 0) return(-1); /* closer to more nodes is better */ if(outdiff > 0) return(1); return(0); } /* Using the seed heuristic and the close global variable */ void SetAllLeafTopHits(/*IN/UPDATE*/NJ_t *NJ, 
/*IN/OUT*/top_hits_t *tophits) { double close = tophitsClose; if (close < 0) { if (fastest && NJ->nSeq >= 50000) { close = 0.99; } else { double logN = log((double)NJ->nSeq)/log(2.0); close = logN/(logN+2.0); } } /* Sort the potential seeds, by a combination of nGaps and NJ->outDistances We don't store nGaps so we need to compute that */ int *nGaps = (int*)mymalloc(sizeof(int)*NJ->nSeq); int iNode; for(iNode=0; iNode<NJ->nSeq; iNode++) { nGaps[iNode] = (int)(0.5 + NJ->nPos - NJ->selfweight[iNode]); } int *seeds = (int*)mymalloc(sizeof(int)*NJ->nSeq); for (iNode=0; iNode<NJ->nSeq; iNode++) seeds[iNode] = iNode; CompareSeedNJ = NJ; CompareSeedGaps = nGaps; qsort(/*IN/OUT*/seeds, NJ->nSeq, sizeof(int), CompareSeeds); CompareSeedNJ = NULL; CompareSeedGaps = NULL; /* For each seed, save its top 2*m hits and then look for close neighbors */ assert(2 * tophits->m <= NJ->nSeq); int iSeed; int nHasTopHits = 0; #ifdef OPENMP #pragma omp parallel for schedule(dynamic, 50) #endif for(iSeed=0; iSeed < NJ->nSeq; iSeed++) { int seed = seeds[iSeed]; if (iSeed > 0 && (iSeed % 100) == 0) { #ifdef OPENMP #pragma omp critical #endif ProgressReport("Top hits for %6d of %6d seqs (at seed %6d)", nHasTopHits, NJ->nSeq, iSeed, 0); } if (tophits->top_hits_lists[seed].nHits > 0) { if(verbose>2) fprintf(stderr, "Skipping seed %d\n", seed); continue; } besthit_t *besthitsSeed = (besthit_t*)mymalloc(sizeof(besthit_t)*NJ->nSeq); besthit_t *besthitsNeighbor = (besthit_t*)mymalloc(sizeof(besthit_t) * 2 * tophits->m); besthit_t bestjoin; if(verbose>2) fprintf(stderr,"Trying seed %d\n", seed); SetBestHit(seed, NJ, /*nActive*/NJ->nSeq, /*OUT*/&bestjoin, /*OUT*/besthitsSeed); /* sort & save top hits of self. besthitsSeed is now sorted. */ SortSaveBestHits(seed, /*IN/SORT*/besthitsSeed, /*IN-SIZE*/NJ->nSeq, /*OUT-SIZE*/tophits->m, /*IN/OUT*/tophits); nHasTopHits++; /* find "close" neighbors and compute their top hits */ double neardist = besthitsSeed[2 * tophits->m - 1].dist * close; /* must have at least average weight, rem higher is better and allow a bit more than average, e.g. 
if we are looking for within 30% away, 20% more gaps than usual seems OK Alternatively, have a coverage requirement in case neighbor is short If fastest, consider the top q/2 hits to be close neighbors, regardless */ double nearweight = 0; int iClose; for (iClose = 0; iClose < 2 * tophits->m; iClose++) nearweight += besthitsSeed[iClose].weight; nearweight = nearweight/(2.0 * tophits->m); /* average */ nearweight *= (1.0-2.0*neardist/3.0); double nearcover = 1.0 - neardist/2.0; if(verbose>2) fprintf(stderr,"Distance limit for close neighbors %f weight %f ungapped %d\n", neardist, nearweight, NJ->nPos-nGaps[seed]); for (iClose = 0; iClose < tophits->m; iClose++) { besthit_t *closehit = &besthitsSeed[iClose]; int closeNode = closehit->j; if (tophits->top_hits_lists[closeNode].nHits > 0) continue; /* If within close-distance, or identical, use as close neighbor */ bool close = closehit->dist <= neardist && (closehit->weight >= nearweight || closehit->weight >= (NJ->nPos-nGaps[closeNode])*nearcover); bool identical = closehit->dist < 1e-6 && fabs(closehit->weight - (NJ->nPos - nGaps[seed])) < 1e-5 && fabs(closehit->weight - (NJ->nPos - nGaps[closeNode])) < 1e-5; if (useTopHits2nd && iClose < tophits->q && (close || identical)) { nHasTopHits++; nClose2Used++; int nUse = MIN(tophits->q * tophits2Safety, 2 * tophits->m); besthit_t *besthitsClose = mymalloc(sizeof(besthit_t) * nUse); TransferBestHits(NJ, /*nActive*/NJ->nSeq, closeNode, /*IN*/besthitsSeed, /*SIZE*/nUse, /*OUT*/besthitsClose, /*updateDistance*/true); SortSaveBestHits(closeNode, /*IN/SORT*/besthitsClose, /*IN-SIZE*/nUse, /*OUT-SIZE*/tophits->q, /*IN/OUT*/tophits); tophits->top_hits_lists[closeNode].hitSource = seed; besthitsClose = myfree(besthitsClose, sizeof(besthit_t) * nUse); } else if (close || identical || (fastest && iClose < (tophits->q+1)/2)) { nHasTopHits++; nCloseUsed++; if(verbose>2) fprintf(stderr, "Near neighbor %d (rank %d weight %f ungapped %d %d)\n", closeNode, iClose, besthitsSeed[iClose].weight, NJ->nPos-nGaps[seed], NJ->nPos-nGaps[closeNode]); /* compute top 2*m hits */ TransferBestHits(NJ, /*nActive*/NJ->nSeq, closeNode, /*IN*/besthitsSeed, /*SIZE*/2 * tophits->m, /*OUT*/besthitsNeighbor, /*updateDistance*/true); SortSaveBestHits(closeNode, /*IN/SORT*/besthitsNeighbor, /*IN-SIZE*/2 * tophits->m, /*OUT-SIZE*/tophits->m, /*IN/OUT*/tophits); /* And then try for a second level of transfer. We assume we are in a good area, because of the 1st level of transfer, and in a small neighborhood, because q is small (32 for 1 million sequences), so we do not make any close checks. 
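      (q is set to about sqrt(m) in InitTopHits and m defaults to about sqrt(N), so for 10^6 sequences m is about 1,000 and q is about 32.)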
*/ int iClose2; for (iClose2 = 0; iClose2 < tophits->q && iClose2 < 2 * tophits->m; iClose2++) { int closeNode2 = besthitsNeighbor[iClose2].j; assert(closeNode2 >= 0); if (tophits->top_hits_lists[closeNode2].hits == NULL) { nClose2Used++; nHasTopHits++; int nUse = MIN(tophits->q * tophits2Safety, 2 * tophits->m); besthit_t *besthitsClose2 = mymalloc(sizeof(besthit_t) * nUse); TransferBestHits(NJ, /*nActive*/NJ->nSeq, closeNode2, /*IN*/besthitsNeighbor, /*SIZE*/nUse, /*OUT*/besthitsClose2, /*updateDistance*/true); SortSaveBestHits(closeNode2, /*IN/SORT*/besthitsClose2, /*IN-SIZE*/nUse, /*OUT-SIZE*/tophits->q, /*IN/OUT*/tophits); tophits->top_hits_lists[closeNode2].hitSource = closeNode; besthitsClose2 = myfree(besthitsClose2, sizeof(besthit_t) * nUse); } /* end if should do 2nd-level transfer */ } } } /* end loop over close candidates */ besthitsSeed = myfree(besthitsSeed, sizeof(besthit_t)*NJ->nSeq); besthitsNeighbor = myfree(besthitsNeighbor, sizeof(besthit_t) * 2 * tophits->m); } /* end loop over seeds */ for (iNode=0; iNode<NJ->nSeq; iNode++) { top_hits_list_t *l = &tophits->top_hits_lists[iNode]; assert(l->hits != NULL); assert(l->hits[0].j >= 0); assert(l->hits[0].j < NJ->nSeq); assert(l->hits[0].j != iNode); tophits->visible[iNode] = l->hits[0]; } if (verbose >= 2) fprintf(stderr, "#Close neighbors among leaves: 1st-level %ld 2nd-level %ld seeds %ld\n", nCloseUsed, nClose2Used, NJ->nSeq-nCloseUsed-nClose2Used); nGaps = myfree(nGaps, sizeof(int)*NJ->nSeq); seeds = myfree(seeds, sizeof(int)*NJ->nSeq); /* Now add a "checking phase" where we ensure that the q or 2*sqrt(m) hits of i are represented in j (if they should be) */ long lReplace = 0; int nCheck = tophits->q > 0 ? tophits->q : (int)(0.5 + 2.0*sqrt(tophits->m)); for (iNode = 0; iNode < NJ->nSeq; iNode++) { if ((iNode % 100) == 0) ProgressReport("Checking top hits for %6d of %6d seqs", iNode+1, NJ->nSeq, 0, 0); top_hits_list_t *lNode = &tophits->top_hits_lists[iNode]; int iHit; for (iHit = 0; iHit < nCheck && iHit < lNode->nHits; iHit++) { besthit_t bh = HitToBestHit(iNode, lNode->hits[iHit]); SetCriterion(NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/&bh); top_hits_list_t *lTarget = &tophits->top_hits_lists[bh.j]; /* If this criterion is worse than the nCheck-1 entry of the target, then skip the check. This logic is based on assuming that the list is sorted, which is true initially but may not be true later. Still, is a good heuristic. 
*/ assert(nCheck > 0); assert(nCheck <= lTarget->nHits); besthit_t bhCheck = HitToBestHit(bh.j, lTarget->hits[nCheck-1]); SetCriterion(NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/&bhCheck); if (bhCheck.criterion < bh.criterion) continue; /* no check needed */ /* Check if this is present in the top-hit list */ int iHit2; bool bFound = false; for (iHit2 = 0; iHit2 < lTarget->nHits && !bFound; iHit2++) if (lTarget->hits[iHit2].j == iNode) bFound = true; if (!bFound) { /* Find the hit with the worst criterion and replace it with this one */ int iWorst = -1; double dWorstCriterion = -1e20; for (iHit2 = 0; iHit2 < lTarget->nHits; iHit2++) { besthit_t bh2 = HitToBestHit(bh.j, lTarget->hits[iHit2]); SetCriterion(NJ, /*nActive*/NJ->nSeq, /*IN/OUT*/&bh2); if (bh2.criterion > dWorstCriterion) { iWorst = iHit2; dWorstCriterion = bh2.criterion; } } if (dWorstCriterion > bh.criterion) { assert(iWorst >= 0); lTarget->hits[iWorst].j = iNode; lTarget->hits[iWorst].dist = bh.dist; lReplace++; /* and perhaps update visible */ besthit_t v; bool bSuccess = GetVisible(NJ, /*nActive*/NJ->nSeq, tophits, bh.j, /*OUT*/&v); assert(bSuccess); if (bh.criterion < v.criterion) tophits->visible[bh.j] = lTarget->hits[iWorst]; } } } } if (verbose >= 2) fprintf(stderr, "Replaced %ld top hit entries\n", lReplace); } /* Updates out-distances but does not reset or update visible set */ void GetBestFromTopHits(int iNode, /*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN*/top_hits_t *tophits, /*OUT*/besthit_t *bestjoin) { assert(iNode >= 0); assert(NJ->parent[iNode] < 0); top_hits_list_t *l = &tophits->top_hits_lists[iNode]; assert(l->nHits > 0); assert(l->hits != NULL); if(!fastest) SetOutDistance(NJ, iNode, nActive); /* ensure out-distances are not stale */ bestjoin->i = -1; bestjoin->j = -1; bestjoin->dist = 1e20; bestjoin->criterion = 1e20; int iBest; for(iBest=0; iBest < l->nHits; iBest++) { besthit_t bh = HitToBestHit(iNode, l->hits[iBest]); if (UpdateBestHit(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/&bh, /*update dist*/true)) { SetCriterion(/*IN/OUT*/NJ, nActive, /*IN/OUT*/&bh); /* make sure criterion is correct */ if (bh.criterion < bestjoin->criterion) *bestjoin = bh; } } assert(bestjoin->j >= 0); /* a hit was found */ assert(bestjoin->i == iNode); } int ActiveAncestor(/*IN*/NJ_t *NJ, int iNode) { if (iNode < 0) return(iNode); while(NJ->parent[iNode] >= 0) iNode = NJ->parent[iNode]; return(iNode); } bool UpdateBestHit(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/besthit_t *hit, bool bUpdateDist) { int i = ActiveAncestor(/*IN*/NJ, hit->i); int j = ActiveAncestor(/*IN*/NJ, hit->j); if (i < 0 || j < 0 || i == j) { hit->i = -1; hit->j = -1; hit->weight = 0; hit->dist = 1e20; hit->criterion = 1e20; return(false); } if (i != hit->i || j != hit->j) { hit->i = i; hit->j = j; if (bUpdateDist) { SetDistCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/hit); } else { hit->dist = -1e20; hit->criterion = 1e20; } } return(true); } bool GetVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits, int iNode, /*OUT*/besthit_t *visible) { if (iNode < 0 || NJ->parent[iNode] >= 0) return(false); hit_t *v = &tophits->visible[iNode]; if (v->j < 0 || NJ->parent[v->j] >= 0) return(false); *visible = HitToBestHit(iNode, *v); SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/visible); return(true); } besthit_t *UniqueBestHits(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/SORT*/besthit_t *combined, int nCombined, /*OUT*/int *nUniqueOut) { int iHit; for (iHit = 0; iHit < nCombined; iHit++) { besthit_t *hit = &combined[iHit]; UpdateBestHit(/*IN/UPDATE*/NJ, 
nActive, /*IN/OUT*/hit, /*update*/false); } qsort(/*IN/OUT*/combined, nCombined, sizeof(besthit_t), CompareHitsByIJ); besthit_t *uniqueList = (besthit_t*)mymalloc(sizeof(besthit_t)*nCombined); int nUnique = 0; int iSavedLast = -1; /* First build the new list */ for (iHit = 0; iHit < nCombined; iHit++) { besthit_t *hit = &combined[iHit]; if (hit->i < 0 || hit->j < 0) continue; if (iSavedLast >= 0) { /* toss out duplicates */ besthit_t *saved = &combined[iSavedLast]; if (saved->i == hit->i && saved->j == hit->j) continue; } assert(nUnique < nCombined); assert(hit->j >= 0 && NJ->parent[hit->j] < 0); uniqueList[nUnique++] = *hit; iSavedLast = iHit; } *nUniqueOut = nUnique; /* Then do any updates to the criterion or the distances in parallel */ #ifdef OPENMP #pragma omp parallel for schedule(dynamic, 50) #endif for (iHit = 0; iHit < nUnique; iHit++) { besthit_t *hit = &uniqueList[iHit]; if (hit->dist < 0.0) SetDistCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/hit); else SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/hit); } return(uniqueList); } /* Create a top hit list for the new node, either from children (if there are enough best hits left) or by a "refresh" Also set visible set for newnode Also update visible set for other nodes if we stumble across a "better" hit */ void TopHitJoin(int newnode, /*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits) { long startProfileOps = profileOps; long startOutProfileOps = outprofileOps; assert(NJ->child[newnode].nChild == 2); top_hits_list_t *lNew = &tophits->top_hits_lists[newnode]; assert(lNew->hits == NULL); /* Copy the hits */ int i; top_hits_list_t *lChild[2]; for (i = 0; i< 2; i++) { lChild[i] = &tophits->top_hits_lists[NJ->child[newnode].child[i]]; assert(lChild[i]->hits != NULL && lChild[i]->nHits > 0); } int nCombined = lChild[0]->nHits + lChild[1]->nHits; besthit_t *combinedList = (besthit_t*)mymalloc(sizeof(besthit_t)*nCombined); HitsToBestHits(lChild[0]->hits, lChild[0]->nHits, NJ->child[newnode].child[0], /*OUT*/combinedList); HitsToBestHits(lChild[1]->hits, lChild[1]->nHits, NJ->child[newnode].child[1], /*OUT*/combinedList + lChild[0]->nHits); int nUnique; /* UniqueBestHits() replaces children (used in the calls to HitsToBestHits) with active ancestors, so all distances & criteria will be recomputed */ besthit_t *uniqueList = UniqueBestHits(/*IN/UPDATE*/NJ, nActive, /*IN/SORT*/combinedList, nCombined, /*OUT*/&nUnique); int nUniqueAlloc = nCombined; combinedList = myfree(combinedList, sizeof(besthit_t)*nCombined); /* Forget the top-hit lists of the joined nodes */ for (i = 0; i < 2; i++) { lChild[i]->hits = myfree(lChild[i]->hits, sizeof(hit_t) * lChild[i]->nHits); lChild[i]->nHits = 0; } /* Use the average age, rounded up, by 1 Versions 2.0 and earlier used the maximum age, which leads to more refreshes without improving the accuracy of the NJ phase. Intuitively, if one of them was just refreshed then another refresh is unlikely to help. */ lNew->age = (lChild[0]->age+lChild[1]->age+1)/2 + 1; /* If top hit ages always match (perfectly balanced), then a limit of log2(m) would mean a refresh after m joins, which is about what we want. */ int tophitAgeLimit = MAX(1, (int)(0.5 + log((double)tophits->m)/log(2.0))); /* Either use the merged list as candidate top hits, or move from 2nd level to 1st level, or do a refresh UniqueBestHits eliminates hits to self, so if nUnique==nActive-1, we've already done the exhaustive search. 
Either way, we set tophits, visible(newnode), update visible of its top hits, and modify topvisible: if we do a refresh, then we reset it, otherwise we update */ bool bSecondLevel = lChild[0]->hitSource >= 0 && lChild[1]->hitSource >= 0; bool bUseUnique = nUnique==nActive-1 || (lNew->age <= tophitAgeLimit && nUnique >= (bSecondLevel ? (int)(0.5 + tophits2Refresh * tophits->q) : (int)(0.5 + tophits->m * tophitsRefresh) )); if (bUseUnique && verbose > 2) fprintf(stderr,"Top hits for %d from combined %d nActive=%d tophitsage %d %s\n", newnode,nUnique,nActive,lNew->age, bSecondLevel ? "2ndlevel" : "1stlevel"); if (!bUseUnique && bSecondLevel && lNew->age <= tophitAgeLimit) { int source = ActiveAncestor(NJ, lChild[0]->hitSource); if (source == newnode) source = ActiveAncestor(NJ, lChild[1]->hitSource); /* In parallel mode, it is possible that we would select a node as the hit-source and then over-write that top hit with a short list. So we need this sanity check. */ if (source != newnode && source >= 0 && tophits->top_hits_lists[source].hitSource < 0) { /* switch from 2nd-level to 1st-level top hits -- compute top hits list of node from what we have so far plus the active source plus its top hits */ top_hits_list_t *lSource = &tophits->top_hits_lists[source]; assert(lSource->hitSource < 0); assert(lSource->nHits > 0); int nMerge = 1 + lSource->nHits + nUnique; besthit_t *mergeList = mymalloc(sizeof(besthit_t) * nMerge); memcpy(/*to*/mergeList, /*from*/uniqueList, nUnique * sizeof(besthit_t)); int iMerge = nUnique; mergeList[iMerge].i = newnode; mergeList[iMerge].j = source; SetDistCriterion(NJ, nActive, /*IN/OUT*/&mergeList[iMerge]); iMerge++; HitsToBestHits(lSource->hits, lSource->nHits, newnode, /*OUT*/mergeList+iMerge); for (i = 0; i < lSource->nHits; i++) { SetDistCriterion(NJ, nActive, /*IN/OUT*/&mergeList[iMerge]); iMerge++; } assert(iMerge == nMerge); uniqueList = myfree(uniqueList, nUniqueAlloc * sizeof(besthit_t)); uniqueList = UniqueBestHits(/*IN/UPDATE*/NJ, nActive, /*IN/SORT*/mergeList, nMerge, /*OUT*/&nUnique); nUniqueAlloc = nMerge; mergeList = myfree(mergeList, sizeof(besthit_t)*nMerge); assert(nUnique > 0); bUseUnique = nUnique >= (int)(0.5 + tophits->m * tophitsRefresh); bSecondLevel = false; if (bUseUnique && verbose > 2) fprintf(stderr, "Top hits for %d from children and source %d's %d hits, nUnique %d\n", newnode, source, lSource->nHits, nUnique); } } if (bUseUnique) { if (bSecondLevel) { /* pick arbitrarily */ lNew->hitSource = lChild[0]->hitSource; } int nSave = MIN(nUnique, bSecondLevel ? tophits->q : tophits->m); assert(nSave>0); if (verbose > 2) fprintf(stderr, "Combined %d ops so far %ld\n", nUnique, profileOps - startProfileOps); SortSaveBestHits(newnode, /*IN/SORT*/uniqueList, /*nIn*/nUnique, /*nOut*/nSave, /*IN/OUT*/tophits); assert(lNew->hits != NULL); /* set by sort/save */ tophits->visible[newnode] = lNew->hits[0]; UpdateTopVisible(/*IN*/NJ, nActive, newnode, &tophits->visible[newnode], /*IN/OUT*/tophits); UpdateVisible(/*IN/UPDATE*/NJ, nActive, /*IN*/uniqueList, nSave, /*IN/OUT*/tophits); } else { /* need to refresh: set top hits for node and for its top hits */ if(verbose > 2) fprintf(stderr,"Top hits for %d by refresh (%d unique age %d) nActive=%d\n", newnode,nUnique,lNew->age,nActive); nRefreshTopHits++; lNew->age = 0; int iNode; /* ensure all out-distances are up to date ahead of time to avoid any data overwriting issues. 
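     (SetCriterion() in the loops below may call SetOutDistance(); doing this ahead of time avoids concurrent updates to the same node's out-distance from the parallel loops that follow.)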
*/ #ifdef OPENMP #pragma omp parallel for schedule(dynamic, 50) #endif for (iNode = 0; iNode < NJ->maxnode; iNode++) { if (NJ->parent[iNode] < 0) { if (fastest) { besthit_t bh; bh.i = iNode; bh.j = iNode; bh.dist = 0; SetCriterion(/*IN/UPDATE*/NJ, nActive, &bh); } else { SetOutDistance(/*IN/UDPATE*/NJ, iNode, nActive); } } } /* exhaustively get the best 2*m hits for newnode, set visible, and save the top m */ besthit_t *allhits = (besthit_t*)mymalloc(sizeof(besthit_t)*NJ->maxnode); assert(2 * tophits->m <= NJ->maxnode); besthit_t bh; SetBestHit(newnode, NJ, nActive, /*OUT*/&bh, /*OUT*/allhits); qsort(/*IN/OUT*/allhits, NJ->maxnode, sizeof(besthit_t), CompareHitsByCriterion); SortSaveBestHits(newnode, /*IN/SORT*/allhits, /*nIn*/NJ->maxnode, /*nOut*/tophits->m, /*IN/OUT*/tophits); /* Do not need to call UpdateVisible because we set visible below */ /* And use the top 2*m entries to expand other best-hit lists, but only for top m */ int iHit; #ifdef OPENMP #pragma omp parallel for schedule(dynamic, 50) #endif for (iHit=0; iHit < tophits->m; iHit++) { if (allhits[iHit].i < 0) continue; int iNode = allhits[iHit].j; assert(iNode>=0); if (NJ->parent[iNode] >= 0) continue; top_hits_list_t *l = &tophits->top_hits_lists[iNode]; int nHitsOld = l->nHits; assert(nHitsOld <= tophits->m); l->age = 0; /* Merge: old hits into 0->nHitsOld and hits from iNode above that */ besthit_t *bothList = (besthit_t*)mymalloc(sizeof(besthit_t) * 3 * tophits->m); HitsToBestHits(/*IN*/l->hits, nHitsOld, iNode, /*OUT*/bothList); /* does not compute criterion */ for (i = 0; i < nHitsOld; i++) SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/&bothList[i]); if (nActive <= 2 * tophits->m) l->hitSource = -1; /* abandon the 2nd-level top-hits heuristic */ int nNewHits = l->hitSource >= 0 ? 
tophits->q : tophits->m; assert(nNewHits > 0); TransferBestHits(/*IN/UPDATE*/NJ, nActive, iNode, /*IN*/allhits, /*nOldHits*/2 * nNewHits, /*OUT*/&bothList[nHitsOld], /*updateDist*/false); /* rely on UniqueBestHits to update dist and/or criterion */ int nUnique2; besthit_t *uniqueList2 = UniqueBestHits(/*IN/UPDATE*/NJ, nActive, /*IN/SORT*/bothList, nHitsOld + 2 * nNewHits, /*OUT*/&nUnique2); assert(nUnique2 > 0); bothList = myfree(bothList,3 * tophits->m * sizeof(besthit_t)); /* Note this will overwrite l, but we saved nHitsOld */ SortSaveBestHits(iNode, /*IN/SORT*/uniqueList2, /*nIn*/nUnique2, /*nOut*/nNewHits, /*IN/OUT*/tophits); /* will update topvisible below */ tophits->visible[iNode] = tophits->top_hits_lists[iNode].hits[0]; uniqueList2 = myfree(uniqueList2, (nHitsOld + 2 * tophits->m) * sizeof(besthit_t)); } ResetTopVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits); /* outside of the parallel phase */ allhits = myfree(allhits,sizeof(besthit_t)*NJ->maxnode); } uniqueList = myfree(uniqueList, nUniqueAlloc * sizeof(besthit_t)); if (verbose > 2) { fprintf(stderr, "New top-hit list for %d profile-ops %ld (out-ops %ld): source %d age %d members ", newnode, profileOps - startProfileOps, outprofileOps - startOutProfileOps, lNew->hitSource, lNew->age); int i; for (i = 0; i < lNew->nHits; i++) fprintf(stderr, " %d", lNew->hits[i].j); fprintf(stderr,"\n"); } } void UpdateVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN*/besthit_t *tophitsNode, int nTopHits, /*IN/OUT*/top_hits_t *tophits) { int iHit; for(iHit = 0; iHit < nTopHits; iHit++) { besthit_t *hit = &tophitsNode[iHit]; if (hit->i < 0) continue; /* possible empty entries */ assert(NJ->parent[hit->i] < 0); assert(hit->j >= 0 && NJ->parent[hit->j] < 0); besthit_t visible; bool bSuccess = GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, hit->j, /*OUT*/&visible); if (!bSuccess || hit->criterion < visible.criterion) { if (bSuccess) nVisibleUpdate++; hit_t *v = &tophits->visible[hit->j]; v->j = hit->i; v->dist = hit->dist; UpdateTopVisible(NJ, nActive, hit->j, v, /*IN/OUT*/tophits); if(verbose>5) fprintf(stderr,"NewVisible %d %d %f\n", hit->j,v->j,v->dist); } } /* end loop over hits */ } /* Update the top-visible list to perhaps include visible[iNode] */ void UpdateTopVisible(/*IN*/NJ_t * NJ, int nActive, int iIn, /*IN*/hit_t *hit, /*IN/OUT*/top_hits_t *tophits) { assert(tophits != NULL); bool bIn = false; /* placed in the list */ int i; /* First, if the list is not full, put it in somewhere */ for (i = 0; i < tophits->nTopVisible && !bIn; i++) { int iNode = tophits->topvisible[i]; if (iNode == iIn) { /* this node is already in the top hit list */ bIn = true; } else if (iNode < 0 || NJ->parent[iNode] >= 0) { /* found an empty spot */ bIn = true; tophits->topvisible[i] = iIn; } } int iPosWorst = -1; double dCriterionWorst = -1e20; if (!bIn) { /* Search for the worst hit */ for (i = 0; i < tophits->nTopVisible && !bIn; i++) { int iNode = tophits->topvisible[i]; assert(iNode >= 0 && NJ->parent[iNode] < 0 && iNode != iIn); besthit_t visible; if (!GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, iNode, /*OUT*/&visible)) { /* found an empty spot */ tophits->topvisible[i] = iIn; bIn = true; } else if (visible.i == hit->j && visible.j == iIn) { /* the reverse hit is already in the top hit list */ bIn = true; } else if (visible.criterion >= dCriterionWorst) { iPosWorst = i; dCriterionWorst = visible.criterion; } } } if (!bIn && iPosWorst >= 0) { besthit_t visible = HitToBestHit(iIn, *hit); SetCriterion(/*IN/UPDATE*/NJ, nActive, 
/*IN/OUT*/&visible); if (visible.criterion < dCriterionWorst) { if (verbose > 2) { int iOld = tophits->topvisible[iPosWorst]; fprintf(stderr, "TopVisible replace %d=>%d with %d=>%d\n", iOld, tophits->visible[iOld].j, visible.i, visible.j); } tophits->topvisible[iPosWorst] = iIn; } } if (verbose > 2) { fprintf(stderr, "Updated TopVisible: "); for (i = 0; i < tophits->nTopVisible; i++) { int iNode = tophits->topvisible[i]; if (iNode >= 0 && NJ->parent[iNode] < 0) { besthit_t bh = HitToBestHit(iNode, tophits->visible[iNode]); SetDistCriterion(NJ, nActive, &bh); fprintf(stderr, " %d=>%d:%.4f", bh.i, bh.j, bh.criterion); } } fprintf(stderr,"\n"); } } /* Recompute the topvisible list */ void ResetTopVisible(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits) { besthit_t *visibleSorted = mymalloc(sizeof(besthit_t)*nActive); int nVisible = 0; /* #entries in visibleSorted */ int iNode; for (iNode = 0; iNode < NJ->maxnode; iNode++) { /* skip joins involving stale nodes */ if (NJ->parent[iNode] >= 0) continue; besthit_t v; if (GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, iNode, /*OUT*/&v)) { assert(nVisible < nActive); visibleSorted[nVisible++] = v; } } assert(nVisible > 0); qsort(/*IN/OUT*/visibleSorted,nVisible,sizeof(besthit_t),CompareHitsByCriterion); /* Only keep the top m items, and try to avoid duplicating i->j with j->i Note that visible(i) -> j does not necessarily imply visible(j) -> i, so we store what the pairing was (or -1 for not used yet) */ int *inTopVisible = malloc(sizeof(int) * NJ->maxnodes); int i; for (i = 0; i < NJ->maxnodes; i++) inTopVisible[i] = -1; if (verbose > 2) fprintf(stderr, "top-hit search: nActive %d nVisible %d considering up to %d items\n", nActive, nVisible, tophits->m); /* save the sorted indices in topvisible */ int iSave = 0; for (i = 0; i < nVisible && iSave < tophits->nTopVisible; i++) { besthit_t *v = &visibleSorted[i]; if (inTopVisible[v->i] != v->j) { /* not seen already */ tophits->topvisible[iSave++] = v->i; inTopVisible[v->i] = v->j; inTopVisible[v->j] = v->i; } } while(iSave < tophits->nTopVisible) tophits->topvisible[iSave++] = -1; myfree(visibleSorted, sizeof(besthit_t)*nActive); myfree(inTopVisible, sizeof(int) * NJ->maxnodes); tophits->topvisibleAge = 0; if (verbose > 2) { fprintf(stderr, "Reset TopVisible: "); for (i = 0; i < tophits->nTopVisible; i++) { int iNode = tophits->topvisible[i]; if (iNode < 0) break; fprintf(stderr, " %d=>%d", iNode, tophits->visible[iNode].j); } fprintf(stderr,"\n"); } } /* Find best hit to do in O(N*log(N) + m*L*log(N)) time, by copying and sorting the visible list updating out-distances for the top (up to m) candidates selecting the best hit if !fastest then local hill-climbing for a better join, using best-hit lists only, and updating all out-distances in every best-hit list */ void TopHitNJSearch(/*IN/UPDATE*/NJ_t *NJ, int nActive, /*IN/OUT*/top_hits_t *tophits, /*OUT*/besthit_t *join) { /* first, do we have at least m/2 candidates in topvisible? 
And remember the best one */ int nCandidate = 0; int iNodeBestCandidate = -1; double dBestCriterion = 1e20; int i; for (i = 0; i < tophits->nTopVisible; i++) { int iNode = tophits->topvisible[i]; besthit_t visible; if (GetVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits, iNode, /*OUT*/&visible)) { nCandidate++; if (iNodeBestCandidate < 0 || visible.criterion < dBestCriterion) { iNodeBestCandidate = iNode; dBestCriterion = visible.criterion; } } } tophits->topvisibleAge++; /* Note we may have only nActive/2 joins b/c we try to store them once */ if (2 * tophits->topvisibleAge > tophits->m || (3*nCandidate < tophits->nTopVisible && 3*nCandidate < nActive)) { /* recompute top visible */ if (verbose > 2) fprintf(stderr, "Resetting the top-visible list at nActive=%d\n",nActive); /* If age is low, then our visible set is becoming too sparse, because we have recently recomputed the top visible subset. This is very rare but can happen with -fastest. A quick-and-dirty solution is to walk up the parents to get additional entries in top hit lists. To ensure that the visible set becomes full, pick an arbitrary node if walking up terminates at self. */ if (tophits->topvisibleAge <= 2) { if (verbose > 2) fprintf(stderr, "Expanding visible set by walking up to active nodes at nActive=%d\n", nActive); int iNode; for (iNode = 0; iNode < NJ->maxnode; iNode++) { if (NJ->parent[iNode] >= 0) continue; hit_t *v = &tophits->visible[iNode]; int newj = ActiveAncestor(NJ, v->j); if (newj >= 0 && newj != v->j) { if (newj == iNode) { /* pick arbitrarily */ newj = 0; while (NJ->parent[newj] >= 0 || newj == iNode) newj++; } assert(newj >= 0 && newj < NJ->maxnodes && newj != iNode && NJ->parent[newj] < 0); /* Set v to point to newj */ besthit_t bh = { iNode, newj, -1e20, -1e20, -1e20 }; SetDistCriterion(NJ, nActive, /*IN/OUT*/&bh); v->j = newj; v->dist = bh.dist; } } } ResetTopVisible(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/tophits); /* and recurse to try again */ TopHitNJSearch(NJ, nActive, tophits, join); return; } if (verbose > 2) fprintf(stderr, "Top-visible list size %d (nActive %d m %d)\n", nCandidate, nActive, tophits->m); assert(iNodeBestCandidate >= 0 && NJ->parent[iNodeBestCandidate] < 0); bool bSuccess = GetVisible(NJ, nActive, tophits, iNodeBestCandidate, /*OUT*/join); assert(bSuccess); assert(join->i >= 0 && NJ->parent[join->i] < 0); assert(join->j >= 0 && NJ->parent[join->j] < 0); if(fastest) return; int changed; do { changed = 0; besthit_t bestI; GetBestFromTopHits(join->i, NJ, nActive, tophits, /*OUT*/&bestI); assert(bestI.i == join->i); if (bestI.j != join->j && bestI.criterion < join->criterion) { changed = 1; if (verbose>2) fprintf(stderr,"BetterI\t%d\t%d\t%d\t%d\t%f\t%f\n", join->i,join->j,bestI.i,bestI.j, join->criterion,bestI.criterion); *join = bestI; } besthit_t bestJ; GetBestFromTopHits(join->j, NJ, nActive, tophits, /*OUT*/&bestJ); assert(bestJ.i == join->j); if (bestJ.j != join->i && bestJ.criterion < join->criterion) { changed = 1; if (verbose>2) fprintf(stderr,"BetterJ\t%d\t%d\t%d\t%d\t%f\t%f\n", join->i,join->j,bestJ.i,bestJ.j, join->criterion,bestJ.criterion); *join = bestJ; } if(changed) nHillBetter++; } while(changed); } int NGaps(/*IN*/NJ_t *NJ, int iNode) { assert(iNode < NJ->nSeq); int nGaps = 0; int p; for(p=0; p<NJ->nPos; p++) { if (NJ->profiles[iNode]->codes[p] == NOCODE) nGaps++; } return(nGaps); } int CompareHitsByCriterion(const void *c1, const void *c2) { const besthit_t *hit1 = (besthit_t*)c1; const besthit_t *hit2 = (besthit_t*)c2; if (hit1->criterion < hit2->criterion) 
return(-1); if (hit1->criterion > hit2->criterion) return(1); return(0); } int CompareHitsByIJ(const void *c1, const void *c2) { const besthit_t *hit1 = (besthit_t*)c1; const besthit_t *hit2 = (besthit_t*)c2; return hit1->i != hit2->i ? hit1->i - hit2->i : hit1->j - hit2->j; } void SortSaveBestHits(int iNode, /*IN/SORT*/besthit_t *besthits, int nIn, int nOut, /*IN/OUT*/top_hits_t *tophits) { assert(nIn > 0); assert(nOut > 0); top_hits_list_t *l = &tophits->top_hits_lists[iNode]; /* */ qsort(/*IN/OUT*/besthits,nIn,sizeof(besthit_t),CompareHitsByCriterion); /* First count how many we will save Not sure if removing duplicates is actually necessary. */ int nSave = 0; int jLast = -1; int iBest; for (iBest = 0; iBest < nIn && nSave < nOut; iBest++) { if (besthits[iBest].i < 0) continue; assert(besthits[iBest].i == iNode); int j = besthits[iBest].j; if (j != iNode && j != jLast && j >= 0) { nSave++; jLast = j; } } assert(nSave > 0); #ifdef OPENMP omp_set_lock(&tophits->locks[iNode]); #endif if (l->hits != NULL) { l->hits = myfree(l->hits, l->nHits * sizeof(hit_t)); l->nHits = 0; } l->hits = mymalloc(sizeof(hit_t) * nSave); l->nHits = nSave; int iSave = 0; jLast = -1; for (iBest = 0; iBest < nIn && iSave < nSave; iBest++) { int j = besthits[iBest].j; if (j != iNode && j != jLast && j >= 0) { l->hits[iSave].j = j; l->hits[iSave].dist = besthits[iBest].dist; iSave++; jLast = j; } } #ifdef OPENMP omp_unset_lock(&tophits->locks[iNode]); #endif assert(iSave == nSave); } void TransferBestHits(/*IN/UPDATE*/NJ_t *NJ, int nActive, int iNode, /*IN*/besthit_t *oldhits, int nOldHits, /*OUT*/besthit_t *newhits, bool updateDistances) { assert(iNode >= 0); assert(NJ->parent[iNode] < 0); int iBest; for(iBest = 0; iBest < nOldHits; iBest++) { besthit_t *old = &oldhits[iBest]; besthit_t *new = &newhits[iBest]; new->i = iNode; new->j = ActiveAncestor(/*IN*/NJ, old->j); new->dist = old->dist; /* may get reset below */ new->weight = old->weight; new->criterion = old->criterion; if(new->j < 0 || new->j == iNode) { new->weight = 0; new->dist = -1e20; new->criterion = 1e20; } else if (new->i != old->i || new->j != old->j) { if (updateDistances) SetDistCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/new); else { new->dist = -1e20; new->criterion = 1e20; } } else { if (updateDistances) SetCriterion(/*IN/UPDATE*/NJ, nActive, /*IN/OUT*/new); else new->criterion = 1e20; /* leave dist alone */ } } } void HitsToBestHits(/*IN*/hit_t *hits, int nHits, int iNode, /*OUT*/besthit_t *newhits) { int i; for (i = 0; i < nHits; i++) { hit_t *hit = &hits[i]; besthit_t *bh = &newhits[i]; bh->i = iNode; bh->j = hit->j; bh->dist = hit->dist; bh->criterion = 1e20; bh->weight = -1; /* not the true value -- we compute these directly when needed */ } } besthit_t HitToBestHit(int i, hit_t hit) { besthit_t bh; bh.i = i; bh.j = hit.j; bh.dist = hit.dist; bh.criterion = 1e20; bh.weight = -1; return(bh); } char *OpenMPString(void) { #ifdef OPENMP static char buf[100]; sprintf(buf, ", OpenMP (%d threads)", omp_get_max_threads()); return(buf); #else return(""); #endif } /* Algorithm 26.2.17 from Abromowitz and Stegun, Handbook of Mathematical Functions Absolute accuracy of only about 1e-7, which is enough for us */ double pnorm(double x) { double b1 = 0.319381530; double b2 = -0.356563782; double b3 = 1.781477937; double b4 = -1.821255978; double b5 = 1.330274429; double p = 0.2316419; double c = 0.39894228; if(x >= 0.0) { double t = 1.0 / ( 1.0 + p * x ); return (1.0 - c * exp( -x * x / 2.0 ) * t * ( t *( t * ( t * ( t * b5 + b4 ) + b3 ) + b2 ) + b1 )); 
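    /* Note on the formula used above: with t = 1/(1+p*x), the returned value is
       Phi(x) ~= 1 - phi(x)*(b1*t + b2*t^2 + b3*t^3 + b4*t^4 + b5*t^5),
       where phi(x) = c*exp(-x*x/2) is the standard normal density (c ~= 1/sqrt(2*pi)).
       The branch below handles x < 0 through the symmetry Phi(x) = 1 - Phi(-x). */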
} /*else*/ double t = 1.0 / ( 1.0 - p * x ); return ( c * exp( -x * x / 2.0 ) * t * ( t *( t * ( t * ( t * b5 + b4 ) + b3 ) + b2 ) + b1 )); } void *mymalloc(size_t sz) { if (sz == 0) return(NULL); void *new = malloc(sz); if (new == NULL) { fprintf(stderr, "Out of memory\n"); exit(1); } szAllAlloc += sz; mymallocUsed += sz; #ifdef TRACK_MEMORY struct mallinfo mi = mallinfo(); if (mi.arena+mi.hblkhd > maxmallocHeap) maxmallocHeap = mi.arena+mi.hblkhd; #endif /* gcc malloc should always return 16-byte-aligned values... */ assert(IS_ALIGNED(new)); return (new); } void *mymemdup(void *data, size_t sz) { if(data==NULL) return(NULL); void *new = mymalloc(sz); memcpy(/*to*/new, /*from*/data, sz); return(new); } void *myrealloc(void *data, size_t szOld, size_t szNew, bool bCopy) { if (data == NULL && szOld == 0) return(mymalloc(szNew)); if (data == NULL || szOld == 0 || szNew == 0) { fprintf(stderr,"Empty myrealloc\n"); exit(1); } if (szOld == szNew) return(data); void *new = NULL; if (bCopy) { /* Try to reduce memory fragmentation by allocating anew and copying Seems to help in practice */ new = mymemdup(data, szNew); myfree(data, szOld); } else { new = realloc(data,szNew); if (new == NULL) { fprintf(stderr, "Out of memory\n"); exit(1); } assert(IS_ALIGNED(new)); szAllAlloc += (szNew-szOld); mymallocUsed += (szNew-szOld); #ifdef TRACK_MEMORY struct mallinfo mi = mallinfo(); if (mi.arena+mi.hblkhd > maxmallocHeap) maxmallocHeap = mi.arena+mi.hblkhd; #endif } return(new); } void *myfree(void *p, size_t sz) { if(p==NULL) return(NULL); free(p); mymallocUsed -= sz; return(NULL); } /******************************************************************************/ /* Minimization of a 1-dimensional function by Brent's method (Numerical Recipes) * Borrowed from Tree-Puzzle 5.1 util.c under GPL * Modified by M.N.P to pass in the accessory data for the optimization function, * to use 2x bounds around the starting guess and expand them if necessary, * and to use both a fractional and an absolute tolerance */ #define ITMAX 100 #define CGOLD 0.3819660 #define TINY 1.0e-20 #define ZEPS 1.0e-10 #define SHFT(a,b,c,d) (a)=(b);(b)=(c);(c)=(d); #define SIGN(a,b) ((b) >= 0.0 ? fabs(a) : -fabs(a)) /* Brents method in one dimension */ double brent(double ax, double bx, double cx, double (*f)(double, void *), void *data, double ftol, double atol, double *foptx, double *f2optx, double fax, double fbx, double fcx) { int iter; double a,b,d=0,etemp,fu,fv,fw,fx,p,q,r,tol1,tol2,u,v,w,x,xm; double xw,wv,vx; double e=0.0; a=(ax < cx ? ax : cx); b=(ax > cx ? ax : cx); x=bx; fx=fbx; if (fax < fcx) { w=ax; fw=fax; v=cx; fv=fcx; } else { w=cx; fw=fcx; v=ax; fv=fax; } for (iter=1;iter<=ITMAX;iter++) { xm=0.5*(a+b); tol1=ftol*fabs(x); tol2=2.0*(tol1+ZEPS); if (fabs(x-xm) <= (tol2-0.5*(b-a)) || fabs(a-b) < atol) { *foptx = fx; xw = x-w; wv = w-v; vx = v-x; *f2optx = 2.0*(fv*xw + fx*wv + fw*vx)/ (v*v*xw + x*x*wv + w*w*vx); return x; } if (fabs(e) > tol1) { r=(x-w)*(fx-fv); q=(x-v)*(fx-fw); p=(x-v)*q-(x-w)*r; q=2.0*(q-r); if (q > 0.0) p = -p; q=fabs(q); etemp=e; e=d; if (fabs(p) >= fabs(0.5*q*etemp) || p <= q*(a-x) || p >= q*(b-x)) d=CGOLD*(e=(x >= xm ? a-x : b-x)); else { d=p/q; u=x+d; if (u-a < tol2 || b-u < tol2) d=SIGN(tol1,xm-x); } } else { d=CGOLD*(e=(x >= xm ? a-x : b-x)); } u=(fabs(d) >= tol1 ? 
x+d : x+SIGN(tol1,d)); fu=(*f)(u,data); if (fu <= fx) { if (u >= x) a=x; else b=x; SHFT(v,w,x,u) SHFT(fv,fw,fx,fu) } else { if (u < x) a=u; else b=u; if (fu <= fw || w == x) { v=w; w=u; fv=fw; fw=fu; } else if (fu <= fv || v == x || v == w) { v=u; fv=fu; } } } *foptx = fx; xw = x-w; wv = w-v; vx = v-x; *f2optx = 2.0*(fv*xw + fx*wv + fw*vx)/ (v*v*xw + x*x*wv + w*w*vx); return x; } /* brent */ #undef ITMAX #undef CGOLD #undef ZEPS #undef SHFT #undef SIGN /* one-dimensional minimization - as input a lower and an upper limit and a trial value for the minimum is needed: xmin < xguess < xmax the function and a fractional tolerance has to be specified onedimenmin returns the optimal x value and the value of the function and its second derivative at this point */ double onedimenmin(double xmin, double xguess, double xmax, double (*f)(double,void*), void *data, double ftol, double atol, /*OUT*/double *fx, /*OUT*/double *f2x) { double optx, ax, bx, cx, fa, fb, fc; /* first attempt to bracketize minimum */ if (xguess == xmin) { ax = xmin; bx = 2.0*xguess; cx = 10.0*xguess; } else if (xguess <= 2.0 * xmin) { ax = xmin; bx = xguess; cx = 5.0*xguess; } else { ax = 0.5*xguess; bx = xguess; cx = 2.0*xguess; } if (cx > xmax) cx = xmax; if (bx >= cx) bx = 0.5*(ax+cx); if (verbose > 4) fprintf(stderr, "onedimenmin lo %.4f guess %.4f hi %.4f range %.4f %.4f\n", ax, bx, cx, xmin, xmax); /* ideally this range includes the true minimum, i.e., fb < fa and fb < fc if not, we gradually expand the boundaries until it does, or we near the boundary of the allowed range and use that */ fa = (*f)(ax,data); fb = (*f)(bx,data); fc = (*f)(cx,data); while(fa < fb && ax > xmin) { ax = (ax+xmin)/2.0; if (ax < 2.0*xmin) /* give up on shrinking the region */ ax = xmin; fa = (*f)(ax,data); } while(fc < fb && cx < xmax) { cx = (cx+xmax)/2.0; if (cx > xmax * 0.95) cx = xmax; fc = (*f)(cx,data); } optx = brent(ax, bx, cx, f, data, ftol, atol, fx, f2x, fa, fb, fc); if (verbose > 4) fprintf(stderr, "onedimenmin reaches optimum f(%.4f) = %.4f f2x %.4f\n", optx, *fx, *f2x); return optx; /* return optimal x */ } /* onedimenmin */ /* Numerical code for the gamma distribution is modified from the PhyML 3 code (GNU public license) of Stephane Guindon */ double LnGamma (double alpha) { /* returns ln(gamma(alpha)) for alpha>0, accurate to 10 decimal places. Stirling's formula is used for the central polynomial part of the procedure. Pike MC & Hill ID (1966) Algorithm 291: Logarithm of the gamma function. Communications of the Association for Computing Machinery, 9:684 */ double x=alpha, f=0, z; if (x<7) { f=1; z=x-1; while (++z<7) f*=z; x=z; f=-(double)log(f); } z = 1/(x*x); return f + (x-0.5)*(double)log(x) - x + .918938533204673 + (((-.000595238095238*z+.000793650793651)*z-.002777777777778)*z +.083333333333333)/x; } double IncompleteGamma(double x, double alpha, double ln_gamma_alpha) { /* returns the incomplete gamma ratio I(x,alpha) where x is the upper limit of the integration and alpha is the shape parameter. returns (-1) if in error ln_gamma_alpha = ln(Gamma(alpha)), is almost redundant. (1) series expansion if (alpha>x || x<=1) (2) continued fraction otherwise RATNEST FORTRAN by Bhattacharjee GP (1970) The incomplete gamma integral. 
Applied Statistics, 19: 285-287 (AS32) */ int i; double p=alpha, g=ln_gamma_alpha; double accurate=1e-8, overflow=1e30; double factor, gin=0, rn=0, a=0,b=0,an=0,dif=0, term=0, pn[6]; if (x==0) return (0); if (x<0 || p<=0) return (-1); factor=(double)exp(p*(double)log(x)-x-g); if (x>1 && x>=p) goto l30; /* (1) series expansion */ gin=1; term=1; rn=p; l20: rn++; term*=x/rn; gin+=term; if (term > accurate) goto l20; gin*=factor/p; goto l50; l30: /* (2) continued fraction */ a=1-p; b=a+x+1; term=0; pn[0]=1; pn[1]=x; pn[2]=x+1; pn[3]=x*b; gin=pn[2]/pn[3]; l32: a++; b+=2; term++; an=a*term; for (i=0; i<2; i++) pn[i+4]=b*pn[i+2]-an*pn[i]; if (pn[5] == 0) goto l35; rn=pn[4]/pn[5]; dif=fabs(gin-rn); if (dif>accurate) goto l34; if (dif<=accurate*rn) goto l42; l34: gin=rn; l35: for (i=0; i<4; i++) pn[i]=pn[i+2]; if (fabs(pn[4]) < overflow) goto l32; for (i=0; i<4; i++) pn[i]/=overflow; goto l32; l42: gin=1-factor*gin; l50: return (gin); } double PGamma(double x, double alpha) { /* scale = 1/alpha */ return IncompleteGamma(x*alpha,alpha,LnGamma(alpha)); } /* helper function to subtract timval structures */ /* Subtract the `struct timeval' values X and Y, storing the result in RESULT. Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract (struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } double clockDiff(/*IN*/struct timeval *clock_start) { struct timeval time_now, elapsed; gettimeofday(/*OUT*/&time_now,NULL); timeval_subtract(/*OUT*/&elapsed,/*IN*/&time_now,/*IN*/clock_start); return(elapsed.tv_sec + elapsed.tv_usec*1e-6); } /* The random number generator is taken from D E Knuth http://www-cs-faculty.stanford.edu/~knuth/taocp.html */ /* This program by D E Knuth is in the public domain and freely copyable. * It is explained in Seminumerical Algorithms, 3rd edition, Section 3.6 * (or in the errata to the 2nd edition --- see * http://www-cs-faculty.stanford.edu/~knuth/taocp.html * in the changes to Volume 2 on pages 171 and following). */ /* N.B. The MODIFICATIONS introduced in the 9th printing (2002) are included here; there's no backwards compatibility with the original. */ /* This version also adopts Brendan McKay's suggestion to accommodate naive users who forget to call ran_start(seed). */ /* If you find any bugs, please report them immediately to * [email protected] * (and you will be rewarded if the bug is genuine). Thanks! */ /************ see the book for explanations and caveats! 
*******************/ /************ in particular, you need two's complement arithmetic **********/ #define KK 100 /* the long lag */ #define LL 37 /* the short lag */ #define MM (1L<<30) /* the modulus */ #define mod_diff(x,y) (((x)-(y))&(MM-1)) /* subtraction mod MM */ long ran_x[KK]; /* the generator state */ #ifdef __STDC__ void ran_array(long aa[],int n) #else void ran_array(aa,n) /* put n new random numbers in aa */ long *aa; /* destination */ int n; /* array length (must be at least KK) */ #endif { register int i,j; for (j=0;j<KK;j++) aa[j]=ran_x[j]; for (;j<n;j++) aa[j]=mod_diff(aa[j-KK],aa[j-LL]); for (i=0;i<LL;i++,j++) ran_x[i]=mod_diff(aa[j-KK],aa[j-LL]); for (;i<KK;i++,j++) ran_x[i]=mod_diff(aa[j-KK],ran_x[i-LL]); } /* the following routines are from exercise 3.6--15 */ /* after calling ran_start, get new randoms by, e.g., "x=ran_arr_next()" */ #define QUALITY 1009 /* recommended quality level for high-res use */ long ran_arr_buf[QUALITY]; long ran_arr_dummy=-1, ran_arr_started=-1; long *ran_arr_ptr=&ran_arr_dummy; /* the next random number, or -1 */ #define TT 70 /* guaranteed separation between streams */ #define is_odd(x) ((x)&1) /* units bit of x */ #ifdef __STDC__ void ran_start(long seed) #else void ran_start(seed) /* do this before using ran_array */ long seed; /* selector for different streams */ #endif { register int t,j; long x[KK+KK-1]; /* the preparation buffer */ register long ss=(seed+2)&(MM-2); for (j=0;j<KK;j++) { x[j]=ss; /* bootstrap the buffer */ ss<<=1; if (ss>=MM) ss-=MM-2; /* cyclic shift 29 bits */ } x[1]++; /* make x[1] (and only x[1]) odd */ for (ss=seed&(MM-1),t=TT-1; t; ) { for (j=KK-1;j>0;j--) x[j+j]=x[j], x[j+j-1]=0; /* "square" */ for (j=KK+KK-2;j>=KK;j--) x[j-(KK-LL)]=mod_diff(x[j-(KK-LL)],x[j]), x[j-KK]=mod_diff(x[j-KK],x[j]); if (is_odd(ss)) { /* "multiply by z" */ for (j=KK;j>0;j--) x[j]=x[j-1]; x[0]=x[KK]; /* shift the buffer cyclically */ x[LL]=mod_diff(x[LL],x[KK]); } if (ss) ss>>=1; else t--; } for (j=0;j<LL;j++) ran_x[j+KK-LL]=x[j]; for (;j<KK;j++) ran_x[j-LL]=x[j]; for (j=0;j<10;j++) ran_array(x,KK+KK-1); /* warm things up */ ran_arr_ptr=&ran_arr_started; } #define ran_arr_next() (*ran_arr_ptr>=0? 
*ran_arr_ptr++: ran_arr_cycle()) long ran_arr_cycle() { if (ran_arr_ptr==&ran_arr_dummy) ran_start(314159L); /* the user forgot to initialize */ ran_array(ran_arr_buf,QUALITY); ran_arr_buf[KK]=-1; ran_arr_ptr=ran_arr_buf+1; return ran_arr_buf[0]; } /* end of code from Knuth */ double knuth_rand() { return(9.31322574615479e-10 * ran_arr_next()); /* multiply by 2**-30 */ } hashstrings_t *MakeHashtable(char **strings, int nStrings) { hashstrings_t *hash = (hashstrings_t*)mymalloc(sizeof(hashstrings_t)); hash->nBuckets = 8*nStrings; hash->buckets = (hashbucket_t*)mymalloc(sizeof(hashbucket_t) * hash->nBuckets); int i; for (i=0; i < hash->nBuckets; i++) { hash->buckets[i].string = NULL; hash->buckets[i].nCount = 0; hash->buckets[i].first = -1; } for (i=0; i < nStrings; i++) { hashiterator_t hi = FindMatch(hash, strings[i]); if (hash->buckets[hi].string == NULL) { /* save a unique entry */ assert(hash->buckets[hi].nCount == 0); hash->buckets[hi].string = strings[i]; hash->buckets[hi].nCount = 1; hash->buckets[hi].first = i; } else { /* record a duplicate entry */ assert(hash->buckets[hi].string != NULL); assert(strcmp(hash->buckets[hi].string, strings[i]) == 0); assert(hash->buckets[hi].first >= 0); hash->buckets[hi].nCount++; } } return(hash); } hashstrings_t *FreeHashtable(hashstrings_t* hash) { if (hash != NULL) { myfree(hash->buckets, sizeof(hashbucket_t) * hash->nBuckets); myfree(hash, sizeof(hashstrings_t)); } return(NULL); } #define MAXADLER 65521 hashiterator_t FindMatch(hashstrings_t *hash, char *string) { /* Adler-32 checksum */ unsigned int hashA = 1; unsigned int hashB = 0; char *p; for (p = string; *p != '\0'; p++) { hashA = ((unsigned int)*p + hashA); hashB = hashA+hashB; } hashA %= MAXADLER; hashB %= MAXADLER; hashiterator_t hi = (hashB*65536+hashA) % hash->nBuckets; while(hash->buckets[hi].string != NULL && strcmp(hash->buckets[hi].string, string) != 0) { hi++; if (hi >= hash->nBuckets) hi = 0; } return(hi); } char *GetHashString(hashstrings_t *hash, hashiterator_t hi) { return(hash->buckets[hi].string); } int HashCount(hashstrings_t *hash, hashiterator_t hi) { return(hash->buckets[hi].nCount); } int HashFirst(hashstrings_t *hash, hashiterator_t hi) { return(hash->buckets[hi].first); } uniquify_t *UniquifyAln(alignment_t *aln) { int nUniqueSeq = 0; char **uniqueSeq = (char**)mymalloc(aln->nSeq * sizeof(char*)); /* iUnique -> seq */ int *uniqueFirst = (int*)mymalloc(aln->nSeq * sizeof(int)); /* iUnique -> iFirst in aln */ int *alnNext = (int*)mymalloc(aln->nSeq * sizeof(int)); /* i in aln -> next, or -1 */ int *alnToUniq = (int*)mymalloc(aln->nSeq * sizeof(int)); /* i in aln -> iUnique; many -> -1 */ int i; for (i = 0; i < aln->nSeq; i++) { uniqueSeq[i] = NULL; uniqueFirst[i] = -1; alnNext[i] = -1; alnToUniq[i] = -1; } hashstrings_t *hashseqs = MakeHashtable(aln->seqs, aln->nSeq); for (i=0; i<aln->nSeq; i++) { hashiterator_t hi = FindMatch(hashseqs,aln->seqs[i]); int first = HashFirst(hashseqs,hi); if (first == i) { uniqueSeq[nUniqueSeq] = aln->seqs[i]; uniqueFirst[nUniqueSeq] = i; alnToUniq[i] = nUniqueSeq; nUniqueSeq++; } else { int last = first; while (alnNext[last] != -1) last = alnNext[last]; assert(last>=0); alnNext[last] = i; assert(alnToUniq[last] >= 0 && alnToUniq[last] < nUniqueSeq); alnToUniq[i] = alnToUniq[last]; } } assert(nUniqueSeq>0); hashseqs = FreeHashtable(hashseqs); uniquify_t *uniquify = (uniquify_t*)mymalloc(sizeof(uniquify_t)); uniquify->nSeq = aln->nSeq; uniquify->nUnique = nUniqueSeq; uniquify->uniqueFirst = uniqueFirst; uniquify->alnNext = alnNext; 
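  /* Illustration: if alignment sequences 0, 3, and 7 are identical and map to unique
     index u, then uniqueFirst[u]=0, alnNext[0]=3, alnNext[3]=7, alnNext[7]=-1, and
     alnToUniq[0]=alnToUniq[3]=alnToUniq[7]=u. */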
uniquify->alnToUniq = alnToUniq; uniquify->uniqueSeq = uniqueSeq; return(uniquify); } uniquify_t *FreeUniquify(uniquify_t *unique) { if (unique != NULL) { myfree(unique->uniqueFirst, sizeof(int)*unique->nSeq); myfree(unique->alnNext, sizeof(int)*unique->nSeq); myfree(unique->alnToUniq, sizeof(int)*unique->nSeq); myfree(unique->uniqueSeq, sizeof(char*)*unique->nSeq); myfree(unique,sizeof(uniquify_t)); unique = NULL; } return(unique); } traversal_t InitTraversal(NJ_t *NJ) { traversal_t worked = (bool*)mymalloc(sizeof(bool)*NJ->maxnodes); int i; for (i=0; i<NJ->maxnodes; i++) worked[i] = false; return(worked); } void SkipTraversalInto(int node, /*IN/OUT*/traversal_t traversal) { traversal[node] = true; } int TraversePostorder(int node, NJ_t *NJ, /*IN/OUT*/traversal_t traversal, /*OPTIONAL OUT*/bool *pUp) { if (pUp) *pUp = false; while(1) { assert(node >= 0); /* move to a child if possible */ bool found = false; int iChild; for (iChild=0; iChild < NJ->child[node].nChild; iChild++) { int child = NJ->child[node].child[iChild]; if (!traversal[child]) { node = child; found = true; break; } } if (found) continue; /* keep moving down */ if (!traversal[node]) { traversal[node] = true; return(node); } /* If we've already done this node, need to move up */ if (node == NJ->root) return(-1); /* nowhere to go -- done traversing */ node = NJ->parent[node]; /* If we go up to someplace that was already marked as visited, this is due to a change in topology, so return it marked as "up" */ if (pUp && traversal[node]) { *pUp = true; return(node); } } } traversal_t FreeTraversal(traversal_t traversal, NJ_t *NJ) { myfree(traversal, sizeof(bool)*NJ->maxnodes); return(NULL); } profile_t **UpProfiles(NJ_t *NJ) { profile_t **upProfiles = (profile_t**)mymalloc(sizeof(profile_t*)*NJ->maxnodes); int i; for (i=0; i<NJ->maxnodes; i++) upProfiles[i] = NULL; return(upProfiles); } profile_t *GetUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int outnode, bool useML) { assert(outnode != NJ->root && outnode >= NJ->nSeq); /* not for root or leaves */ if (upProfiles[outnode] != NULL) return(upProfiles[outnode]); int depth; int *pathToRoot = PathToRoot(NJ, outnode, /*OUT*/&depth); int i; /* depth-1 is root */ for (i = depth-2; i>=0; i--) { int node = pathToRoot[i]; if (upProfiles[node] == NULL) { /* Note -- SetupABCD may call GetUpProfile, but it should do it farther up in the path to the root */ profile_t *profiles[4]; int nodeABCD[4]; SetupABCD(NJ, node, /*OUT*/profiles, /*IN/OUT*/upProfiles, /*OUT*/nodeABCD, useML); if (useML) { /* If node is a child of root, then the 4th profile is of the 2nd root-sibling of node Otherwise, the 4th profile is the up-profile of the parent of node, and that is the branch-length we need */ double lenC = NJ->branchlength[nodeABCD[2]]; double lenD = NJ->branchlength[nodeABCD[3]]; if (verbose > 3) { fprintf(stderr, "Computing UpProfile for node %d with lenC %.4f lenD %.4f pair-loglk %.3f\n", node, lenC, lenD, PairLogLk(profiles[2],profiles[3],lenC+lenD,NJ->nPos,NJ->transmat,&NJ->rates, /*site_lk*/NULL)); PrintNJInternal(stderr, NJ, /*useLen*/true); } upProfiles[node] = PosteriorProfile(/*C*/profiles[2], /*D*/profiles[3], lenC, lenD, NJ->transmat, &NJ->rates, NJ->nPos, NJ->nConstraints); } else { profile_t *profilesCDAB[4] = { profiles[2], profiles[3], profiles[0], profiles[1] }; double weight = QuartetWeight(profilesCDAB, NJ->distance_matrix, NJ->nPos); if (verbose>3) fprintf(stderr, "Compute upprofile of %d from %d and parents (vs. 
children %d %d) with weight %.3f\n", node, nodeABCD[2], nodeABCD[0], nodeABCD[1], weight); upProfiles[node] = AverageProfile(profiles[2], profiles[3], NJ->nPos, NJ->nConstraints, NJ->distance_matrix, weight); } } } FreePath(pathToRoot,NJ); assert(upProfiles[outnode] != NULL); return(upProfiles[outnode]); } profile_t *DeleteUpProfile(/*IN/OUT*/profile_t **upProfiles, NJ_t *NJ, int node) { assert(node>=0 && node < NJ->maxnodes); if (upProfiles[node] != NULL) upProfiles[node] = FreeProfile(upProfiles[node], NJ->nPos, NJ->nConstraints); /* returns NULL */ return(NULL); } profile_t **FreeUpProfiles(profile_t **upProfiles, NJ_t *NJ) { int i; int nUsed = 0; for (i=0; i < NJ->maxnodes; i++) { if (upProfiles[i] != NULL) nUsed++; DeleteUpProfile(upProfiles, NJ, i); } myfree(upProfiles, sizeof(profile_t*)*NJ->maxnodes); if (verbose >= 3) fprintf(stderr,"FreeUpProfiles -- freed %d\n", nUsed); return(NULL); } int *PathToRoot(NJ_t *NJ, int node, /*OUT*/int *outDepth) { int *pathToRoot = (int*)mymalloc(sizeof(int)*NJ->maxnodes); int depth = 0; int ancestor = node; while(ancestor >= 0) { pathToRoot[depth] = ancestor; ancestor = NJ->parent[ancestor]; depth++; } *outDepth = depth; return(pathToRoot); } int *FreePath(int *path, NJ_t *NJ) { myfree(path, sizeof(int)*NJ->maxnodes); return(NULL); } transition_matrix_t *CreateGTR(double *r/*ac ag at cg ct gt*/, double *f/*acgt*/) { double matrix[4][MAXCODES]; assert(nCodes==4); int i, j; /* Place rates onto a symmetric matrix, but correct by f(target), so that stationary distribution f[] is maintained Leave diagonals as 0 (CreateTransitionMatrix will fix them) */ int imat = 0; for (i = 0; i < nCodes; i++) { matrix[i][i] = 0; for (j = i+1; j < nCodes; j++) { double rate = r[imat++]; assert(rate > 0); /* Want t(matrix) * f to be 0 */ matrix[i][j] = rate * f[i]; matrix[j][i] = rate * f[j]; } } /* Compute average mutation rate */ double total_rate = 0; for (i = 0; i < nCodes; i++) for (j = 0; j < nCodes; j++) total_rate += f[i] * matrix[i][j]; assert(total_rate > 1e-6); double inv = 1.0/total_rate; for (i = 0; i < nCodes; i++) for (j = 0; j < nCodes; j++) matrix[i][j] *= inv; return(CreateTransitionMatrix(matrix,f)); } transition_matrix_t *CreateTransitionMatrix(/*IN*/double matrix[MAXCODES][MAXCODES], /*IN*/double stat[MAXCODES]) { int i,j,k; transition_matrix_t *transmat = mymalloc(sizeof(transition_matrix_t)); double sqrtstat[20]; for (i = 0; i < nCodes; i++) { transmat->stat[i] = stat[i]; transmat->statinv[i] = 1.0/stat[i]; sqrtstat[i] = sqrt(stat[i]); } double sym[20*20]; /* symmetrized matrix M' */ /* set diagonals so columns sums are 0 before symmetrization */ for (i = 0; i < nCodes; i++) for (j = 0; j < nCodes; j++) sym[nCodes*i+j] = matrix[i][j]; for (j = 0; j < nCodes; j++) { double sum = 0; sym[nCodes*j+j] = 0; for (i = 0; i < nCodes; i++) sum += sym[nCodes*i+j]; sym[nCodes*j+j] = -sum; } /* M' = S**-1 M S */ for (i = 0; i < nCodes; i++) for (j = 0; j < nCodes; j++) sym[nCodes*i+j] *= sqrtstat[j]/sqrtstat[i]; /* eigen decomposition of M' -- note that eigenW is the transpose of what we want, which is eigenvectors in columns */ double eigenW[20*20], eval[20], e[20]; for (i = 0; i < nCodes*nCodes; i++) eigenW[i] = sym[i]; tred2(eigenW, nCodes, nCodes, eval, e); tqli(eval, e, nCodes , nCodes, eigenW); /* save eigenvalues */ for (i = 0; i < nCodes; i++) transmat->eigenval[i] = eval[i]; /* compute eigen decomposition of M into t(codeFreq): V = S*W */ /* compute inverse of V in eigeninv: V**-1 = t(W) S**-1 */ for (i = 0; i < nCodes; i++) { for (j = 0; j < nCodes; 
j++) { transmat->eigeninv[i][j] = eigenW[nCodes*i+j] / sqrtstat[j]; transmat->eigeninvT[j][i] = transmat->eigeninv[i][j]; } } for (i = 0; i < nCodes; i++) for (j = 0; j < nCodes; j++) transmat->codeFreq[i][j] = eigenW[j*nCodes+i] * sqrtstat[i]; /* codeFreq[NOCODE] is the rotation of (1,1,...) not (1/nCodes,1/nCodes,...), which gives correct posterior probabilities */ for (j = 0; j < nCodes; j++) { transmat->codeFreq[NOCODE][j] = 0.0; for (i = 0; i < nCodes; i++) transmat->codeFreq[NOCODE][j] += transmat->codeFreq[i][j]; } /* save some posterior probabilities for approximating later: first, we compute P(B | A, t) for t = approxMLnearT, by using V * exp(L*t) * V**-1 */ double expvalues[MAXCODES]; for (i = 0; i < nCodes; i++) expvalues[i] = exp(approxMLnearT * transmat->eigenval[i]); double LVinv[MAXCODES][MAXCODES]; /* exp(L*t) * V**-1 */ for (i = 0; i < nCodes; i++) { for (j = 0; j < nCodes; j++) LVinv[i][j] = transmat->eigeninv[i][j] * expvalues[i]; } /* matrix transform for converting A -> B given t: transt[i][j] = P(j->i | t) */ double transt[MAXCODES][MAXCODES]; for (i = 0; i < nCodes; i++) { for (j = 0; j < nCodes; j++) { transt[i][j] = 0; for (k = 0; k < nCodes; k++) transt[i][j] += transmat->codeFreq[i][k] * LVinv[k][j]; } } /* nearP[i][j] = P(parent = j | both children are i) = P(j | i,i) ~ stat(j) * P(j->i | t)**2 */ for (i = 0; i < nCodes; i++) { double nearP[MAXCODES]; double tot = 0; for (j = 0; j < nCodes; j++) { assert(transt[j][i] > 0); assert(transmat->stat[j] > 0); nearP[j] = transmat->stat[j] * transt[i][j] * transt[i][j]; tot += nearP[j]; } assert(tot > 0); for (j = 0; j < nCodes; j++) nearP[j] *= 1.0/tot; /* save nearP in transmat->nearP[i][] */ for (j = 0; j < nCodes; j++) transmat->nearP[i][j] = nearP[j]; /* multiply by 1/stat and rotate nearP */ for (j = 0; j < nCodes; j++) nearP[j] /= transmat->stat[j]; for (j = 0; j < nCodes; j++) { double rot = 0; for (k = 0; k < nCodes; k++) rot += nearP[k] * transmat->codeFreq[i][j]; transmat->nearFreq[i][j] = rot; } } return(transmat); assert(0); } distance_matrix_t *TransMatToDistanceMat(transition_matrix_t *transmat) { if (transmat == NULL) return(NULL); distance_matrix_t *dmat = mymalloc(sizeof(distance_matrix_t)); int i, j; for (i=0; i<nCodes; i++) { for (j=0; j<nCodes; j++) { dmat->distances[i][j] = 0; /* never actually used */ dmat->eigeninv[i][j] = transmat->eigeninv[i][j]; dmat->codeFreq[i][j] = transmat->codeFreq[i][j]; } } /* eigentot . 
rotated-vector is the total frequency of the unrotated vector (used to normalize in NormalizeFreq() For transition matrices, we rotate by transpose of eigenvectors, so we need to multiply by the inverse matrix by 1....1 to get this vector, or in other words, sum the columns */ for(i = 0; i<nCodes; i++) { dmat->eigentot[i] = 0.0; for (j = 0; j<nCodes; j++) dmat->eigentot[i] += transmat->eigeninv[i][j]; } return(dmat); } /* Numerical recipes code for eigen decomposition (actually taken from RAxML rev_functions.c) */ void tred2 (double *a, const int n, const int np, double *d, double *e) { #define a(i,j) a[(j-1)*np + (i-1)] #define e(i) e[i-1] #define d(i) d[i-1] int i, j, k, l; double f, g, h, hh, scale; for (i = n; i > 1; i--) { l = i-1; h = 0; scale = 0; if ( l > 1 ) { for ( k = 1; k <= l; k++ ) scale += fabs(a(i,k)); if (scale == 0) e(i) = a(i,l); else { for (k = 1; k <= l; k++) { a(i,k) /= scale; h += a(i,k) * a(i,k); } f = a(i,l); g = -sqrt(h); if (f < 0) g = -g; e(i) = scale *g; h -= f*g; a(i,l) = f-g; f = 0; for (j = 1; j <=l ; j++) { a(j,i) = a(i,j) / h; g = 0; for (k = 1; k <= j; k++) g += a(j,k)*a(i,k); for (k = j+1; k <= l; k++) g += a(k,j)*a(i,k); e(j) = g/h; f += e(j)*a(i,j); } hh = f/(h+h); for (j = 1; j <= l; j++) { f = a(i,j); g = e(j) - hh * f; e(j) = g; for (k = 1; k <= j; k++) a(j,k) -= f*e(k) + g*a(i,k); } } } else e(i) = a(i,l); d(i) = h; } d(1) = 0; e(1) = 0; for (i = 1; i <= n; i++) { l = i-1; if (d(i) != 0) { for (j = 1; j <=l; j++) { g = 0; for (k = 1; k <= l; k++) g += a(i,k)*a(k,j); for (k=1; k <=l; k++) a(k,j) -= g * a(k,i); } } d(i) = a(i,i); a(i,i) = 1; for (j=1; j<=l; j++) a(i,j) = a(j,i) = 0; } return; #undef a #undef e #undef d } double pythag(double a, double b) { double absa = fabs(a), absb = fabs(b); return (absa > absb) ? absa * sqrt(1+ (absb/absa)*(absb/absa)) : absb == 0 ? 
0 : absb * sqrt(1+ (absa/absb)*(absa/absb)); } void tqli(double *d, double *e, int n, int np, double *z) { #define z(i,j) z[(j-1)*np + (i-1)] #define e(i) e[i-1] #define d(i) d[i-1] int i = 0, iter = 0, k = 0, l = 0, m = 0; double b = 0, c = 0, dd = 0, f = 0, g = 0, p = 0, r = 0, s = 0; for(i=2; i<=n; i++) e(i-1) = e(i); e(n) = 0; for (l = 1; l <= n; l++) { iter = 0; labelExtra: for (m = l; (m < n); m++) { dd = fabs(d(m))+fabs(d(m+1)); if (fabs(e(m))+dd == dd) break; } if (m != l) { assert(iter < 30); iter++; g = (d(l+1)-d(l))/(2*e(l)); r = pythag(g,1.); g = d(m)-d(l)+e(l)/(g+(g<0?-r:r)); s = 1; c = 1; p = 0; for (i = m-1; i>=l; i--) { f = s*e(i); b = c*e(i); r = pythag(f,g); e(i+1) = r; if (r == 0) { d (i+1) -= p; e (m) = 0; goto labelExtra; } s = f/r; c = g/r; g = d(i+1)-p; r = (d(i)-g)*s + 2*c*b; p = s*r; d(i+1) = g + p; g = c*r - b; for (k=1; k <= n; k++) { f = z(k,i+1); z(k,i+1) = s * z(k,i) + c*f; z(k,i) = c * z(k,i) - s*f; } } d(l) -= p; e(l) = g; e(m) = 0; goto labelExtra; } } return; #undef z #undef e #undef d } #ifdef USE_SSE3 inline float mm_sum(register __m128 sum) { #if 1 /* stupider but faster */ float f[4] ALIGNED; _mm_store_ps(f,sum); return(f[0]+f[1]+f[2]+f[3]); #else /* first we get sum[0]+sum[1], sum[2]+sum[3] by selecting 0/1 and 2/3 */ sum = _mm_add_ps(sum,_mm_shuffle_ps(sum,sum,_MM_SHUFFLE(0,1,2,3))); /* then get sum[0]+sum[1]+sum[2]+sum[3] by selecting 0/1 and 0/1 */ sum = _mm_add_ps(sum,_mm_shuffle_ps(sum,sum,_MM_SHUFFLE(0,1,0,1))); float f; _mm_store_ss(&f, sum); /* save the lowest word */ return(f); #endif } #endif void vector_multiply(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, int n, /*OUT*/numeric_t *fOut) { #ifdef USE_SSE3 int i; for (i = 0; i < n; i += 4) { __m128 a, b, c; a = _mm_load_ps(f1+i); b = _mm_load_ps(f2+i); c = _mm_mul_ps(a, b); _mm_store_ps(fOut+i,c); } #else int i; for (i = 0; i < n; i++) fOut[i] = f1[i]*f2[i]; #endif } numeric_t vector_multiply_sum(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, int n) { #ifdef USE_SSE3 if (n == 4) return(f1[0]*f2[0]+f1[1]*f2[1]+f1[2]*f2[2]+f1[3]*f2[3]); __m128 sum = _mm_setzero_ps(); int i; for (i = 0; i < n; i += 4) { __m128 a, b, c; a = _mm_load_ps(f1+i); b = _mm_load_ps(f2+i); c = _mm_mul_ps(a, b); sum = _mm_add_ps(c, sum); } return(mm_sum(sum)); #else int i; numeric_t out = 0.0; for (i=0; i < n; i++) out += f1[i]*f2[i]; return(out); #endif } /* sum(f1*f2*f3) */ numeric_t vector_multiply3_sum(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t* f3, int n) { #ifdef USE_SSE3 __m128 sum = _mm_setzero_ps(); int i; for (i = 0; i < n; i += 4) { __m128 a1, a2, a3; a1 = _mm_load_ps(f1+i); a2 = _mm_load_ps(f2+i); a3 = _mm_load_ps(f3+i); sum = _mm_add_ps(_mm_mul_ps(_mm_mul_ps(a1,a2),a3),sum); } return(mm_sum(sum)); #else int i; numeric_t sum = 0.0; for (i = 0; i < n; i++) sum += f1[i]*f2[i]*f3[i]; return(sum); #endif } numeric_t vector_dot_product_rot(/*IN*/numeric_t *f1, /*IN*/numeric_t *f2, /*IN*/numeric_t *fBy, int n) { #ifdef USE_SSE3 __m128 sum1 = _mm_setzero_ps(); __m128 sum2 = _mm_setzero_ps(); int i; for (i = 0; i < n; i += 4) { __m128 a1, a2, aBy; a1 = _mm_load_ps(f1+i); a2 = _mm_load_ps(f2+i); aBy = _mm_load_ps(fBy+i); sum1 = _mm_add_ps(_mm_mul_ps(a1, aBy), sum1); sum2 = _mm_add_ps(_mm_mul_ps(a2, aBy), sum2); } return(mm_sum(sum1)*mm_sum(sum2)); #else int i; numeric_t out1 = 0.0; numeric_t out2 = 0.0; for (i=0; i < n; i++) { out1 += f1[i]*fBy[i]; out2 += f2[i]*fBy[i]; } return(out1*out2); #endif } numeric_t vector_sum(/*IN*/numeric_t *f1, int n) { #ifdef USE_SSE3 if (n==4) return(f1[0]+f1[1]+f1[2]+f1[3]); 
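  /* General case: this assumes n is a multiple of 4 and f1 is 16-byte aligned
     (mymalloc checks alignment), so four floats are accumulated per aligned load
     and the partial sums are reduced by mm_sum at the end. */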
__m128 sum = _mm_setzero_ps(); int i; for (i = 0; i < n; i+=4) { __m128 a; a = _mm_load_ps(f1+i); sum = _mm_add_ps(a, sum); } return(mm_sum(sum)); #else numeric_t out = 0.0; int i; for (i = 0; i < n; i++) out += f1[i]; return(out); #endif } void vector_multiply_by(/*IN/OUT*/numeric_t *f, /*IN*/numeric_t fBy, int n) { int i; #ifdef USE_SSE3 __m128 c = _mm_set1_ps(fBy); for (i = 0; i < n; i += 4) { __m128 a, b; a = _mm_load_ps(f+i); b = _mm_mul_ps(a,c); _mm_store_ps(f+i,b); } #else for (i = 0; i < n; i++) f[i] *= fBy; #endif } void vector_add_mult(/*IN/OUT*/numeric_t *fTot, /*IN*/numeric_t *fAdd, numeric_t weight, int n) { #ifdef USE_SSE3 int i; __m128 w = _mm_set1_ps(weight); for (i = 0; i < n; i += 4) { __m128 tot, add; tot = _mm_load_ps(fTot+i); add = _mm_load_ps(fAdd+i); _mm_store_ps(fTot+i, _mm_add_ps(tot, _mm_mul_ps(add,w))); } #else int i; for (i = 0; i < n; i++) fTot[i] += fAdd[i] * weight; #endif } void matrixt_by_vector4(/*IN*/numeric_t mat[4][MAXCODES], /*IN*/numeric_t vec[4], /*OUT*/numeric_t out[4]) { #ifdef USE_SSE3 /*__m128 v = _mm_load_ps(vec);*/ __m128 o = _mm_setzero_ps(); int j; /* result is a sum of vectors: sum(k) v[k] * mat[k][] */ for (j = 0; j < 4; j++) { __m128 m = _mm_load_ps(&mat[j][0]); __m128 vj = _mm_load1_ps(&vec[j]); /* is it faster to shuffle v? */ o = _mm_add_ps(o, _mm_mul_ps(vj,m)); } _mm_store_ps(out, o); #else int j,k; for (j = 0; j < 4; j++) { double sum = 0; for (k = 0; k < 4; k++) sum += vec[k] * mat[k][j]; out[j] = sum; } #endif } distance_matrix_t matrixBLOSUM45 = { /*distances*/ { {0, 1.31097856157468, 1.06573001937323, 1.2682782988532, 0.90471293383305, 1.05855446876905, 1.05232790675508, 0.769574440593014, 1.27579668305679, 0.964604099952603, 0.987178199640556, 1.05007594438157, 1.05464162250736, 1.1985987403937, 0.967404475245526, 0.700490199584332, 0.880060189098976, 1.09748548316685, 1.28141710375267, 0.800038509951648}, {1.31097856157468, 0, 0.8010890222701, 0.953340718498495, 1.36011107208122, 0.631543775840481, 0.791014908659279, 1.15694899265629, 0.761152570032029, 1.45014917711188, 1.17792001455227, 0.394661075648738, 0.998807558909651, 1.135143404599, 1.15432562628921, 1.05309036790541, 1.05010474413616, 1.03938321130789, 0.963216908696184, 1.20274751778601}, {1.06573001937323, 0.8010890222701, 0, 0.488217214273568, 1.10567116937273, 0.814970207038261, 0.810176440932339, 0.746487413974582, 0.61876156253224, 1.17886558630004, 1.52003670190022, 0.808442678243754, 1.2889025816028, 1.16264109995678, 1.18228799147301, 0.679475681649858, 0.853658619686283, 1.68988558988005, 1.24297493464833, 1.55207513886163}, {1.2682782988532, 0.953340718498495, 0.488217214273568, 0, 1.31581050011876, 0.769778474953791, 0.482077627352988, 0.888361752320536, 0.736360849050364, 1.76756333403346, 1.43574761894039, 0.763612910719347, 1.53386612356483, 1.74323672079854, 0.886347403928663, 0.808614044804528, 1.01590147813779, 1.59617804551619, 1.1740494822217, 1.46600946033173}, {0.90471293383305, 1.36011107208122, 1.10567116937273, 1.31581050011876, 0, 1.3836789310481, 1.37553994252576, 1.26740695314856, 1.32361065635259, 1.26087264215993, 1.02417540515351, 1.37259631233791, 1.09416720447891, 0.986982088723923, 1.59321190226694, 0.915638787768407, 0.913042853922533, 1.80744143643002, 1.3294417177004, 0.830022143283238}, {1.05855446876905, 0.631543775840481, 0.814970207038261, 0.769778474953791, 1.3836789310481, 0, 0.506942797642807, 1.17699648087288, 0.614595446514896, 1.17092829494457, 1.19833088638994, 0.637341078675405, 0.806490842729072, 1.83315144709714, 
0.932064479113502, 0.850321696813199, 1.06830084665916, 1.05739353225849, 0.979907428113788, 1.5416250309563}, {1.05232790675508, 0.791014908659279, 0.810176440932339, 0.482077627352988, 1.37553994252576, 0.506942797642807, 0, 1.17007322676118, 0.769786956320484, 1.46659942462342, 1.19128214039009, 0.633592151371708, 1.27269395724349, 1.44641491621774, 0.735428579892476, 0.845319988414402, 1.06201695511881, 1.324395996498, 1.22734387448031, 1.53255698189437}, {0.769574440593014, 1.15694899265629, 0.746487413974582, 0.888361752320536, 1.26740695314856, 1.17699648087288, 1.17007322676118, 0, 1.1259007054424, 1.7025415585924, 1.38293205218175, 1.16756929156758, 1.17264582493965, 1.33271035269688, 1.07564768421292, 0.778868281341681, 1.23287107008366, 0.968539655354582, 1.42479529031801, 1.41208067821187}, {1.27579668305679, 0.761152570032029, 0.61876156253224, 0.736360849050364, 1.32361065635259, 0.614595446514896, 0.769786956320484, 1.1259007054424, 0, 1.4112324673522, 1.14630894167097, 0.967795284542623, 0.771479459384692, 1.10468029976148, 1.12334774065132, 1.02482926701639, 1.28754326478771, 1.27439749294131, 0.468683841672724, 1.47469999960758}, {0.964604099952603, 1.45014917711188, 1.17886558630004, 1.76756333403346, 1.26087264215993, 1.17092829494457, 1.46659942462342, 1.7025415585924, 1.4112324673522, 0, 0.433350517223017, 1.463460928818, 0.462965544381851, 0.66291968000662, 1.07010201755441, 1.23000200130049, 0.973485453109068, 0.963546200571036, 0.708724769805536, 0.351200119909572}, {0.987178199640556, 1.17792001455227, 1.52003670190022, 1.43574761894039, 1.02417540515351, 1.19833088638994, 1.19128214039009, 1.38293205218175, 1.14630894167097, 0.433350517223017, 0, 1.49770950074319, 0.473800072611076, 0.538473125003292, 1.37979627224964, 1.5859723170438, 0.996267398224516, 0.986095542821092, 0.725310666139274, 0.570542199221932}, {1.05007594438157, 0.394661075648738, 0.808442678243754, 0.763612910719347, 1.37259631233791, 0.637341078675405, 0.633592151371708, 1.16756929156758, 0.967795284542623, 1.463460928818, 1.49770950074319, 0, 1.0079761868248, 1.44331961488922, 0.924599080166146, 1.06275728888356, 1.05974425835993, 1.04892430642749, 0.972058829603409, 1.21378822764856}, {1.05464162250736, 0.998807558909651, 1.2889025816028, 1.53386612356483, 1.09416720447891, 0.806490842729072, 1.27269395724349, 1.17264582493965, 0.771479459384692, 0.462965544381851, 0.473800072611076, 1.0079761868248, 0, 0.72479754849538, 1.1699868662153, 1.34481214251794, 1.06435197383538, 1.05348497728858, 0.774878150710318, 0.609532859331199}, {1.1985987403937, 1.135143404599, 1.16264109995678, 1.74323672079854, 0.986982088723923, 1.83315144709714, 1.44641491621774, 1.33271035269688, 1.10468029976148, 0.66291968000662, 0.538473125003292, 1.44331961488922, 0.72479754849538, 0, 1.32968844979665, 1.21307373491949, 0.960087571600877, 0.475142555482979, 0.349485367759138, 0.692733248746636}, {0.967404475245526, 1.15432562628921, 1.18228799147301, 0.886347403928663, 1.59321190226694, 0.932064479113502, 0.735428579892476, 1.07564768421292, 1.12334774065132, 1.07010201755441, 1.37979627224964, 0.924599080166146, 1.1699868662153, 1.32968844979665, 0, 0.979087429691819, 0.97631161216338, 1.21751652292503, 1.42156458605332, 1.40887880416009}, {0.700490199584332, 1.05309036790541, 0.679475681649858, 0.808614044804528, 0.915638787768407, 0.850321696813199, 0.845319988414402, 0.778868281341681, 1.02482926701639, 1.23000200130049, 1.5859723170438, 1.06275728888356, 1.34481214251794, 1.21307373491949, 0.979087429691819, 
0, 0.56109848274013, 1.76318885009194, 1.29689226231656, 1.02015839286433}, {0.880060189098976, 1.05010474413616, 0.853658619686283, 1.01590147813779, 0.913042853922533, 1.06830084665916, 1.06201695511881, 1.23287107008366, 1.28754326478771, 0.973485453109068, 0.996267398224516, 1.05974425835993, 1.06435197383538, 0.960087571600877, 0.97631161216338, 0.56109848274013, 0, 1.39547634461879, 1.02642577026706, 0.807404666228614}, {1.09748548316685, 1.03938321130789, 1.68988558988005, 1.59617804551619, 1.80744143643002, 1.05739353225849, 1.324395996498, 0.968539655354582, 1.27439749294131, 0.963546200571036, 0.986095542821092, 1.04892430642749, 1.05348497728858, 0.475142555482979, 1.21751652292503, 1.76318885009194, 1.39547634461879, 0, 0.320002937404137, 1.268589159299}, {1.28141710375267, 0.963216908696184, 1.24297493464833, 1.1740494822217, 1.3294417177004, 0.979907428113788, 1.22734387448031, 1.42479529031801, 0.468683841672724, 0.708724769805536, 0.725310666139274, 0.972058829603409, 0.774878150710318, 0.349485367759138, 1.42156458605332, 1.29689226231656, 1.02642577026706, 0.320002937404137, 0, 0.933095433689795}, {0.800038509951648, 1.20274751778601, 1.55207513886163, 1.46600946033173, 0.830022143283238, 1.5416250309563, 1.53255698189437, 1.41208067821187, 1.47469999960758, 0.351200119909572, 0.570542199221932, 1.21378822764856, 0.609532859331199, 0.692733248746636, 1.40887880416009, 1.02015839286433, 0.807404666228614, 1.268589159299, 0.933095433689795, 0} }, /*eigeninv*/ { {-0.216311217101265, -0.215171653035930, -0.217000020881064, -0.232890860601250, -0.25403526530177, -0.211569372858927, -0.218073620637049, -0.240585637190076, -0.214507049619293, -0.228476323330312, -0.223235445346107, -0.216116483840334, -0.206903836810903, -0.223553828183343, -0.236937609127783, -0.217652789023588, -0.211982652566286, -0.245995223308316, -0.206187718714279, -0.227670670439422}, {-0.0843931919568687, -0.0342164464991033, 0.393702284928246, -0.166018266253027, 0.0500896782860136, -0.262731388032538, 0.030139964190519, -0.253997503551094, -0.0932603349591988, -0.32884667697173, 0.199966846276877, -0.117543453869516, 0.196248237055757, -0.456448703853250, 0.139286961076387, 0.241166801918811, -0.0783508285295053, 0.377438091416498, 0.109499076984234, 0.128581669647144}, {-0.0690428674271772, 0.0133858672878363, -0.208289917312908, 0.161232925220819, 0.0735806288007248, -0.316269599838174, -0.0640708424745702, -0.117078801507436, 0.360805085405857, 0.336899760384943, 0.0332447078185156, 0.132954055834276, 0.00595209121998118, -0.157755611190327, -0.199839273133436, 0.193688928807663, 0.0970290928040946, 0.374683975138541, -0.478110944870958, -0.243290196936098}, {0.117284581850481, 0.310399467781876, -0.143513477698805, 0.088808130300351, 0.105747812943691, -0.373871701179853, 0.189069306295134, 0.133258225034741, -0.213043549687694, 0.301303731259140, -0.182085224761849, -0.161971915020789, 0.229301173581378, -0.293586313243755, -0.0260480060747498, -0.0217953684540699, 0.0202675755458796, -0.160134624443657, 0.431950096999465, -0.329885160320501}, {0.256496969244703, 0.0907408349583135, 0.0135731083898029, 0.477557831930769, -0.0727379669280703, 0.101732675207959, -0.147293025369251, -0.348325291603251, -0.255678082078362, -0.187092643740172, -0.177164064346593, -0.225921480146133, 0.422318841046522, 0.319959853469398, -0.0623652546300045, 0.0824203908606883, -0.102057926881110, 0.120728407576411, -0.156845807891241, -0.123528163091204}, {-0.00906668858975576, -0.0814722888231236, 
-0.0762715085459023, 0.055819989938286, -0.0540516675257271, -0.0070589302769034, -0.315813159989213, -0.0103527463419808, -0.194634331372293, -0.0185860407566822, 0.50134169352609, 0.384531812730061, -0.0405008616742061, 0.0781033650669525, 0.069334900096687, 0.396455180448549, -0.204065801866462, -0.215272089630713, 0.171046818996465, -0.396393364716348}, {0.201971098571663, 0.489747667606921, 0.00226258734592836, 0.0969514005747054, 0.0853921636903791, 0.0862068740282345, -0.465412154271164, -0.130516676347786, 0.165513616974634, 0.0712238027886633, 0.140746943067963, -0.325919272273406, -0.421213488261598, -0.163508199065965, 0.269695802810568, -0.110296405171437, -0.106834099902202, 0.00509414588152415, 0.00909215239544615, 0.0500401865589727}, {0.515854176692456, -0.087468413428258, 0.102796468891449, -0.06046105990993, -0.212014383772414, -0.259853648383794, -0.0997372883043333, -0.109934574535736, 0.284891018406112, -0.250578342940183, 0.142174204994568, 0.210384918947619, 0.118803190788946, -0.0268434355996836, 0.0103721198836548, -0.355555176478458, 0.428042332431476, -0.150610175411631, 0.0464090887952940, -0.140238796382057}, {-0.239392215229762, -0.315483492656425, 0.100205194952396, 0.197830195325302, 0.40178804665223, 0.195809461460298, -0.407817115321684, 0.0226836686147386, -0.169780276210306, 0.0818161585952184, -0.172886230584939, 0.174982644851064, 0.0868786992159535, -0.198450519980824, 0.168581078329968, -0.361514336004068, 0.238668430084722, 0.165494019791904, 0.110437707249228, -0.169592003035203}, {-0.313151735678025, 0.10757884850664, -0.49249098807229, 0.0993472335619114, -0.148695715250836, 0.0573801136941699, -0.190040373500722, 0.254848437434773, 0.134147888304352, -0.352719341442756, 0.0839609323513986, -0.207904182300122, 0.253940523323376, -0.109832138553288, 0.0980084518687944, 0.209026594443723, 0.406236051871548, -0.0521120230935943, 0.0554108014592302, 0.134681046631955}, {-0.102905214421384, 0.235803606800009, 0.213414976431981, -0.253606415825635, 0.00945656859370683, 0.259551282655855, 0.159527348902192, 0.083218761193016, -0.286815935191867, 0.0135069477264877, 0.336758103107357, -0.271707359524149, -0.0400009875851839, 0.0871186292716414, -0.171506310409388, -0.0954276577211755, 0.393467571460712, 0.111732846649458, -0.239886066474217, -0.426474828195231}, {-0.0130795552324104, 0.0758967690968058, -0.165099404017689, -0.46035152559912, 0.409888158016031, -0.0235053940299396, 0.0699393201709723, -0.161320910316996, 0.226111732196825, -0.177811841258496, -0.219073917645916, -0.00703219376737286, 0.162831878334912, 0.271670554900684, 0.451033612762052, 0.0820942662443393, -0.0904983490498446, -0.0587000279313978, -0.0938852980928252, -0.306078621571843}, {0.345092040577428, -0.257721588971295, -0.301689123771848, -0.0875212184538126, 0.161012613069275, 0.385104899829821, 0.118355290985046, -0.241723794416731, 0.083201920119646, -0.0809095291508749, -0.0820275390511991, -0.115569770103317, -0.250105681098033, -0.164197583037664, -0.299481453795592, 0.255906951902366, 0.129042051416371, 0.203761730442746, 0.347550071284268, -0.109264854744020}, {0.056345924962239, 0.072536751679082, 0.303127492633681, -0.368877185781648, -0.343024497082421, 0.206879529669083, -0.413012709639426, 0.078538816203612, 0.103382383425097, 0.288319996147499, -0.392663258459423, 0.0319588502083897, 0.220316797792669, -0.0563686494606947, -0.0869286063283735, 0.323677017794391, 0.0984875197088935, -0.0303289828821742, 0.0450197853450979, -0.0261771221270139}, 
{-0.253701638374729, -0.148922815783583, 0.111794052194159, 0.157313977830326, -0.269846001260543, -0.222989872703583, 0.115441028189268, -0.350456582262355, -0.0409581422905941, 0.174078744248002, -0.130673397086811, -0.123963802708056, -0.351609207081548, 0.281548012920868, 0.340382662112428, 0.180262131025562, 0.3895263830793, 0.0121546812430960, 0.214830943227063, -0.0617782909660214}, {-0.025854479416026, 0.480654788977767, -0.138024550829229, -0.130191670810919, 0.107816875829919, -0.111243997319276, -0.0679814460571245, -0.183167991080677, -0.363355166018786, -0.183934891092050, -0.216097125080962, 0.520240628803255, -0.179616013606479, 0.0664131536100941, -0.178350708111064, 0.0352047611606709, 0.223857228692892, 0.128363679623513, -0.000403433628490731, 0.224972110977704}, {0.159207394033448, -0.0371517305736114, -0.294302634912281, -0.0866954375908417, -0.259998567870054, 0.284966673982689, 0.205356416771391, -0.257613708650298, -0.264820519037270, 0.293359248624603, 0.0997476397434102, 0.151390539497369, 0.165571346773648, -0.347569523551258, 0.43792310820533, -0.0723248163210163, 0.0379214984816955, -0.0542758730251438, -0.258020301801603, 0.128680501102363}, {0.316853842351797, -0.153950010941153, -0.13387065213508, -0.0702971390607613, -0.202558481846057, -0.172941438694837, -0.068882524588574, 0.524738203063889, -0.271670479920716, -0.112864756695310, -0.146831636946145, -0.0352336188578041, -0.211108490884767, 0.097857111349555, 0.276459740956662, 0.0231297536754823, -0.0773173324868396, 0.487208384389438, -0.0734191389266824, -0.113198765573319}, {-0.274285525741087, 0.227334266052039, -0.0973746625709059, -0.00965256583655389, -0.402438444750043, 0.198586229519026, 0.0958135064575833, -0.108934376958686, 0.253641732094319, -0.0551918478254021, 0.0243640218331436, 0.181936272247179, 0.090952738347629, 0.0603352483029044, -0.0043821671755761, -0.347720824658591, -0.267879988539971, 0.403804652116592, 0.337654323971186, -0.241509293972297}, {-0.0197089518344238, 0.139681034626696, 0.251980475788267, 0.341846624362846, -0.075141195125153, 0.2184951591319, 0.268870823491343, 0.150392399018138, 0.134592404015057, -0.337050200539163, -0.313109373497998, 0.201993318439135, -0.217140733851970, -0.337622749083808, 0.135253284365068, 0.181729249828045, -0.00627813335422765, -0.197218833324039, -0.194060005031698, -0.303055888528004} }, /*eigenval*/ { 20.29131, 0.5045685, 0.2769945, 0.1551147, 0.03235484, -0.04127639, -0.3516426, -0.469973, -0.5835191, -0.6913107, -0.7207972, -0.7907875, -0.9524307, -1.095310, -1.402153, -1.424179, -1.936704, -2.037965, -3.273561, -5.488734 }, /*eigentot and codeFreq left out, these are initialized elsewhere*/ }; /* The JTT92 matrix, D. T. Jones, W. R. Taylor, & J. M. Thorton, CABIOS 8:275 (1992) Derived from the PhyML source code (models.c) by filling in the other side of the symmetric matrix, scaling the entries by the stationary rate (to give the rate of a->b not b|a), to set the diagonals so the rows sum to 0, to rescale the matrix so that the implied rate of evolution is 1. The resulting matrix is the transpose (I think). 
*/ #if 0 { int i,j; for (i=0; i<20; i++) for (j=0; j<i; j++) daa[j*20+i] = daa[i*20+j]; for (i = 0; i < 20; i++) for (j = 0; j < 20; j++) daa[i*20+j] *= pi[j] / 100.0; double mr = 0; /* mean rate */ for (i = 0; i < 20; i++) { double sum = 0; for (j = 0; j < 20; j++) sum += daa[i*20+j]; daa[i*20+i] = -sum; mr += pi[i] * sum; } for (i = 0; i < 20*20; i++) daa[i] /= mr; } #endif double statJTT92[MAXCODES] = {0.07674789,0.05169087,0.04264509,0.05154407,0.01980301,0.04075195,0.06182989,0.07315199,0.02294399,0.05376110,0.09190390,0.05867583,0.02382594,0.04012589,0.05090097,0.06876503,0.05856501,0.01426057,0.03210196,0.06600504}; double matrixJTT92[MAXCODES][MAXCODES] = { { -1.247831,0.044229,0.041179,0.061769,0.042704,0.043467,0.08007,0.136501,0.02059,0.027453,0.022877,0.02669,0.041179,0.011439,0.14794,0.288253,0.362223,0.006863,0.008388,0.227247 }, { 0.029789,-1.025965,0.023112,0.008218,0.058038,0.159218,0.014895,0.070364,0.168463,0.011299,0.019517,0.33179,0.022599,0.002568,0.038007,0.051874,0.032871,0.064714,0.010272,0.008731 }, { 0.022881,0.019068,-1.280568,0.223727,0.014407,0.03644,0.024576,0.034322,0.165676,0.019915,0.005085,0.11144,0.012712,0.004237,0.006356,0.213134,0.098304,0.00339,0.029661,0.00678 }, { 0.041484,0.008194,0.270413,-1.044903,0.005121,0.025095,0.392816,0.066579,0.05736,0.005634,0.003585,0.013316,0.007682,0.002049,0.007682,0.030217,0.019462,0.002049,0.023559,0.015877 }, { 0.011019,0.022234,0.00669,0.001968,-0.56571,0.001771,0.000984,0.011609,0.013577,0.003345,0.004526,0.001377,0.0061,0.015348,0.002755,0.043878,0.008264,0.022628,0.041124,0.012199 }, { 0.02308,0.125524,0.034823,0.019841,0.003644,-1.04415,0.130788,0.010528,0.241735,0.003644,0.029154,0.118235,0.017411,0.00162,0.066406,0.021461,0.020651,0.007288,0.009718,0.008098 }, { 0.064507,0.017816,0.035632,0.471205,0.003072,0.198435,-0.944343,0.073107,0.015973,0.007372,0.005529,0.111197,0.011058,0.003072,0.011058,0.01843,0.019659,0.006143,0.0043,0.027646 }, { 0.130105,0.099578,0.058874,0.09449,0.042884,0.018898,0.086495,-0.647831,0.016717,0.004361,0.004361,0.019625,0.010176,0.003634,0.017444,0.146096,0.023986,0.039976,0.005815,0.034162 }, { 0.006155,0.074775,0.089138,0.025533,0.01573,0.1361,0.005927,0.005243,-1.135695,0.003648,0.012767,0.010259,0.007523,0.009119,0.026217,0.016642,0.010487,0.001824,0.130629,0.002508 }, { 0.01923,0.011752,0.025106,0.005876,0.009081,0.004808,0.00641,0.003205,0.008547,-1.273602,0.122326,0.011218,0.25587,0.047542,0.005342,0.021367,0.130873,0.004808,0.017094,0.513342 }, { 0.027395,0.0347,0.010958,0.006392,0.021003,0.065748,0.008219,0.005479,0.051137,0.209115,-0.668139,0.012784,0.354309,0.226465,0.093143,0.053877,0.022829,0.047485,0.021916,0.16437 }, { 0.020405,0.376625,0.153332,0.015158,0.004081,0.170239,0.105525,0.015741,0.026235,0.012243,0.008162,-0.900734,0.037896,0.002332,0.012243,0.027401,0.06005,0.00583,0.004664,0.008162 }, { 0.012784,0.010416,0.007102,0.003551,0.007339,0.01018,0.004261,0.003314,0.007812,0.113397,0.091854,0.015388,-1.182051,0.01018,0.003788,0.006865,0.053503,0.005682,0.004261,0.076466 }, { 0.00598,0.001993,0.003987,0.001595,0.031098,0.001595,0.001993,0.001993,0.015948,0.035484,0.098877,0.001595,0.017144,-0.637182,0.006778,0.03668,0.004784,0.021131,0.213701,0.024719 }, { 0.098117,0.037426,0.007586,0.007586,0.007081,0.082944,0.009104,0.012138,0.058162,0.005058,0.051587,0.010621,0.008092,0.008598,-0.727675,0.144141,0.059679,0.003035,0.005058,0.011632 }, { 
0.258271,0.069009,0.343678,0.040312,0.152366,0.036213,0.020498,0.137334,0.049878,0.02733,0.040312,0.032113,0.019814,0.06286,0.194728,-1.447863,0.325913,0.023914,0.043045,0.025964 }, { 0.276406,0.037242,0.135003,0.022112,0.02444,0.029677,0.018621,0.019203,0.026768,0.142567,0.014548,0.059936,0.131511,0.006983,0.068665,0.27757,-1.335389,0.006983,0.01222,0.065174 }, { 0.001275,0.017854,0.001134,0.000567,0.016295,0.002551,0.001417,0.007793,0.001134,0.001275,0.007368,0.001417,0.003401,0.00751,0.00085,0.004959,0.0017,-0.312785,0.010061,0.003542 }, { 0.003509,0.006379,0.022328,0.014673,0.066664,0.007655,0.002233,0.002552,0.182769,0.010207,0.007655,0.002552,0.005741,0.170967,0.00319,0.020095,0.006698,0.022647,-0.605978,0.005103 }, { 0.195438,0.011149,0.010493,0.020331,0.040662,0.013117,0.029512,0.030824,0.007214,0.630254,0.11805,0.009182,0.211834,0.040662,0.015084,0.024922,0.073453,0.016396,0.010493,-1.241722 } }; double statWAG01[MAXCODES] = {0.0866279,0.043972, 0.0390894,0.0570451,0.0193078,0.0367281,0.0580589,0.0832518,0.0244314,0.048466, 0.086209, 0.0620286,0.0195027,0.0384319,0.0457631,0.0695179,0.0610127,0.0143859,0.0352742,0.0708956}; double matrixWAG01[MAXCODES][MAXCODES] = { {-1.117151, 0.050147, 0.046354, 0.067188, 0.093376, 0.082607, 0.143908, 0.128804, 0.028817, 0.017577, 0.036177, 0.082395, 0.081234, 0.019138, 0.130789, 0.306463, 0.192846, 0.010286, 0.021887, 0.182381}, {0.025455, -0.974318, 0.029321, 0.006798, 0.024376, 0.140086, 0.020267, 0.026982, 0.098628, 0.008629, 0.022967, 0.246964, 0.031527, 0.004740, 0.031358, 0.056495, 0.025586, 0.053714, 0.017607, 0.011623}, {0.020916, 0.026065, -1.452438, 0.222741, 0.010882, 0.063328, 0.038859, 0.046176, 0.162306, 0.022737, 0.005396, 0.123567, 0.008132, 0.003945, 0.008003, 0.163042, 0.083283, 0.002950, 0.044553, 0.008051}, {0.044244, 0.008819, 0.325058, -0.989665, 0.001814, 0.036927, 0.369645, 0.051822, 0.055719, 0.002361, 0.005077, 0.028729, 0.006212, 0.002798, 0.025384, 0.064166, 0.022443, 0.007769, 0.019500, 0.009120}, {0.020812, 0.010703, 0.005375, 0.000614, -0.487357, 0.002002, 0.000433, 0.006214, 0.005045, 0.003448, 0.007787, 0.001500, 0.007913, 0.008065, 0.002217, 0.028525, 0.010395, 0.014531, 0.011020, 0.020307}, {0.035023, 0.117008, 0.059502, 0.023775, 0.003809, -1.379785, 0.210830, 0.012722, 0.165524, 0.004391, 0.033516, 0.150135, 0.059565, 0.003852, 0.035978, 0.039660, 0.033070, 0.008316, 0.008777, 0.011613}, {0.096449, 0.026759, 0.057716, 0.376214, 0.001301, 0.333275, -1.236894, 0.034593, 0.034734, 0.007763, 0.009400, 0.157479, 0.019202, 0.004944, 0.041578, 0.042955, 0.050134, 0.009540, 0.011961, 0.035874}, {0.123784, 0.051085, 0.098345, 0.075630, 0.026795, 0.028838, 0.049604, -0.497615, 0.021792, 0.002661, 0.005356, 0.032639, 0.015212, 0.004363, 0.021282, 0.117240, 0.019732, 0.029444, 0.009052, 0.016361}, {0.008127, 0.054799, 0.101443, 0.023863, 0.006384, 0.110105, 0.014616, 0.006395, -0.992342, 0.003543, 0.012807, 0.022832, 0.010363, 0.017420, 0.017851, 0.018979, 0.012136, 0.006733, 0.099319, 0.003035}, {0.009834, 0.009511, 0.028192, 0.002006, 0.008654, 0.005794, 0.006480, 0.001549, 0.007029, -1.233162, 0.161294, 0.016472, 0.216559, 0.053891, 0.005083, 0.016249, 0.074170, 0.010808, 0.021372, 0.397837}, {0.036002, 0.045028, 0.011900, 0.007673, 0.034769, 0.078669, 0.013957, 0.005547, 0.045190, 0.286902, -0.726011, 0.023303, 0.439180, 0.191376, 0.037625, 0.031191, 0.029552, 0.060196, 0.036066, 0.162890}, {0.058998, 0.348377, 0.196082, 0.031239, 0.004820, 0.253558, 0.168246, 0.024319, 0.057967, 0.021081, 0.016767, -1.124580, 
0.060821, 0.005783, 0.036254, 0.062960, 0.090292, 0.008952, 0.008675, 0.019884}, {0.018288, 0.013983, 0.004057, 0.002124, 0.007993, 0.031629, 0.006450, 0.003564, 0.008272, 0.087143, 0.099354, 0.019123, -1.322098, 0.024370, 0.003507, 0.010109, 0.031033, 0.010556, 0.008769, 0.042133}, {0.008490, 0.004143, 0.003879, 0.001885, 0.016054, 0.004030, 0.003273, 0.002014, 0.027402, 0.042734, 0.085315, 0.003583, 0.048024, -0.713669, 0.006512, 0.022020, 0.006934, 0.061698, 0.260332, 0.026213}, {0.069092, 0.032635, 0.009370, 0.020364, 0.005255, 0.044829, 0.032773, 0.011698, 0.033438, 0.004799, 0.019973, 0.026747, 0.008229, 0.007754, -0.605590, 0.077484, 0.038202, 0.006695, 0.010376, 0.015124}, {0.245933, 0.089317, 0.289960, 0.078196, 0.102703, 0.075066, 0.051432, 0.097899, 0.054003, 0.023306, 0.025152, 0.070562, 0.036035, 0.039831, 0.117705, -1.392239, 0.319421, 0.038212, 0.057419, 0.016981}, {0.135823, 0.035501, 0.129992, 0.024004, 0.032848, 0.054936, 0.052685, 0.014461, 0.030308, 0.093371, 0.020915, 0.088814, 0.097083, 0.011008, 0.050931, 0.280341, -1.154973, 0.007099, 0.018643, 0.088894}, {0.001708, 0.017573, 0.001086, 0.001959, 0.010826, 0.003257, 0.002364, 0.005088, 0.003964, 0.003208, 0.010045, 0.002076, 0.007786, 0.023095, 0.002105, 0.007908, 0.001674, -0.466694, 0.037525, 0.005516}, {0.008912, 0.014125, 0.040205, 0.012058, 0.020133, 0.008430, 0.007267, 0.003836, 0.143398, 0.015555, 0.014757, 0.004934, 0.015861, 0.238943, 0.007998, 0.029135, 0.010779, 0.092011, -0.726275, 0.011652}, {0.149259, 0.018739, 0.014602, 0.011335, 0.074565, 0.022417, 0.043805, 0.013932, 0.008807, 0.581952, 0.133956, 0.022726, 0.153161, 0.048356, 0.023429, 0.017317, 0.103293, 0.027186, 0.023418, -1.085487}, };
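/* The disabled block above documents how these amino-acid rate matrices were prepared:
 * fill in the other half of the symmetric exchangeability matrix, scale each entry by the
 * stationary frequency of the target state, set the diagonal so every row sums to zero,
 * and rescale so the implied mean rate of evolution is 1. The routine below is a minimal
 * standalone sketch of that same procedure, not part of the original source; the name
 * normalize_rate_matrix is illustrative. It assumes a 20x20 matrix stored row-major with
 * zero diagonal entries on entry and frequencies pi[] given in percent, mirroring the
 * #if 0 code above. */
static void normalize_rate_matrix(double daa[20*20], const double pi[20]) {
  int i, j;
  /* mirror the filled triangle into the other half (symmetric exchangeabilities) */
  for (i = 0; i < 20; i++)
    for (j = 0; j < i; j++)
      daa[j*20+i] = daa[i*20+j];
  /* scale the rate a->b by the stationary frequency of the target state b */
  for (i = 0; i < 20; i++)
    for (j = 0; j < 20; j++)
      daa[i*20+j] *= pi[j] / 100.0;
  /* set diagonals so each row sums to zero, and accumulate the mean rate */
  double mr = 0;
  for (i = 0; i < 20; i++) {
    double sum = 0;
    for (j = 0; j < 20; j++)
      sum += daa[i*20+j];
    daa[i*20+i] = -sum;
    mr += pi[i] * sum;
  }
  /* rescale so the implied rate of evolution is 1 */
  for (i = 0; i < 20*20; i++)
    daa[i] /= mr;
}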
comm.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /** * Copyright (c) 2015 by Contributors */ #ifndef MXNET_KVSTORE_COMM_H_ #define MXNET_KVSTORE_COMM_H_ #include <dmlc/omp.h> #include <string> #include <algorithm> #include <utility> #include <limits> #include <vector> #include <tuple> #include <thread> #include "mxnet/ndarray.h" #include "gradient_compression.h" #include "../ndarray/ndarray_function.h" #include "../operator/tensor/sparse_retain-inl.h" #include "./kvstore_utils.h" namespace mxnet { namespace kvstore { /** * \brief multiple device communication */ class Comm { public: Comm() { pinned_ctx_ = Context::CPUPinned(0); } virtual ~Comm() { } /** * \brief init key with the data shape and storage shape */ virtual void Init(int key, const NDArrayStorageType stype, const mxnet::TShape& shape, int dtype = mshadow::kFloat32) = 0; /** * \brief returns src[0] + .. + src[src.size()-1] */ virtual const NDArray& Reduce( int key, const std::vector<NDArray>& src, int priority) = 0; /** * \brief copy from src to dst[i] for every i */ virtual void Broadcast( int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) = 0; /** * \brief broadcast src to dst[i] with target row_ids for every i * \param key the identifier key for the stored ndarray * \param src the source row_sparse ndarray to broadcast * \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast, where the row_ids are expected to be unique and sorted in row_id.data() * \param priority the priority of the operation */ virtual void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) = 0; /** * \brief return a pinned context */ Context pinned_ctx() const { return pinned_ctx_; } /** * \brief Sets gradient compression parameters to be able to * perform reduce with compressed gradients */ void SetGradientCompression(std::shared_ptr<GradientCompression> gc) { gc_ = gc; } protected: Context pinned_ctx_; std::shared_ptr<GradientCompression> gc_; }; /** * \brief an implementation of Comm that first copies data to CPU memory, and then * reduces there */ class CommCPU : public Comm { public: CommCPU() { nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4); bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000); // TODO(junwu) delete the following data member, now for benchmark only is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0); } virtual ~CommCPU() { } void Init(int key, const NDArrayStorageType stype, const mxnet::TShape& shape, int type = mshadow::kFloat32) override { // Delayed allocation - the dense merged buffer might not be used at all if push() // only sees sparse arrays bool delay_alloc = true; merge_buf_[key].merged = NDArray(shape, pinned_ctx_,
delay_alloc, type); } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { auto& buf = merge_buf_[key]; const auto stype = src[0].storage_type(); // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { if (stype == kDefaultStorage) { return src[0]; } else { // With 'local' kvstore, we could store the weight on CPU while compute // the gradient on GPU when the weight is extremely large. // To avoiding copying the weight to the same context of the gradient, // we always copy the gradient to merged buf. NDArray& merged = buf.merged_buf(stype); CopyFromTo(src[0], &merged, priority); return merged; } } NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { std::vector<Engine::VarHandle> const_vars(src.size() - 1); std::vector<NDArray> reduce(src.size()); CopyFromTo(src[0], &buf_merged, priority); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()-1); for (size_t j = 0; j < src.size() - 1; ++j) { // allocate copy buffer buf.copy_buf[j] = NDArray( src[0].shape(), pinned_ctx_, false, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 1; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority); reduce[i] = buf.copy_buf[i-1]; const_vars[i-1] = reduce[i].var(); } Engine::Get()->PushAsync( [reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { ReduceSumCPU(reduce); on_complete(); }, Context::CPU(), const_vars, {reduce[0].var()}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } else { // sparse reduce std::vector<Engine::VarHandle> const_vars(src.size()); std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray( src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype()); } } CHECK(stype == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << stype << "(src) vs. " << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; const_vars[i] = reduce[i].var(); } Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(), ResourceRequest(ResourceRequest::kTempSpace)); Engine::Get()->PushAsync( [reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) { NDArray out = buf_merged; is_serial_push_? ReduceSumCPUExSerial(reduce, &out) : mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out); on_complete(); }, Context::CPU(), const_vars, {buf_merged.var(), rsc.var}, FnProperty::kCPUPrioritized, priority, "KVStoreReduce"); } return buf_merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { int mask = src.ctx().dev_mask(); if (mask == Context::kCPU) { for (auto d : dst) CopyFromTo(src, d, priority); } else { // First copy data to pinned_ctx, then broadcast. // Note that kv.init initializes the data on pinned_ctx. // This branch indicates push() with ndarrays on gpus were called, // and the source is copied to gpu ctx. // Also indicates that buffers are already initialized during push(). 
auto& buf = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf, priority); for (auto d : dst) CopyFromTo(buf, d, priority); } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { using namespace mshadow; CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; CHECK_EQ(src.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with src on gpu context not supported"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU) << "BroadcastRowSparse with row_indices on gpu context not supported"; // retain according to unique indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, src.shape(), src.ctx(), true, src.dtype(), src.aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + " refers to the same NDArray as the one stored in KVStore. " "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. To avoid such an issue, " "consider creating a new NDArray buffer to store the output."); } Engine::Get()->PushAsync( [=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); NDArray temp = retained_cpu; // get rid of the const qualifier op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); on_complete(); }, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()}, FnProperty::kNormal, priority, "KVStoreSparseRetain"); // if retained_cpu == out, CopyFromTo will ignore the copy operation CopyFromTo(retained_cpu, out, priority); } } private: // reduce sum into val[0] inline void ReduceSumCPU(const std::vector<NDArray> &in_data) { MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, { std::vector<DType*> dptr(in_data.size()); for (size_t i = 0; i < in_data.size(); ++i) { TBlob data = in_data[i].data(); CHECK(data.CheckContiguous()); dptr[i] = data.FlatTo2D<cpu, DType>().dptr_; } size_t total = in_data[0].shape().Size(); ReduceSumCPUImpl(dptr, total); }); } // serial implementation of reduce sum for row sparse NDArray.
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) { using namespace rowsparse; using namespace mshadow; auto stype = out->storage_type(); CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype; size_t total_num_rows = 0; size_t num_in = in.size(); // skip the ones with empty indices and values std::vector<bool> skip(num_in, false); // the values tensor of the inputs MSHADOW_TYPE_SWITCH(out->dtype(), DType, { MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, { std::vector<Tensor<cpu, 2, DType>> in_vals(num_in); std::vector<Tensor<cpu, 1, IType>> in_indices(num_in); // offset to the values tensor of all inputs std::vector<size_t> offsets(num_in, 0); std::vector<size_t> num_rows(num_in, 0); for (size_t i = 0; i < num_in; i++) { if (!in[i].storage_initialized()) { skip[i] = true; continue; } auto size = in[i].aux_shape(kIdx).Size(); num_rows[i] = size; total_num_rows += size; in_vals[i] = in[i].data().FlatTo2D<cpu, DType>(); in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>(); } std::vector<IType> indices; indices.reserve(total_num_rows); // gather indices from all inputs for (size_t i = 0; i < num_in; i++) { for (size_t j = 0; j < num_rows[i]; j++) { indices.emplace_back(in_indices[i][j]); } } CHECK_EQ(indices.size(), total_num_rows); // dedup indices std::sort(indices.begin(), indices.end()); indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin()); // the one left are unique non-zero rows size_t nnr = indices.size(); // allocate memory for output out->CheckAndAlloc({Shape1(nnr)}); auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>(); auto val_data = out->data().FlatTo2D<cpu, DType>(); for (size_t i = 0; i < nnr; i++) { // copy indices back idx_data[i] = indices[i]; bool zeros = true; for (size_t j = 0; j < num_in; j++) { if (skip[j]) continue; size_t offset = offsets[j]; if (offset < num_rows[j]) { if (indices[i] == in_indices[j][offset]) { if (zeros) { Copy(val_data[i], in_vals[j][offset], nullptr); zeros = false; } else { val_data[i] += in_vals[j][offset]; } offsets[j] += 1; } } } } }); }); } template<typename DType> inline static void ReduceSumCPU( const std::vector<DType*> &dptr, size_t offset, index_t size) { using namespace mshadow; // NOLINT(*) Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size)); for (size_t i = 1; i < dptr.size(); i+=4) { switch (dptr.size() - i) { case 1: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); in_0 += in_1; break; } case 2: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); in_0 += in_1 + in_2; break; } case 3: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3; break; } default: { Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size)); Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size)); in_0 += in_1 + in_2 + in_3 + in_4; break; } } } } template<typename DType> inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) { const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10)); long ntask = (total + step - 1) / step; // NOLINT(*) if (total < bigarray_bound_ || nthread_reduction_ <= 1) { ReduceSumCPU(dptr, 0, total); } else { #pragma omp parallel for 
schedule(static) num_threads(nthread_reduction_) for (long j = 0; j < ntask; ++j) { // NOLINT(*) size_t k = static_cast<size_t>(j); size_t begin = std::min(k * step, total); size_t end = std::min((k + 1) * step, total); if (j == ntask - 1) CHECK_EQ(end, total); ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin)); } } } /// \brief temporary space for pushing and pulling struct BufferEntry { /// \brief the merged value NDArray merged; /// \brief the cpu buffer for gpu data std::vector<NDArray> copy_buf; /// \brief the merged buffer for the given storage type inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; size_t bigarray_bound_; int nthread_reduction_; bool is_serial_push_; }; /** * \brief an implementation of Comm that performs reduction on device * directly. * * It is faster if the total device-to-device bandwidth is larger than * device-to-cpu, which is often true for 4 or 8 GPUs. But it uses more device * memory. */ class CommDevice : public Comm { public: CommDevice() { inited_ = false; } virtual ~CommDevice() { } void Init(int key, const NDArrayStorageType stype, const mxnet::TShape& shape, int dtype = mshadow::kFloat32) override { sorted_key_attrs_.emplace_back(key, shape, dtype); inited_ = false; } void InitBuffersAndComm(const std::vector<NDArray>& src) { if (!inited_) { std::vector<Context> devs; for (const auto& a : src) { devs.push_back(a.ctx()); } InitMergeBuffer(devs); if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) { EnableP2P(devs); } } } const NDArray& ReduceRowSparse(int key, const std::vector<NDArray>& src, int priority) { auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); if (buf.copy_buf.empty()) { // initialize buffer for copying during reduce buf.copy_buf.resize(src.size()); for (size_t j = 0; j < src.size(); ++j) { buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, src[0].dtype()); } } CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type()) << "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. 
" << buf.copy_buf[0].storage_type() << "(buf.copy_buf)"; for (size_t i = 0; i < src.size(); ++i) { CopyFromTo(src[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); return buf_merged; } const NDArray& Reduce(int key, const std::vector<NDArray>& src, int priority) override { // when this reduce is called from kvstore_dist, gc is not set // we don't do compression twice in dist_sync_device if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) { return ReduceCompressed(key, src, priority); } // avoid extra copy for single device, but it may bring problems for // abnormal usage of kvstore if (src.size() == 1) { return src[0]; } InitBuffersAndComm(src); auto& buf = merge_buf_[key]; const NDArrayStorageType stype = src[0].storage_type(); NDArray& buf_merged = buf.merged_buf(stype); // normal dense reduce if (stype == kDefaultStorage) { CopyFromTo(src[0], &buf_merged, priority); std::vector<NDArray> reduce(src.size()); reduce[0] = buf_merged; if (buf.copy_buf.empty()) { // TODO(mli) this results in large device memory usage for huge ndarray, // such as the largest fullc in VGG. consider to do segment reduce with // NDArray.Slice or gpu direct memory access. for the latter, we need to // remove some ctx check, and also it reduces 20% perf buf.copy_buf.resize(src.size()-1); for (size_t i = 0; i < src.size()-1; ++i) { buf.copy_buf[i] = NDArray( buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype()); } } for (size_t i = 0; i < src.size()-1; ++i) { CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority); reduce[i+1] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf_merged, priority); } else { // sparse reduce buf_merged = ReduceRowSparse(key, src, priority); } return buf_merged; } const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src, int priority) { InitBuffersAndComm(src); auto& buf = merge_buf_[key]; std::vector<NDArray> reduce(src.size()); if (buf.copy_buf.empty()) { // one buf for each context buf.copy_buf.resize(src.size()); buf.compressed_recv_buf.resize(src.size()); buf.compressed_send_buf.resize(src.size()); buf.residual.resize(src.size()); for (size_t i = 0; i < src.size(); ++i) { buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(), false, buf.merged.dtype()); buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(), false, buf.merged.dtype()); buf.residual[i] = 0; int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size()); buf.compressed_recv_buf[i] = NDArray(mxnet::TShape{small_size}, buf.merged.ctx(), false, buf.merged.dtype()); buf.compressed_send_buf[i] = NDArray(mxnet::TShape{small_size}, src[i].ctx(), false, buf.merged.dtype()); } } for (size_t i = 0; i < src.size(); ++i) { // compress before copy // this is done even if the data is on same context as copy_buf because // we don't want the training to be biased towards data on this GPU gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority); if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) { CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority); } else { // avoid memory copy when they are on same context buf.compressed_recv_buf[i] = buf.compressed_send_buf[i]; } gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority); reduce[i] = buf.copy_buf[i]; } ElementwiseSum(reduce, &buf.merged); return buf.merged; } void Broadcast(int key, const NDArray& src, const std::vector<NDArray*> dst, int priority) override { if (!inited_) 
{ // copy to a random device first int dev_id = key % dst.size(); CopyFromTo(src, dst[dev_id], priority); for (size_t i = 0; i < dst.size(); ++i) { if (i != static_cast<size_t>(dev_id)) { CopyFromTo(*dst[dev_id], dst[i], priority); } } } else { auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type()); CopyFromTo(src, &buf_merged, priority); for (auto d : dst) { CopyFromTo(buf_merged, d, priority); } } } void BroadcastRowSparse(int key, const NDArray& src, const std::vector<std::pair<NDArray*, NDArray>>& dst, const int priority) override { CHECK_EQ(src.storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row-sparse src NDArray"; for (const auto& dst_kv : dst) { NDArray* out = dst_kv.first; NDArray row_id = dst_kv.second; CHECK_EQ(out->storage_type(), kRowSparseStorage) << "BroadcastRowSparse expects row_sparse dst NDArray"; CHECK_EQ(row_id.ctx(), src.ctx()) << "row_id and src are expected to be on the same context"; // retain according to indices const bool is_same_ctx = out->ctx() == src.ctx(); const bool is_diff_var = out->var() != src.var(); NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out : NDArray(kRowSparseStorage, out->shape(), src.ctx(), true, out->dtype(), out->aux_types()); if (!is_diff_var) { common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) + " refers to the same NDArray as the one stored in KVStore. " "Performing row_sparse_pull() with such output is going to change the " "data stored in KVStore. Incorrect result may be generated " "next time row_sparse_pull() is called. To avoid such an issue, " "consider creating a new NDArray buffer to store the output."); } bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask; Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) { const TBlob& indices = row_id.data(); using namespace mxnet::common; NDArray temp = retained_gpu; switch (temp.ctx().dev_mask()) { case cpu::kDevMask: { SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(), src, indices, kWriteTo, &temp); break; } #if MXNET_USE_CUDA case gpu::kDevMask: { SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(), src, indices, kWriteTo, &temp); // wait for GPU operations to complete rctx.get_stream<gpu>()->Wait(); break; } #endif default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR; } on_complete(); }, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()}, is_gpu ? 
FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized, priority, "KVStoreSparseRetain"); CopyFromTo(retained_gpu, out, priority); } } using KeyAttrs = std::tuple<int, mxnet::TShape, int>; // try to allocate buff on device evenly void InitMergeBuffer(const std::vector<Context>& devs) { std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), []( const KeyAttrs& a, const KeyAttrs& b) { return std::get<1>(a).Size() > std::get<1>(b).Size(); }); std::unordered_map<int, std::pair<Context, size_t>> ctx_info; for (auto d : devs) { ctx_info[d.dev_id] = std::make_pair(d, 0); } for (auto& sorted_key_attr : sorted_key_attrs_) { const int key = std::get<0>(sorted_key_attr); const mxnet::TShape& shape = std::get<1>(sorted_key_attr); const int type = std::get<2>(sorted_key_attr); auto& buf = merge_buf_[key]; Context ctx; size_t min_size = std::numeric_limits<size_t>::max(); for (auto& ctx_info_kv : ctx_info) { size_t size = ctx_info_kv.second.second; if (size <= min_size) { ctx = ctx_info_kv.second.first; min_size = size; } } // Delayed allocation - as the dense merged buffer might not be used at all if push() // only sees sparse arrays if (buf.merged.is_none()) { bool delay_alloc = true; buf.merged = NDArray(shape, ctx, delay_alloc, type); } ctx_info[ctx.dev_id].second += shape.Size(); } inited_ = true; } private: void EnableP2P(const std::vector<Context>& devs) { #if MXNET_USE_CUDA std::vector<int> gpus; for (const auto& d : devs) { if (d.dev_mask() == gpu::kDevMask) { gpus.push_back(d.dev_id); } } int n = static_cast<int>(gpus.size()); int enabled = 0; std::vector<int> p2p(n*n); for (int i = 0; i < n; ++i) { // Restores active device to what it was before EnableP2P mxnet::common::cuda::DeviceStore device_store(gpus[i]); for (int j = 0; j < n; j++) { int access; cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]); if (access) { cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0); if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) { ++enabled; p2p[i*n+j] = 1; } } } } if (enabled != n*(n-1)) { // print warning info if not fully enabled LOG(WARNING) << "only " << enabled << " out of " << n*(n-1) << " GPU pairs are enabled direct access. " << "It may affect the performance. " << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off"; std::string access(n, '.'); for (int i = 0; i < n; ++i) { for (int j = 0; j < n; ++j) { access[j] = p2p[i*n+j] ? 
'v' : '.'; } LOG(WARNING) << access; } } #endif } /// \brief temporary space for pushing and pulling struct BufferEntry { /// \brief the dense merged value for reduce and broadcast operations NDArray merged; /// \brief the gpu buffer for copy during reduce operation std::vector<NDArray> copy_buf; /// \brief the residual buffer for gradient compression std::vector<NDArray> residual; /// \brief the small buffer for compressed data in sender std::vector<NDArray> compressed_send_buf; /// \brief the small buffer for compressed data in receiver std::vector<NDArray> compressed_recv_buf; /// \brief the merged buffer for the given storage type (could be either dense or row_sparse) inline NDArray& merged_buf(NDArrayStorageType stype) { if (stype == kDefaultStorage) { CHECK(!merged.is_none()) << "uninitialized merge buffer detected"; return merged; } CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype; // check if sparse_merged is initialized if (sparse_merged.is_none()) { CHECK(!merged.is_none()); sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(), true, merged.dtype()); } return sparse_merged; } private: /// \brief the sparse merged value for reduce and rowsparse broadcast operations NDArray sparse_merged; }; std::unordered_map<int, BufferEntry> merge_buf_; public: bool inited_; std::vector<KeyAttrs> sorted_key_attrs_; }; } // namespace kvstore } // namespace mxnet #endif // MXNET_KVSTORE_COMM_H_
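/*
 * CommCPU::ReduceSumCPUImpl above accumulates several equally shaped arrays into the
 * first one by cutting the flat index space into fixed-size chunks and handing each
 * chunk to an OpenMP task, falling back to a single serial pass for small arrays.
 * The snippet below is a minimal standalone sketch of that chunking scheme (plain
 * float pointers instead of mshadow tensors, a hard-coded chunk size, and no 4-way
 * unrolling); it is illustrative only and not part of comm.h.
 */
#include <algorithm>
#include <cstddef>
#include <vector>

// Accumulate src[1..n-1] into src[0], one chunk of `step` elements per OpenMP task.
inline void ReduceSumChunkedSketch(const std::vector<float*>& src, size_t total,
                                   int nthreads) {
  const size_t step = 4 << 10;  // elements per chunk (assumed, mirrors the 4<<10 cap above)
  const long ntask = static_cast<long>((total + step - 1) / step);  // NOLINT(*)
  #pragma omp parallel for schedule(static) num_threads(nthreads)
  for (long t = 0; t < ntask; ++t) {  // NOLINT(*)
    const size_t begin = std::min(static_cast<size_t>(t) * step, total);
    const size_t end = std::min(begin + step, total);
    for (size_t i = 1; i < src.size(); ++i) {
      for (size_t k = begin; k < end; ++k) {
        src[0][k] += src[i][k];  // elementwise accumulation into the first array
      }
    }
  }
}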
OpenMPClause.h
//===- OpenMPClause.h - Classes for OpenMP clauses --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // /// \file /// This file defines OpenMP AST classes for clauses. /// There are clauses for executable directives, clauses for declarative /// directives and clauses which can be used in both kinds of directives. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_OPENMPCLAUSE_H #define LLVM_CLANG_AST_OPENMPCLAUSE_H #include "clang/AST/ASTFwd.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/MapVector.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/iterator.h" #include "llvm/ADT/iterator_range.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include "llvm/Frontend/OpenMP/OMPContext.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/TrailingObjects.h" #include <cassert> #include <cstddef> #include <iterator> #include <utility> namespace clang { class ASTContext; //===----------------------------------------------------------------------===// // AST classes for clauses. //===----------------------------------------------------------------------===// /// This is a basic class for representing single OpenMP clause. class OMPClause { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Ending location of the clause. SourceLocation EndLoc; /// Kind of the clause. OpenMPClauseKind Kind; protected: OMPClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation EndLoc) : StartLoc(StartLoc), EndLoc(EndLoc), Kind(K) {} public: /// Returns the starting location of the clause. SourceLocation getBeginLoc() const { return StartLoc; } /// Returns the ending location of the clause. SourceLocation getEndLoc() const { return EndLoc; } /// Sets the starting location of the clause. void setLocStart(SourceLocation Loc) { StartLoc = Loc; } /// Sets the ending location of the clause. void setLocEnd(SourceLocation Loc) { EndLoc = Loc; } /// Returns kind of OpenMP clause (private, shared, reduction, etc.). OpenMPClauseKind getClauseKind() const { return Kind; } bool isImplicit() const { return StartLoc.isInvalid(); } using child_iterator = StmtIterator; using const_child_iterator = ConstStmtIterator; using child_range = llvm::iterator_range<child_iterator>; using const_child_range = llvm::iterator_range<const_child_iterator>; child_range children(); const_child_range children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } /// Get the iterator range for the expressions used in the clauses. Used /// expressions include only the children that must be evaluated at the /// runtime before entering the construct. 
child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *) { return true; } }; /// Class that handles pre-initialization statement for some clauses, like /// 'schedule', 'firstprivate' etc. class OMPClauseWithPreInit { friend class OMPClauseReader; /// Pre-initialization statement for the clause. Stmt *PreInit = nullptr; /// Region that captures the associated stmt. OpenMPDirectiveKind CaptureRegion = llvm::omp::OMPD_unknown; protected: OMPClauseWithPreInit(const OMPClause *This) { assert(get(This) && "get is not tuned for pre-init."); } /// Set pre-initialization statement for the clause. void setPreInitStmt(Stmt *S, OpenMPDirectiveKind ThisRegion = llvm::omp::OMPD_unknown) { PreInit = S; CaptureRegion = ThisRegion; } public: /// Get pre-initialization statement for the clause. const Stmt *getPreInitStmt() const { return PreInit; } /// Get pre-initialization statement for the clause. Stmt *getPreInitStmt() { return PreInit; } /// Get capture region for the stmt in the clause. OpenMPDirectiveKind getCaptureRegion() const { return CaptureRegion; } static OMPClauseWithPreInit *get(OMPClause *C); static const OMPClauseWithPreInit *get(const OMPClause *C); }; /// Class that handles post-update expression for some clauses, like /// 'lastprivate', 'reduction' etc. class OMPClauseWithPostUpdate : public OMPClauseWithPreInit { friend class OMPClauseReader; /// Post-update expression for the clause. Expr *PostUpdate = nullptr; protected: OMPClauseWithPostUpdate(const OMPClause *This) : OMPClauseWithPreInit(This) { assert(get(This) && "get is not tuned for post-update."); } /// Set post-update expression for the clause. void setPostUpdateExpr(Expr *S) { PostUpdate = S; } public: /// Get post-update expression for the clause. const Expr *getPostUpdateExpr() const { return PostUpdate; } /// Get post-update expression for the clause. Expr *getPostUpdateExpr() { return PostUpdate; } static OMPClauseWithPostUpdate *get(OMPClause *C); static const OMPClauseWithPostUpdate *get(const OMPClause *C); }; /// This structure contains most locations needed by an OMPVarListClause. struct OMPVarListLocTy { /// Starting location of the clause (the clause keyword). SourceLocation StartLoc; /// Location of '('. SourceLocation LParenLoc; /// Ending location of the clause. SourceLocation EndLoc; OMPVarListLocTy() = default; OMPVarListLocTy(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : StartLoc(StartLoc), LParenLoc(LParenLoc), EndLoc(EndLoc) {} }; /// This represents clauses with the list of variables like 'private', /// 'firstprivate', 'copyin', 'shared', or 'reduction' clauses in the /// '#pragma omp ...' directives. template <class T> class OMPVarListClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of variables in the list. unsigned NumVars; protected: /// Build a clause with \a N variables /// /// \param K Kind of the clause. /// \param StartLoc Starting location of the clause (the clause keyword). /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause.
OMPVarListClause(OpenMPClauseKind K, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(K, StartLoc, EndLoc), LParenLoc(LParenLoc), NumVars(N) {} /// Fetches list of variables associated with this clause. MutableArrayRef<Expr *> getVarRefs() { return MutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } /// Sets the list of variables for this clause. void setVarRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumVars && "Number of variables is not the same as the preallocated buffer"); std::copy(VL.begin(), VL.end(), static_cast<T *>(this)->template getTrailingObjects<Expr *>()); } public: using varlist_iterator = MutableArrayRef<Expr *>::iterator; using varlist_const_iterator = ArrayRef<const Expr *>::iterator; using varlist_range = llvm::iterator_range<varlist_iterator>; using varlist_const_range = llvm::iterator_range<varlist_const_iterator>; unsigned varlist_size() const { return NumVars; } bool varlist_empty() const { return NumVars == 0; } varlist_range varlists() { return varlist_range(varlist_begin(), varlist_end()); } varlist_const_range varlists() const { return varlist_const_range(varlist_begin(), varlist_end()); } varlist_iterator varlist_begin() { return getVarRefs().begin(); } varlist_iterator varlist_end() { return getVarRefs().end(); } varlist_const_iterator varlist_begin() const { return getVarRefs().begin(); } varlist_const_iterator varlist_end() const { return getVarRefs().end(); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Fetches list of all variables in the clause. ArrayRef<const Expr *> getVarRefs() const { return llvm::makeArrayRef( static_cast<const T *>(this)->template getTrailingObjects<Expr *>(), NumVars); } }; /// This represents 'allocator' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp allocate(a) allocator(omp_default_mem_alloc) /// \endcode /// In this example directive '#pragma omp allocate' has simple 'allocator' /// clause with the allocator 'omp_default_mem_alloc'. class OMPAllocatorClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Expression with the allocator. Stmt *Allocator = nullptr; /// Set allocator. void setAllocator(Expr *A) { Allocator = A; } public: /// Build 'allocator' clause with the given allocator. /// /// \param A Allocator. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAllocatorClause(Expr *A, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_allocator, StartLoc, EndLoc), LParenLoc(LParenLoc), Allocator(A) {} /// Build an empty clause. OMPAllocatorClause() : OMPClause(llvm::omp::OMPC_allocator, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns allocator. 
Expr *getAllocator() const { return cast_or_null<Expr>(Allocator); } child_range children() { return child_range(&Allocator, &Allocator + 1); } const_child_range children() const { return const_child_range(&Allocator, &Allocator + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocator; } }; /// This represents clause 'allocate' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a) allocate(omp_default_mem_alloc :a) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// and clause 'allocate' for the variable 'a'. class OMPAllocateClause final : public OMPVarListClause<OMPAllocateClause>, private llvm::TrailingObjects<OMPAllocateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Allocator specified in the clause, or 'nullptr' if the default one is /// used. Expr *Allocator = nullptr; /// Position of the ':' delimiter in the clause; SourceLocation ColonLoc; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPAllocateClause(SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, StartLoc, LParenLoc, EndLoc, N), Allocator(Allocator), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPAllocateClause(unsigned N) : OMPVarListClause<OMPAllocateClause>(llvm::omp::OMPC_allocate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } void setAllocator(Expr *A) { Allocator = A; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Allocator Allocator expression. /// \param ColonLoc Location of ':' delimiter. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPAllocateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, Expr *Allocator, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Returns the allocator expression or nullptr, if no allocator is specified. Expr *getAllocator() const { return Allocator; } /// Returns the location of the ':' delimiter. SourceLocation getColonLoc() const { return ColonLoc; } /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPAllocateClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAllocateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_allocate; } }; /// This represents 'if' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel if(parallel:a > 5) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'if' clause with /// condition 'a > 5' and directive name modifier 'parallel'. class OMPIfClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Location of ':' (if any). SourceLocation ColonLoc; /// Directive name modifier for the clause. OpenMPDirectiveKind NameModifier = llvm::omp::OMPD_unknown; /// Name modifier location. SourceLocation NameModifierLoc; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } /// Set directive name modifier for the clause. void setNameModifier(OpenMPDirectiveKind NM) { NameModifier = NM; } /// Set location of directive name modifier for the clause. void setNameModifierLoc(SourceLocation Loc) { NameModifierLoc = Loc; } /// Set location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Build 'if' clause with condition \a Cond. /// /// \param NameModifier [OpenMP 4.1] Directive name modifier of clause. /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param NameModifierLoc Location of directive name modifier. /// \param ColonLoc [OpenMP 4.1] Location of ':'. /// \param EndLoc Ending location of the clause. OMPIfClause(OpenMPDirectiveKind NameModifier, Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation NameModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_if, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond), ColonLoc(ColonLoc), NameModifier(NameModifier), NameModifierLoc(NameModifierLoc) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPIfClause() : OMPClause(llvm::omp::OMPC_if, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } /// Return directive name modifier associated with the clause. OpenMPDirectiveKind getNameModifier() const { return NameModifier; } /// Return the location of directive name modifier. 
SourceLocation getNameModifierLoc() const { return NameModifierLoc; } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPIfClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_if; } }; /// This represents 'final' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task final(a > 5) /// \endcode /// In this example directive '#pragma omp task' has simple 'final' /// clause with condition 'a > 5'. class OMPFinalClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'if' clause. Stmt *Condition = nullptr; /// Set condition. void setCondition(Expr *Cond) { Condition = Cond; } public: /// Build 'final' clause with condition \a Cond. /// /// \param Cond Condition of the clause. /// \param HelperCond Helper condition for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPFinalClause(Expr *Cond, Stmt *HelperCond, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_final, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Condition(Cond) { setPreInitStmt(HelperCond, CaptureRegion); } /// Build an empty clause. OMPFinalClause() : OMPClause(llvm::omp::OMPC_final, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns condition. Expr *getCondition() const { return cast_or_null<Expr>(Condition); } child_range children() { return child_range(&Condition, &Condition + 1); } const_child_range children() const { return const_child_range(&Condition, &Condition + 1); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPFinalClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_final; } }; /// This represents 'num_threads' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel num_threads(6) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'num_threads' /// clause with number of threads '6'. class OMPNumThreadsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Condition of the 'num_threads' clause. Stmt *NumThreads = nullptr; /// Set condition. void setNumThreads(Expr *NThreads) { NumThreads = NThreads; } public: /// Build 'num_threads' clause with condition \a NumThreads. /// /// \param NumThreads Number of threads for the construct. /// \param HelperNumThreads Helper Number of threads for the construct. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. 
/// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNumThreadsClause(Expr *NumThreads, Stmt *HelperNumThreads, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_threads, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumThreads(NumThreads) { setPreInitStmt(HelperNumThreads, CaptureRegion); } /// Build an empty clause. OMPNumThreadsClause() : OMPClause(llvm::omp::OMPC_num_threads, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of threads. Expr *getNumThreads() const { return cast_or_null<Expr>(NumThreads); } child_range children() { return child_range(&NumThreads, &NumThreads + 1); } const_child_range children() const { return const_child_range(&NumThreads, &NumThreads + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_num_threads; } }; /// This represents 'safelen' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd safelen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'safelen' /// with single expression '4'. /// If the safelen clause is used then no two iterations executed /// concurrently with SIMD instructions can have a greater distance /// in the logical iteration space than its value. The parameter of /// the safelen clause must be a constant positive integer expression. class OMPSafelenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Safe iteration space distance. Stmt *Safelen = nullptr; /// Set safelen. void setSafelen(Expr *Len) { Safelen = Len; } public: /// Build 'safelen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSafelenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_safelen, StartLoc, EndLoc), LParenLoc(LParenLoc), Safelen(Len) {} /// Build an empty clause. explicit OMPSafelenClause() : OMPClause(llvm::omp::OMPC_safelen, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return safe iteration space distance. Expr *getSafelen() const { return cast_or_null<Expr>(Safelen); } child_range children() { return child_range(&Safelen, &Safelen + 1); } const_child_range children() const { return const_child_range(&Safelen, &Safelen + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_safelen; } }; /// This represents 'simdlen' clause in the '#pragma omp ...' /// directive. 
/// /// \code /// #pragma omp simd simdlen(4) /// \endcode /// In this example directive '#pragma omp simd' has clause 'simdlen' /// with single expression '4'. /// If the 'simdlen' clause is used then it specifies the preferred number of /// iterations to be executed concurrently. The parameter of the 'simdlen' /// clause must be a constant positive integer expression. class OMPSimdlenClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Preferred number of iterations to be executed concurrently. Stmt *Simdlen = nullptr; /// Set simdlen. void setSimdlen(Expr *Len) { Simdlen = Len; } public: /// Build 'simdlen' clause. /// /// \param Len Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSimdlenClause(Expr *Len, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_simdlen, StartLoc, EndLoc), LParenLoc(LParenLoc), Simdlen(Len) {} /// Build an empty clause. explicit OMPSimdlenClause() : OMPClause(llvm::omp::OMPC_simdlen, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the preferred number of iterations to be executed concurrently. Expr *getSimdlen() const { return cast_or_null<Expr>(Simdlen); } child_range children() { return child_range(&Simdlen, &Simdlen + 1); } const_child_range children() const { return const_child_range(&Simdlen, &Simdlen + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_simdlen; } }; /// This represents the 'sizes' clause in the '#pragma omp tile' directive. /// /// \code /// #pragma omp tile sizes(5,5) /// for (int i = 0; i < 64; ++i) /// for (int j = 0; j < 64; ++j) /// \endcode class OMPSizesClause final : public OMPClause, private llvm::TrailingObjects<OMPSizesClause, Expr *> { friend class OMPClauseReader; friend class llvm::TrailingObjects<OMPSizesClause, Expr *>; /// Location of '('. SourceLocation LParenLoc; /// Number of tile sizes in the clause. unsigned NumSizes; /// Build an empty clause. explicit OMPSizesClause(int NumSizes) : OMPClause(llvm::omp::OMPC_sizes, SourceLocation(), SourceLocation()), NumSizes(NumSizes) {} public: /// Build a 'sizes' AST node. /// /// \param C Context of the AST. /// \param StartLoc Location of the 'sizes' identifier. /// \param LParenLoc Location of '('. /// \param EndLoc Location of ')'. /// \param Sizes Content of the clause. static OMPSizesClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> Sizes); /// Build an empty 'sizes' AST node for deserialization. /// /// \param C Context of the AST. /// \param NumSizes Number of items in the clause. static OMPSizesClause *CreateEmpty(const ASTContext &C, unsigned NumSizes); /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns the number of list items. unsigned getNumSizes() const { return NumSizes; } /// Returns the tile size expressions.
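// Usage sketch (illustrative, not part of this header): the tile size
// expressions stored in the trailing storage can be visited through the
// getSizesRefs() accessor defined below. 'dumpTileSizes' is a hypothetical
// helper name.
//
//   void dumpTileSizes(const OMPSizesClause *C) {
//     llvm::errs() << C->getNumSizes() << " tile size(s)\n";
//     for (const Expr *Size : C->getSizesRefs())
//       Size->dump(); // e.g. the two '5' literals in sizes(5,5)
//   }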
MutableArrayRef<Expr *> getSizesRefs() { return MutableArrayRef<Expr *>(static_cast<OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>(), NumSizes); } ArrayRef<Expr *> getSizesRefs() const { return ArrayRef<Expr *>(static_cast<const OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>(), NumSizes); } /// Sets the tile size expressions. void setSizesRefs(ArrayRef<Expr *> VL) { assert(VL.size() == NumSizes); std::copy(VL.begin(), VL.end(), static_cast<OMPSizesClause *>(this) ->template getTrailingObjects<Expr *>()); } child_range children() { MutableArrayRef<Expr *> Sizes = getSizesRefs(); return child_range(reinterpret_cast<Stmt **>(Sizes.begin()), reinterpret_cast<Stmt **>(Sizes.end())); } const_child_range children() const { ArrayRef<Expr *> Sizes = getSizesRefs(); return const_child_range(reinterpret_cast<Stmt *const *>(Sizes.begin()), reinterpret_cast<Stmt *const *>(Sizes.end())); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_sizes; } }; /// This represents 'collapse' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp simd collapse(3) /// \endcode /// In this example directive '#pragma omp simd' has clause 'collapse' /// with single expression '3'. /// The parameter must be a constant positive integer expression, it specifies /// the number of nested loops that should be collapsed into a single iteration /// space. class OMPCollapseClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'collapse' clause. /// /// \param Num Expression associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPCollapseClause(Expr *Num, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_collapse, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num) {} /// Build an empty clause. explicit OMPCollapseClause() : OMPClause(llvm::omp::OMPC_collapse, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_collapse; } }; /// This represents 'default' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp parallel default(shared) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'default' /// clause with kind 'shared'. 
class OMPDefaultClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'default' clause. llvm::omp::DefaultKind Kind = llvm::omp::OMP_DEFAULT_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clauses. /// /// \param K Argument of clause. void setDefaultKind(llvm::omp::DefaultKind K) { Kind = K; } /// Set argument location. /// /// \param KLoc Argument location. void setDefaultKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'default' clause with argument \a A ('none' or 'shared'). /// /// \param A Argument of the clause ('none' or 'shared'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPDefaultClause(llvm::omp::DefaultKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_default, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPDefaultClause() : OMPClause(llvm::omp::OMPC_default, SourceLocation(), SourceLocation()) { } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::DefaultKind getDefaultKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getDefaultKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_default; } }; /// This represents 'proc_bind' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp parallel proc_bind(master) /// \endcode /// In this example directive '#pragma omp parallel' has simple 'proc_bind' /// clause with kind 'master'. class OMPProcBindClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'proc_bind' clause. llvm::omp::ProcBindKind Kind = llvm::omp::OMP_PROC_BIND_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. void setProcBindKind(llvm::omp::ProcBindKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. void setProcBindKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'proc_bind' clause with argument \a A ('master', 'close' or /// 'spread'). /// /// \param A Argument of the clause ('master', 'close' or 'spread'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. 
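// Usage sketch (illustrative): 'default' and 'proc_bind' carry no child
// expressions, only an enumerated kind plus source locations, so consumers
// typically just compare the kind. The helper names below are hypothetical,
// and the enumerator spellings are assumed from the llvm::omp constants.
//
//   bool isDefaultNone(const OMPDefaultClause *C) {
//     return C->getDefaultKind() == llvm::omp::OMP_DEFAULT_none;
//   }
//   bool isProcBindClose(const OMPProcBindClause *C) {
//     return C->getProcBindKind() == llvm::omp::OMP_PROC_BIND_close;
//   }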
OMPProcBindClause(llvm::omp::ProcBindKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_proc_bind, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPProcBindClause() : OMPClause(llvm::omp::OMPC_proc_bind, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. llvm::omp::ProcBindKind getProcBindKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getProcBindKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_proc_bind; } }; /// This represents 'unified_address' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_address /// \endcode /// In this example directive '#pragma omp requires' has 'unified_address' /// clause. class OMPUnifiedAddressClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_address' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedAddressClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_address, StartLoc, EndLoc) {} /// Build an empty clause. OMPUnifiedAddressClause() : OMPClause(llvm::omp::OMPC_unified_address, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_address; } }; /// This represents 'unified_shared_memory' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires unified_shared_memory /// \endcode /// In this example directive '#pragma omp requires' has 'unified_shared_memory' /// clause. class OMPUnifiedSharedMemoryClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'unified_shared_memory' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUnifiedSharedMemoryClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_unified_shared_memory, StartLoc, EndLoc) {} /// Build an empty clause. 
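// Usage sketch (illustrative): the '#pragma omp requires' clauses modeled in
// this part of the file (unified_address, unified_shared_memory,
// reverse_offload, dynamic_allocators) carry no payload; their presence is
// the information. A hypothetical presence check over a clause list:
//
//   bool requiresUnifiedSharedMemory(ArrayRef<OMPClause *> Clauses) {
//     for (const OMPClause *C : Clauses)
//       if (isa<OMPUnifiedSharedMemoryClause>(C))
//         return true;
//     return false;
//   }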
OMPUnifiedSharedMemoryClause() : OMPClause(llvm::omp::OMPC_unified_shared_memory, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_unified_shared_memory; } }; /// This represents 'reverse_offload' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires reverse_offload /// \endcode /// In this example directive '#pragma omp requires' has 'reverse_offload' /// clause. class OMPReverseOffloadClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'reverse_offload' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReverseOffloadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_reverse_offload, StartLoc, EndLoc) {} /// Build an empty clause. OMPReverseOffloadClause() : OMPClause(llvm::omp::OMPC_reverse_offload, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_reverse_offload; } }; /// This represents 'dynamic_allocators' clause in the '#pragma omp requires' /// directive. /// /// \code /// #pragma omp requires dynamic_allocators /// \endcode /// In this example directive '#pragma omp requires' has 'dynamic_allocators' /// clause. class OMPDynamicAllocatorsClause final : public OMPClause { public: friend class OMPClauseReader; /// Build 'dynamic_allocators' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPDynamicAllocatorsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_dynamic_allocators, StartLoc, EndLoc) {} /// Build an empty clause. OMPDynamicAllocatorsClause() : OMPClause(llvm::omp::OMPC_dynamic_allocators, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_dynamic_allocators; } }; /// This represents 'atomic_default_mem_order' clause in the '#pragma omp /// requires' directive. /// /// \code /// #pragma omp requires atomic_default_mem_order(seq_cst) /// \endcode /// In this example directive '#pragma omp requires' has simple /// atomic_default_mem_order' clause with kind 'seq_cst'. 
class OMPAtomicDefaultMemOrderClause final : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'atomic_default_mem_order' clause. OpenMPAtomicDefaultMemOrderClauseKind Kind = OMPC_ATOMIC_DEFAULT_MEM_ORDER_unknown; /// Start location of the kind in source code. SourceLocation KindKwLoc; /// Set kind of the clause. /// /// \param K Kind of clause. void setAtomicDefaultMemOrderKind(OpenMPAtomicDefaultMemOrderClauseKind K) { Kind = K; } /// Set clause kind location. /// /// \param KLoc Kind location. void setAtomicDefaultMemOrderKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; } public: /// Build 'atomic_default_mem_order' clause with argument \a A ('seq_cst', /// 'acq_rel' or 'relaxed'). /// /// \param A Argument of the clause ('seq_cst', 'acq_rel' or 'relaxed'). /// \param ALoc Starting location of the argument. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPAtomicDefaultMemOrderClause(OpenMPAtomicDefaultMemOrderClauseKind A, SourceLocation ALoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, StartLoc, EndLoc), LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {} /// Build an empty clause. OMPAtomicDefaultMemOrderClause() : OMPClause(llvm::omp::OMPC_atomic_default_mem_order, SourceLocation(), SourceLocation()) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns kind of the clause. OpenMPAtomicDefaultMemOrderClauseKind getAtomicDefaultMemOrderKind() const { return Kind; } /// Returns location of clause kind. SourceLocation getAtomicDefaultMemOrderKindKwLoc() const { return KindKwLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_atomic_default_mem_order; } }; /// This represents 'schedule' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for schedule(static, 3) /// \endcode /// In this example directive '#pragma omp for' has 'schedule' clause with /// arguments 'static' and '3'. class OMPScheduleClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// A kind of the 'schedule' clause. OpenMPScheduleClauseKind Kind = OMPC_SCHEDULE_unknown; /// Modifiers for 'schedule' clause. enum {FIRST, SECOND, NUM_MODIFIERS}; OpenMPScheduleClauseModifier Modifiers[NUM_MODIFIERS]; /// Locations of modifiers. SourceLocation ModifiersLoc[NUM_MODIFIERS]; /// Start location of the schedule kind in source code. SourceLocation KindLoc; /// Location of ',' (if any). SourceLocation CommaLoc; /// Chunk size. Expr *ChunkSize = nullptr; /// Set schedule kind. /// /// \param K Schedule kind. void setScheduleKind(OpenMPScheduleClauseKind K) { Kind = K; } /// Set the first schedule modifier. /// /// \param M Schedule modifier.
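// Usage sketch (illustrative): a consumer of OMPScheduleClause usually reads
// the schedule kind, the optional modifiers and the chunk expression through
// the public getters declared further down. 'describeSchedule' is a
// hypothetical helper; the enumerator spellings are the usual OpenMPKinds
// names and are assumed here.
//
//   void describeSchedule(const OMPScheduleClause *C) {
//     if (C->getScheduleKind() == OMPC_SCHEDULE_static && !C->getChunkSize())
//       llvm::errs() << "static schedule without a chunk size\n";
//     if (C->getFirstScheduleModifier() == OMPC_SCHEDULE_MODIFIER_monotonic)
//       llvm::errs() << "monotonic modifier present\n";
//   }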
void setFirstScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[FIRST] = M; } /// Set the second schedule modifier. /// /// \param M Schedule modifier. void setSecondScheduleModifier(OpenMPScheduleClauseModifier M) { Modifiers[SECOND] = M; } /// Set location of the first schedule modifier. void setFirstScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[FIRST] = Loc; } /// Set location of the second schedule modifier. void setSecondScheduleModifierLoc(SourceLocation Loc) { ModifiersLoc[SECOND] = Loc; } /// Set schedule modifier location. /// /// \param M Schedule modifier location. void setScheduleModifer(OpenMPScheduleClauseModifier M) { if (Modifiers[FIRST] == OMPC_SCHEDULE_MODIFIER_unknown) Modifiers[FIRST] = M; else { assert(Modifiers[SECOND] == OMPC_SCHEDULE_MODIFIER_unknown); Modifiers[SECOND] = M; } } /// Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set schedule kind start location. /// /// \param KLoc Schedule kind location. void setScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } /// Set location of ','. /// /// \param Loc Location of ','. void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; } /// Set chunk size. /// /// \param E Chunk size. void setChunkSize(Expr *E) { ChunkSize = E; } public: /// Build 'schedule' clause with schedule kind \a Kind and chunk size /// expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind Schedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. /// \param M1 The first modifier applied to 'schedule' clause. /// \param M1Loc Location of the first modifier /// \param M2 The second modifier applied to 'schedule' clause. /// \param M2Loc Location of the second modifier OMPScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize, OpenMPScheduleClauseModifier M1, SourceLocation M1Loc, OpenMPScheduleClauseModifier M2, SourceLocation M2Loc) : OMPClause(llvm::omp::OMPC_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); Modifiers[FIRST] = M1; Modifiers[SECOND] = M2; ModifiersLoc[FIRST] = M1Loc; ModifiersLoc[SECOND] = M2Loc; } /// Build an empty clause. explicit OMPScheduleClause() : OMPClause(llvm::omp::OMPC_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) { Modifiers[FIRST] = OMPC_SCHEDULE_MODIFIER_unknown; Modifiers[SECOND] = OMPC_SCHEDULE_MODIFIER_unknown; } /// Get kind of the clause. OpenMPScheduleClauseKind getScheduleKind() const { return Kind; } /// Get the first modifier of the clause. OpenMPScheduleClauseModifier getFirstScheduleModifier() const { return Modifiers[FIRST]; } /// Get the second modifier of the clause. OpenMPScheduleClauseModifier getSecondScheduleModifier() const { return Modifiers[SECOND]; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getScheduleKindLoc() { return KindLoc; } /// Get the first modifier location. 
SourceLocation getFirstScheduleModifierLoc() const { return ModifiersLoc[FIRST]; } /// Get the second modifier location. SourceLocation getSecondScheduleModifierLoc() const { return ModifiersLoc[SECOND]; } /// Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// Get chunk size. const Expr *getChunkSize() const { return ChunkSize; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } const_child_range children() const { auto Children = const_cast<OMPScheduleClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_schedule; } }; /// This represents 'ordered' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for ordered (2) /// \endcode /// In this example directive '#pragma omp for' has 'ordered' clause with /// parameter 2. class OMPOrderedClause final : public OMPClause, private llvm::TrailingObjects<OMPOrderedClause, Expr *> { friend class OMPClauseReader; friend TrailingObjects; /// Location of '('. SourceLocation LParenLoc; /// Number of for-loops. Stmt *NumForLoops = nullptr; /// Real number of loops. unsigned NumberOfLoops = 0; /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPOrderedClause(Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_ordered, StartLoc, EndLoc), LParenLoc(LParenLoc), NumForLoops(Num), NumberOfLoops(NumLoops) {} /// Build an empty clause. explicit OMPOrderedClause(unsigned NumLoops) : OMPClause(llvm::omp::OMPC_ordered, SourceLocation(), SourceLocation()), NumberOfLoops(NumLoops) {} /// Set the number of associated for-loops. void setNumForLoops(Expr *Num) { NumForLoops = Num; } public: /// Build 'ordered' clause. /// /// \param Num Expression, possibly associated with this clause. /// \param NumLoops Number of loops, associated with this clause. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. static OMPOrderedClause *Create(const ASTContext &C, Expr *Num, unsigned NumLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// Build an empty clause. static OMPOrderedClause* CreateEmpty(const ASTContext &C, unsigned NumLoops); /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return the number of associated for-loops. Expr *getNumForLoops() const { return cast_or_null<Expr>(NumForLoops); } /// Set number of iterations for the specified loop. void setLoopNumIterations(unsigned NumLoop, Expr *NumIterations); /// Get number of iterations for all the loops. ArrayRef<Expr *> getLoopNumIterations() const; /// Set loop counter for the specified loop. 
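// Usage sketch (illustrative): 'ordered' may appear with or without a
// parameter, so getNumForLoops() has to be null-checked before use.
// 'hasOrderedParameter' is a hypothetical helper.
//
//   bool hasOrderedParameter(const OMPOrderedClause *C) {
//     return C->getNumForLoops() != nullptr; // true for 'ordered(2)'
//   }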
void setLoopCounter(unsigned NumLoop, Expr *Counter); /// Get loops counter for the specified loop. Expr *getLoopCounter(unsigned NumLoop); const Expr *getLoopCounter(unsigned NumLoop) const; child_range children() { return child_range(&NumForLoops, &NumForLoops + 1); } const_child_range children() const { return const_child_range(&NumForLoops, &NumForLoops + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_ordered; } }; /// This represents 'nowait' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp for nowait /// \endcode /// In this example directive '#pragma omp for' has 'nowait' clause. class OMPNowaitClause : public OMPClause { public: /// Build 'nowait' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_nowait, StartLoc, EndLoc) {} /// Build an empty clause. OMPNowaitClause() : OMPClause(llvm::omp::OMPC_nowait, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_nowait; } }; /// This represents 'untied' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp task untied /// \endcode /// In this example directive '#pragma omp task' has 'untied' clause. class OMPUntiedClause : public OMPClause { public: /// Build 'untied' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_untied, StartLoc, EndLoc) {} /// Build an empty clause. OMPUntiedClause() : OMPClause(llvm::omp::OMPC_untied, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_untied; } }; /// This represents 'mergeable' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task mergeable /// \endcode /// In this example directive '#pragma omp task' has 'mergeable' clause. class OMPMergeableClause : public OMPClause { public: /// Build 'mergeable' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_mergeable, StartLoc, EndLoc) {} /// Build an empty clause. 
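// Usage sketch (illustrative): like the 'requires' clauses earlier in this
// file, 'nowait', 'untied' and 'mergeable' are pure markers, so a consumer
// only tests for their presence. 'hasNowait' is a hypothetical helper.
//
//   bool hasNowait(ArrayRef<OMPClause *> Clauses) {
//     return llvm::any_of(Clauses, [](const OMPClause *C) {
//       return isa<OMPNowaitClause>(C);
//     });
//   }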
OMPMergeableClause() : OMPClause(llvm::omp::OMPC_mergeable, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_mergeable; } }; /// This represents 'read' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic read /// \endcode /// In this example directive '#pragma omp atomic' has 'read' clause. class OMPReadClause : public OMPClause { public: /// Build 'read' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_read, StartLoc, EndLoc) {} /// Build an empty clause. OMPReadClause() : OMPClause(llvm::omp::OMPC_read, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_read; } }; /// This represents 'write' clause in the '#pragma omp atomic' directive. /// /// \code /// #pragma omp atomic write /// \endcode /// In this example directive '#pragma omp atomic' has 'write' clause. class OMPWriteClause : public OMPClause { public: /// Build 'write' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_write, StartLoc, EndLoc) {} /// Build an empty clause. OMPWriteClause() : OMPClause(llvm::omp::OMPC_write, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_write; } }; /// This represents 'update' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic update /// \endcode /// In this example directive '#pragma omp atomic' has 'update' clause. /// Also, this class represents 'update' clause in '#pragma omp depobj' /// directive. /// /// \code /// #pragma omp depobj(a) update(in) /// \endcode /// In this example directive '#pragma omp depobj' has 'update' clause with 'in' /// dependence kind. class OMPUpdateClause final : public OMPClause, private llvm::TrailingObjects<OMPUpdateClause, SourceLocation, OpenMPDependClauseKind> { friend class OMPClauseReader; friend TrailingObjects; /// true if extended version of the clause for 'depobj' directive. 
bool IsExtended = false; /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<SourceLocation>) const { // 2 locations: for '(' and argument location. return IsExtended ? 2 : 0; } /// Sets the location of '(' in clause for 'depobj' directive. void setLParenLoc(SourceLocation Loc) { assert(IsExtended && "Expected extended clause."); *getTrailingObjects<SourceLocation>() = Loc; } /// Sets the location of the argument in clause for 'depobj' directive. void setArgumentLoc(SourceLocation Loc) { assert(IsExtended && "Expected extended clause."); *std::next(getTrailingObjects<SourceLocation>(), 1) = Loc; } /// Sets the dependence kind for the clause for 'depobj' directive. void setDependencyKind(OpenMPDependClauseKind DK) { assert(IsExtended && "Expected extended clause."); *getTrailingObjects<OpenMPDependClauseKind>() = DK; } /// Build 'update' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc, bool IsExtended) : OMPClause(llvm::omp::OMPC_update, StartLoc, EndLoc), IsExtended(IsExtended) {} /// Build an empty clause. OMPUpdateClause(bool IsExtended) : OMPClause(llvm::omp::OMPC_update, SourceLocation(), SourceLocation()), IsExtended(IsExtended) {} public: /// Creates clause for 'atomic' directive. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation EndLoc); /// Creates clause for 'depobj' directive. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ArgumentLoc Location of the argument. /// \param DK Dependence kind. /// \param EndLoc Ending location of the clause. static OMPUpdateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, OpenMPDependClauseKind DK, SourceLocation EndLoc); /// Creates an empty clause. /// /// \param C AST context. /// \param IsExtended true if extended clause for 'depobj' directive must be /// created. static OMPUpdateClause *CreateEmpty(const ASTContext &C, bool IsExtended); /// Checks if the clause is the extended clause for the 'depobj' directive. bool isExtended() const { return IsExtended; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } /// Gets the location of '(' in clause for 'depobj' directive. SourceLocation getLParenLoc() const { assert(IsExtended && "Expected extended clause."); return *getTrailingObjects<SourceLocation>(); } /// Gets the location of the argument in clause for 'depobj' directive. SourceLocation getArgumentLoc() const { assert(IsExtended && "Expected extended clause."); return *std::next(getTrailingObjects<SourceLocation>(), 1); } /// Gets the dependence kind in clause for 'depobj' directive.
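// Usage sketch (illustrative): the same class models the bare 'update' clause
// on '#pragma omp atomic' and the extended 'update(in)' form on
// '#pragma omp depobj', so isExtended() must be checked before using the
// depobj-only accessors. 'classifyUpdate' is a hypothetical helper.
//
//   void classifyUpdate(const OMPUpdateClause *C) {
//     if (!C->isExtended()) {
//       llvm::errs() << "plain atomic 'update' clause\n";
//       return;
//     }
//     llvm::errs() << "depobj 'update' clause, dependence kind "
//                  << (unsigned)C->getDependencyKind() << "\n";
//   }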
OpenMPDependClauseKind getDependencyKind() const { assert(IsExtended && "Expected extended clause."); return *getTrailingObjects<OpenMPDependClauseKind>(); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_update; } }; /// This represents 'capture' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic capture /// \endcode /// In this example directive '#pragma omp atomic' has 'capture' clause. class OMPCaptureClause : public OMPClause { public: /// Build 'capture' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_capture, StartLoc, EndLoc) {} /// Build an empty clause. OMPCaptureClause() : OMPClause(llvm::omp::OMPC_capture, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_capture; } }; /// This represents 'seq_cst' clause in the '#pragma omp atomic' /// directive. /// /// \code /// #pragma omp atomic seq_cst /// \endcode /// In this example directive '#pragma omp atomic' has 'seq_cst' clause. class OMPSeqCstClause : public OMPClause { public: /// Build 'seq_cst' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_seq_cst, StartLoc, EndLoc) {} /// Build an empty clause. OMPSeqCstClause() : OMPClause(llvm::omp::OMPC_seq_cst, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_seq_cst; } }; /// This represents 'acq_rel' clause in the '#pragma omp atomic|flush' /// directives. /// /// \code /// #pragma omp flush acq_rel /// \endcode /// In this example directive '#pragma omp flush' has 'acq_rel' clause. class OMPAcqRelClause final : public OMPClause { public: /// Build 'acq_rel' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPAcqRelClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_acq_rel, StartLoc, EndLoc) {} /// Build an empty clause.
OMPAcqRelClause() : OMPClause(llvm::omp::OMPC_acq_rel, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_acq_rel; } }; /// This represents 'acquire' clause in the '#pragma omp atomic|flush' /// directives. /// /// \code /// #pragma omp flush acquire /// \endcode /// In this example directive '#pragma omp flush' has 'acquire' clause. class OMPAcquireClause final : public OMPClause { public: /// Build 'acquire' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPAcquireClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_acquire, StartLoc, EndLoc) {} /// Build an empty clause. OMPAcquireClause() : OMPClause(llvm::omp::OMPC_acquire, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_acquire; } }; /// This represents 'release' clause in the '#pragma omp atomic|flush' /// directives. /// /// \code /// #pragma omp flush release /// \endcode /// In this example directive '#pragma omp flush' has 'release' clause. class OMPReleaseClause final : public OMPClause { public: /// Build 'release' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPReleaseClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_release, StartLoc, EndLoc) {} /// Build an empty clause. OMPReleaseClause() : OMPClause(llvm::omp::OMPC_release, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_release; } }; /// This represents 'relaxed' clause in the '#pragma omp atomic' /// directives. /// /// \code /// #pragma omp atomic relaxed /// \endcode /// In this example directive '#pragma omp atomic' has 'relaxed' clause. class OMPRelaxedClause final : public OMPClause { public: /// Build 'relaxed' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPRelaxedClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_relaxed, StartLoc, EndLoc) {} /// Build an empty clause. 
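// Usage sketch (illustrative): 'seq_cst', 'acq_rel', 'acquire', 'release' and
// 'relaxed' are mutually exclusive memory-order markers. A consumer commonly
// maps the clause to an ordering tag; 'MemOrder' and 'pickMemoryOrder' are
// hypothetical names used only for this example.
//
//   enum class MemOrder { SeqCst, AcqRel, Acquire, Release, Relaxed, None };
//   MemOrder pickMemoryOrder(const OMPClause *C) {
//     if (isa<OMPSeqCstClause>(C))  return MemOrder::SeqCst;
//     if (isa<OMPAcqRelClause>(C))  return MemOrder::AcqRel;
//     if (isa<OMPAcquireClause>(C)) return MemOrder::Acquire;
//     if (isa<OMPReleaseClause>(C)) return MemOrder::Release;
//     if (isa<OMPRelaxedClause>(C)) return MemOrder::Relaxed;
//     return MemOrder::None;
//   }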
OMPRelaxedClause() : OMPClause(llvm::omp::OMPC_relaxed, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_relaxed; } }; /// This represents clause 'private' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel private(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'private' /// with the variables 'a' and 'b'. class OMPPrivateClause final : public OMPVarListClause<OMPPrivateClause>, private llvm::TrailingObjects<OMPPrivateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPPrivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPPrivateClause(unsigned N) : OMPVarListClause<OMPPrivateClause>(llvm::omp::OMPC_private, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PrivateVL List of references to private copies with initializers. static OMPPrivateClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
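// Usage sketch (illustrative): the original variable references and the
// generated private copies are parallel lists of equal length and are usually
// walked together; varlists() comes from OMPVarListClause and
// private_copies() is declared below. 'walkPrivates' is a hypothetical
// helper.
//
//   void walkPrivates(OMPPrivateClause *C) {
//     auto CopyIt = C->private_copies().begin();
//     for (Expr *Ref : C->varlists()) {
//       Expr *PrivateCopy = *CopyIt++;
//       (void)Ref; (void)PrivateCopy; // inspect the original/copy pair here
//     }
//   }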
static OMPPrivateClause *CreateEmpty(const ASTContext &C, unsigned N); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPPrivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_private; } }; /// This represents clause 'firstprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel firstprivate(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'firstprivate' /// with the variables 'a' and 'b'. class OMPFirstprivateClause final : public OMPVarListClause<OMPFirstprivateClause>, public OMPClauseWithPreInit, private llvm::TrailingObjects<OMPFirstprivateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPFirstprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFirstprivateClause>(llvm::omp::OMPC_firstprivate, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPreInit(this) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPFirstprivateClause(unsigned N) : OMPVarListClause<OMPFirstprivateClause>( llvm::omp::OMPC_firstprivate, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPreInit(this) {} /// Sets the list of references to private copies with initializers for /// new private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for /// new private variables. MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Sets the list of references to initializer variables for new /// private variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// Gets the list of references to initializer variables for new /// private variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. 
/// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. /// \param PrivateVL List of references to private copies with initializers. /// \param InitVL List of references to auto generated variables used for /// initialization of a single array element. Used if firstprivate variable is /// of array type. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. static OMPFirstprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PrivateVL, ArrayRef<Expr *> InitVL, Stmt *PreInit); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPFirstprivateClause *CreateEmpty(const ASTContext &C, unsigned N); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPFirstprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range used_children() const { auto Children = const_cast<OMPFirstprivateClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_firstprivate; } }; /// This represents clause 'lastprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd lastprivate(a,b) /// \endcode /// In this example directive '#pragma omp simd' has clause 'lastprivate' /// with the variables 'a' and 'b'. class OMPLastprivateClause final : public OMPVarListClause<OMPLastprivateClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLastprivateClause, Expr *> { // There are 4 additional tail-allocated arrays at the end of the class: // 1. Contains list of pseudo variables with the default initialization for // each non-firstprivate variables. Used in codegen for initialization of // lastprivate copies. // 2. List of helper expressions for proper generation of assignment operation // required for lastprivate clause. 
// This list represents private variables
  // (for arrays, single array element).
  // 3. List of helper expressions for proper generation of assignment operation
  // required for lastprivate clause. This list represents original variables
  // (for arrays, single array element).
  // 4. List of helper expressions that represent the assignment operation:
  // \code
  // DstExprs = SrcExprs;
  // \endcode
  // Required for proper codegen of final assignment performed by the
  // lastprivate clause.
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Optional lastprivate kind, e.g. 'conditional', if specified by user.
  OpenMPLastprivateModifier LPKind;
  /// Optional location of the lastprivate kind, if specified by user.
  SourceLocation LPKindLoc;
  /// Optional colon location, if specified by user.
  SourceLocation ColonLoc;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
  OMPLastprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                       SourceLocation EndLoc, OpenMPLastprivateModifier LPKind,
                       SourceLocation LPKindLoc, SourceLocation ColonLoc,
                       unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(llvm::omp::OMPC_lastprivate,
                                               StartLoc, LParenLoc, EndLoc, N),
        OMPClauseWithPostUpdate(this), LPKind(LPKind), LPKindLoc(LPKindLoc),
        ColonLoc(ColonLoc) {}

  /// Build an empty clause.
  ///
  /// \param N Number of variables.
  explicit OMPLastprivateClause(unsigned N)
      : OMPVarListClause<OMPLastprivateClause>(
            llvm::omp::OMPC_lastprivate, SourceLocation(), SourceLocation(),
            SourceLocation(), N),
        OMPClauseWithPostUpdate(this) {}

  /// Get the list of helper expressions for initialization of private
  /// copies for lastprivate variables.
  MutableArrayRef<Expr *> getPrivateCopies() {
    return MutableArrayRef<Expr *>(varlist_end(), varlist_size());
  }
  ArrayRef<const Expr *> getPrivateCopies() const {
    return llvm::makeArrayRef(varlist_end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent private variables (for arrays,
  /// single array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setSourceExprs(ArrayRef<Expr *> SrcExprs);

  /// Get the list of helper source expressions.
  MutableArrayRef<Expr *> getSourceExprs() {
    return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size());
  }
  ArrayRef<const Expr *> getSourceExprs() const {
    return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size());
  }

  /// Set list of helper expressions, required for proper codegen of the
  /// clause. These expressions represent original variables (for arrays,
  /// single array element) in the final assignment statement performed by the
  /// lastprivate clause.
  void setDestinationExprs(ArrayRef<Expr *> DstExprs);

  /// Get the list of helper destination expressions.
  MutableArrayRef<Expr *> getDestinationExprs() {
    return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size());
  }
  ArrayRef<const Expr *> getDestinationExprs() const {
    return llvm::makeArrayRef(getSourceExprs().end(), varlist_size());
  }

  /// Set list of helper assignment expressions, required for proper
  /// codegen of the clause. These expressions are assignment expressions that
  /// assign the private copy of the variable to the original variable.
  void setAssignmentOps(ArrayRef<Expr *> AssignmentOps);

  /// Get the list of helper assignment expressions.
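/// From the accessors above, the four helper lists appear to sit one after
/// another in the tail allocation (an illustrative sketch, not normative):
/// \code
///   { Vars[N]; PrivateCopies[N]; SrcExprs[N]; DstExprs[N]; AssignmentOps[N] }
/// \endcode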
MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } /// Sets lastprivate kind. void setKind(OpenMPLastprivateModifier Kind) { LPKind = Kind; } /// Sets location of the lastprivate kind. void setKindLoc(SourceLocation Loc) { LPKindLoc = Loc; } /// Sets colon symbol location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// private variables (for arrays, single array element). /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for lastprivate clause. This list represents /// original variables (for arrays, single array element). /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// lastprivate clause. /// \param LPKind Lastprivate kind, e.g. 'conditional'. /// \param LPKindLoc Location of the lastprivate kind. /// \param ColonLoc Location of the ':' symbol if lastprivate kind is used. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPLastprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps, OpenMPLastprivateModifier LPKind, SourceLocation LPKindLoc, SourceLocation ColonLoc, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPLastprivateClause *CreateEmpty(const ASTContext &C, unsigned N); /// Lastprivate kind. OpenMPLastprivateModifier getKind() const { return LPKind; } /// Returns the location of the lastprivate kind. SourceLocation getKindLoc() const { return LPKindLoc; } /// Returns the location of the ':' symbol, if any. SourceLocation getColonLoc() const { return ColonLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; /// Set list of helper expressions, required for generation of private /// copies of original lastprivate variables. 
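// For reference (illustrative OpenMP 5.0 syntax): the 'conditional'
// lastprivate kind handled by LPKind above is written as
// \code
//   #pragma omp simd lastprivate(conditional: a)
// \endcode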
void setPrivateCopies(ArrayRef<Expr *> PrivateCopies); helper_expr_const_range private_copies() const { return helper_expr_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_range private_copies() { return helper_expr_range(getPrivateCopies().begin(), getPrivateCopies().end()); } helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLastprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_lastprivate; } }; /// This represents clause 'shared' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel shared(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'shared' /// with the variables 'a' and 'b'. class OMPSharedClause final : public OMPVarListClause<OMPSharedClause>, private llvm::TrailingObjects<OMPSharedClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPSharedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPSharedClause(unsigned N) : OMPVarListClause<OMPSharedClause>(llvm::omp::OMPC_shared, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPSharedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPSharedClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPSharedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_shared; } }; /// This represents clause 'reduction' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp parallel reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'reduction' /// with operator '+' and the variables 'a' and 'b'. class OMPReductionClause final : public OMPVarListClause<OMPReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Reduction modifier. OpenMPReductionClauseModifier Modifier = OMPC_REDUCTION_unknown; /// Reduction modifier location. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPReductionClause(unsigned N) : OMPVarListClause<OMPReductionClause>(llvm::omp::OMPC_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets reduction modifier. void setModifier(OpenMPReductionClauseModifier M) { Modifier = M; } /// Sets location of the modifier. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent private copy of the reduction /// variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. 
MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent LHS expression in the final /// reduction expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent RHS expression in the final /// reduction expression performed by the reduction clause. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } /// Set list of helper copy operations for inscan reductions. /// The form is: Temps[i] = LHS[i]; void setInscanCopyOps(ArrayRef<Expr *> Ops); /// Get the list of helper inscan copy operations. MutableArrayRef<Expr *> getInscanCopyOps() { return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyOps() const { return llvm::makeArrayRef(getReductionOps().end(), varlist_size()); } /// Set list of helper temp vars for inscan copy array operations. void setInscanCopyArrayTemps(ArrayRef<Expr *> CopyArrayTemps); /// Get the list of helper inscan copy temps. MutableArrayRef<Expr *> getInscanCopyArrayTemps() { return MutableArrayRef<Expr *>(getInscanCopyOps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyArrayTemps() const { return llvm::makeArrayRef(getInscanCopyOps().end(), varlist_size()); } /// Set list of helper temp elements vars for inscan copy array operations. void setInscanCopyArrayElems(ArrayRef<Expr *> CopyArrayElems); /// Get the list of helper inscan copy temps. MutableArrayRef<Expr *> getInscanCopyArrayElems() { return MutableArrayRef<Expr *>(getInscanCopyArrayTemps().end(), varlist_size()); } ArrayRef<const Expr *> getInscanCopyArrayElems() const { return llvm::makeArrayRef(getInscanCopyArrayTemps().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. 
/// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param CopyOps List of copy operations for inscan reductions: /// \code /// TempExprs = LHSExprs; /// \endcode /// \param CopyArrayTemps Temp arrays for prefix sums. /// \param CopyArrayElems Temp arrays for prefix sums. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, OpenMPReductionClauseModifier Modifier, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> CopyOps, ArrayRef<Expr *> CopyArrayTemps, ArrayRef<Expr *> CopyArrayElems, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. /// \param Modifier Reduction modifier. static OMPReductionClause * CreateEmpty(const ASTContext &C, unsigned N, OpenMPReductionClauseModifier Modifier); /// Returns modifier. OpenMPReductionClauseModifier getModifier() const { return Modifier; } /// Returns modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. 
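// Layout note (inferred from the accessors above; an illustrative sketch, not
// a normative contract): the tail allocation holds
// \code
//   { Vars; Privates; LHSExprs; RHSExprs; ReductionOps }
// \endcode
// with three extra arrays { InscanCopyOps; InscanCopyArrayTemps;
// InscanCopyArrayElems } when the 'inscan' modifier is used, e.g.
// \code
//   #pragma omp simd reduction(inscan, +: x)
//   for (int i = 0; i < n; ++i) {
//     x += a[i];
//     #pragma omp scan inclusive(x)
//     b[i] = x;
//   }
// \endcode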
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_const_range copy_ops() const { return helper_expr_const_range(getInscanCopyOps().begin(), getInscanCopyOps().end()); } helper_expr_range copy_ops() { return helper_expr_range(getInscanCopyOps().begin(), getInscanCopyOps().end()); } helper_expr_const_range copy_array_temps() const { return helper_expr_const_range(getInscanCopyArrayTemps().begin(), getInscanCopyArrayTemps().end()); } helper_expr_range copy_array_temps() { return helper_expr_range(getInscanCopyArrayTemps().begin(), getInscanCopyArrayTemps().end()); } helper_expr_const_range copy_array_elems() const { return helper_expr_const_range(getInscanCopyArrayElems().begin(), getInscanCopyArrayElems().end()); } helper_expr_range copy_array_elems() { return helper_expr_range(getInscanCopyArrayElems().begin(), getInscanCopyArrayElems().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range used_children() const { auto Children = const_cast<OMPReductionClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_reduction; } }; /// This represents clause 'task_reduction' in the '#pragma omp taskgroup' /// directives. /// /// \code /// #pragma omp taskgroup task_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp taskgroup' has clause /// 'task_reduction' with operator '+' and the variables 'a' and 'b'. class OMPTaskReductionClause final : public OMPVarListClause<OMPTaskReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPTaskReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. 
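/// For reference (illustrative): besides the builtin operators, this may name
/// a user-defined reduction introduced with 'declare reduction', e.g.
/// \code
///   #pragma omp declare reduction(merge : std::vector<int> :             \
///       omp_out.insert(omp_out.end(), omp_in.begin(), omp_in.end()))
///   #pragma omp taskgroup task_reduction(merge : v)
/// \endcode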
DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPTaskReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPTaskReductionClause>( llvm::omp::OMPC_task_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPTaskReductionClause(unsigned N) : OMPVarListClause<OMPTaskReductionClause>( llvm::omp::OMPC_task_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. 
MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPTaskReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPTaskReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. 
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPTaskReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_task_reduction; } }; /// This represents clause 'in_reduction' in the '#pragma omp task' directives. /// /// \code /// #pragma omp task in_reduction(+:a,b) /// \endcode /// In this example directive '#pragma omp task' has clause 'in_reduction' with /// operator '+' and the variables 'a' and 'b'. class OMPInReductionClause final : public OMPVarListClause<OMPInReductionClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPInReductionClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. SourceLocation ColonLoc; /// Nested name specifier for C++. NestedNameSpecifierLoc QualifierLoc; /// Name of custom operator. DeclarationNameInfo NameInfo; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param ColonLoc Location of ':'. /// \param N Number of the variables in the clause. /// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. OMPInReductionClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo) : OMPVarListClause<OMPInReductionClause>(llvm::omp::OMPC_in_reduction, StartLoc, LParenLoc, EndLoc, N), OMPClauseWithPostUpdate(this), ColonLoc(ColonLoc), QualifierLoc(QualifierLoc), NameInfo(NameInfo) {} /// Build an empty clause. /// /// \param N Number of variables. 
explicit OMPInReductionClause(unsigned N) : OMPVarListClause<OMPInReductionClause>( llvm::omp::OMPC_in_reduction, SourceLocation(), SourceLocation(), SourceLocation(), N), OMPClauseWithPostUpdate(this) {} /// Sets location of ':' symbol in clause. void setColonLoc(SourceLocation CL) { ColonLoc = CL; } /// Sets the name info for specified reduction identifier. void setNameInfo(DeclarationNameInfo DNI) { NameInfo = DNI; } /// Sets the nested name specifier. void setQualifierLoc(NestedNameSpecifierLoc NSL) { QualifierLoc = NSL; } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent private copy of the reduction variable. void setPrivates(ArrayRef<Expr *> Privates); /// Get the list of helper privates. MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent LHS expression in the final reduction /// expression performed by the reduction clause. void setLHSExprs(ArrayRef<Expr *> LHSExprs); /// Get the list of helper LHS expressions. MutableArrayRef<Expr *> getLHSExprs() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getLHSExprs() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the clause. /// These expressions represent RHS expression in the final reduction /// expression performed by the reduction clause. Also, variables in these /// expressions are used for proper initialization of reduction copies. void setRHSExprs(ArrayRef<Expr *> RHSExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getRHSExprs() { return MutableArrayRef<Expr *>(getLHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getRHSExprs() const { return llvm::makeArrayRef(getLHSExprs().end(), varlist_size()); } /// Set list of helper reduction expressions, required for proper /// codegen of the clause. These expressions are binary expressions or /// operator/custom reduction call that calculates new value from source /// helper expressions to destination helper expressions. void setReductionOps(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction expressions. MutableArrayRef<Expr *> getReductionOps() { return MutableArrayRef<Expr *>(getRHSExprs().end(), varlist_size()); } ArrayRef<const Expr *> getReductionOps() const { return llvm::makeArrayRef(getRHSExprs().end(), varlist_size()); } /// Set list of helper reduction taskgroup descriptors. void setTaskgroupDescriptors(ArrayRef<Expr *> ReductionOps); /// Get the list of helper reduction taskgroup descriptors. MutableArrayRef<Expr *> getTaskgroupDescriptors() { return MutableArrayRef<Expr *>(getReductionOps().end(), varlist_size()); } ArrayRef<const Expr *> getTaskgroupDescriptors() const { return llvm::makeArrayRef(getReductionOps().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL The variables in the clause. 
/// \param QualifierLoc The nested-name qualifier with location information /// \param NameInfo The full name info for reduction identifier. /// \param Privates List of helper expressions for proper generation of /// private copies. /// \param LHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// LHSs of the reduction expressions. /// \param RHSExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// RHSs of the reduction expressions. /// Also, variables in these expressions are used for proper initialization of /// reduction copies. /// \param ReductionOps List of helper expressions that represents reduction /// expressions: /// \code /// LHSExprs binop RHSExprs; /// operator binop(LHSExpr, RHSExpr); /// <CutomReduction>(LHSExpr, RHSExpr); /// \endcode /// Required for proper codegen of final reduction operation performed by the /// reduction clause. /// \param TaskgroupDescriptors List of helper taskgroup descriptors for /// corresponding items in parent taskgroup task_reduction clause. /// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPInReductionClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, NestedNameSpecifierLoc QualifierLoc, const DeclarationNameInfo &NameInfo, ArrayRef<Expr *> Privates, ArrayRef<Expr *> LHSExprs, ArrayRef<Expr *> RHSExprs, ArrayRef<Expr *> ReductionOps, ArrayRef<Expr *> TaskgroupDescriptors, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPInReductionClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets location of ':' symbol in clause. SourceLocation getColonLoc() const { return ColonLoc; } /// Gets the name info for specified reduction identifier. const DeclarationNameInfo &getNameInfo() const { return NameInfo; } /// Gets the nested name specifier. 
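// For reference (illustrative): an in_reduction item is matched with a
// task_reduction (or reduction) item of an enclosing construct; that matching
// item is what the taskgroup descriptors list above records, e.g.
// \code
//   #pragma omp taskgroup task_reduction(+: a)
//   {
//     #pragma omp task in_reduction(+: a)
//     a += f();
//   }
// \endcode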
NestedNameSpecifierLoc getQualifierLoc() const { return QualifierLoc; } using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range privates() const { return helper_expr_const_range(getPrivates().begin(), getPrivates().end()); } helper_expr_range privates() { return helper_expr_range(getPrivates().begin(), getPrivates().end()); } helper_expr_const_range lhs_exprs() const { return helper_expr_const_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_range lhs_exprs() { return helper_expr_range(getLHSExprs().begin(), getLHSExprs().end()); } helper_expr_const_range rhs_exprs() const { return helper_expr_const_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_range rhs_exprs() { return helper_expr_range(getRHSExprs().begin(), getRHSExprs().end()); } helper_expr_const_range reduction_ops() const { return helper_expr_const_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_range reduction_ops() { return helper_expr_range(getReductionOps().begin(), getReductionOps().end()); } helper_expr_const_range taskgroup_descriptors() const { return helper_expr_const_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } helper_expr_range taskgroup_descriptors() { return helper_expr_range(getTaskgroupDescriptors().begin(), getTaskgroupDescriptors().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInReductionClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_in_reduction; } }; /// This represents clause 'linear' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd linear(a,b : 2) /// \endcode /// In this example directive '#pragma omp simd' has clause 'linear' /// with variables 'a', 'b' and linear step '2'. class OMPLinearClause final : public OMPVarListClause<OMPLinearClause>, public OMPClauseWithPostUpdate, private llvm::TrailingObjects<OMPLinearClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Modifier of 'linear' clause. OpenMPLinearClauseKind Modifier = OMPC_LINEAR_val; /// Location of linear modifier if any. SourceLocation ModifierLoc; /// Location of ':'. SourceLocation ColonLoc; /// Sets the linear step for clause. void setStep(Expr *Step) { *(getFinals().end()) = Step; } /// Sets the expression to calculate linear step for clause. void setCalcStep(Expr *CalcStep) { *(getFinals().end() + 1) = CalcStep; } /// Build 'linear' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. 
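// For reference (illustrative): the linear modifier stored in Modifier above
// ('val', 'ref' or 'uval') is written as, e.g.
// \code
//   #pragma omp declare simd linear(ref(p) : 8)
//   void foo(double &p);
// \endcode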
OMPLinearClause(SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, StartLoc, LParenLoc, EndLoc, NumVars), OMPClauseWithPostUpdate(this), Modifier(Modifier), ModifierLoc(ModifierLoc), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. explicit OMPLinearClause(unsigned NumVars) : OMPVarListClause<OMPLinearClause>(llvm::omp::OMPC_linear, SourceLocation(), SourceLocation(), SourceLocation(), NumVars), OMPClauseWithPostUpdate(this) {} /// Gets the list of initial values for linear variables. /// /// There are NumVars expressions with initial values allocated after the /// varlist, they are followed by NumVars update expressions (used to update /// the linear variable's value on current iteration) and they are followed by /// NumVars final expressions (used to calculate the linear variable's /// value after the loop body). After these lists, there are 2 helper /// expressions - linear step and a helper to calculate it before the /// loop body (used when the linear step is not constant): /// /// { Vars[] /* in OMPVarListClause */; Privates[]; Inits[]; Updates[]; /// Finals[]; Step; CalcStep; } MutableArrayRef<Expr *> getPrivates() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivates() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivates().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivates().end(), varlist_size()); } /// Sets the list of update expressions for linear variables. MutableArrayRef<Expr *> getUpdates() { return MutableArrayRef<Expr *>(getInits().end(), varlist_size()); } ArrayRef<const Expr *> getUpdates() const { return llvm::makeArrayRef(getInits().end(), varlist_size()); } /// Sets the list of final update expressions for linear variables. MutableArrayRef<Expr *> getFinals() { return MutableArrayRef<Expr *>(getUpdates().end(), varlist_size()); } ArrayRef<const Expr *> getFinals() const { return llvm::makeArrayRef(getUpdates().end(), varlist_size()); } /// Gets the list of used expressions for linear variables. MutableArrayRef<Expr *> getUsedExprs() { return MutableArrayRef<Expr *>(getFinals().end() + 2, varlist_size() + 1); } ArrayRef<const Expr *> getUsedExprs() const { return llvm::makeArrayRef(getFinals().end() + 2, varlist_size() + 1); } /// Sets the list of the copies of original linear variables. /// \param PL List of expressions. void setPrivates(ArrayRef<Expr *> PL); /// Sets the list of the initial values for linear variables. /// \param IL List of expressions. void setInits(ArrayRef<Expr *> IL); public: /// Creates clause with a list of variables \a VL and a linear step /// \a Step. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param Modifier Modifier of 'linear' clause. /// \param ModifierLoc Modifier location. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param PL List of private copies of original variables. /// \param IL List of initial values for the variables. /// \param Step Linear step. /// \param CalcStep Calculation of the linear step. 
/// \param PreInit Statement that must be executed before entering the OpenMP /// region with this clause. /// \param PostUpdate Expression that must be executed after exit from the /// OpenMP region with this clause. static OMPLinearClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, OpenMPLinearClauseKind Modifier, SourceLocation ModifierLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> PL, ArrayRef<Expr *> IL, Expr *Step, Expr *CalcStep, Stmt *PreInit, Expr *PostUpdate); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. static OMPLinearClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// Set modifier. void setModifier(OpenMPLinearClauseKind Kind) { Modifier = Kind; } /// Return modifier. OpenMPLinearClauseKind getModifier() const { return Modifier; } /// Set modifier location. void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Return modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns linear step. Expr *getStep() { return *(getFinals().end()); } /// Returns linear step. const Expr *getStep() const { return *(getFinals().end()); } /// Returns expression to calculate linear step. Expr *getCalcStep() { return *(getFinals().end() + 1); } /// Returns expression to calculate linear step. const Expr *getCalcStep() const { return *(getFinals().end() + 1); } /// Sets the list of update expressions for linear variables. /// \param UL List of expressions. void setUpdates(ArrayRef<Expr *> UL); /// Sets the list of final update expressions for linear variables. /// \param FL List of expressions. void setFinals(ArrayRef<Expr *> FL); /// Sets the list of used expressions for the linear clause. 
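// Conceptual note (illustrative; not the exact trees Sema builds): for
// 'linear(i : 2)' the update expression behaves like 'i = i.init + 2 * iv'
// in each iteration, and the final expression like 'i = i.init + 2 * niters'
// after the loop. The used-expressions list below follows Step and CalcStep
// in the tail allocation and has varlist_size() + 1 entries.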
void setUsedExprs(ArrayRef<Expr *> UE); using privates_iterator = MutableArrayRef<Expr *>::iterator; using privates_const_iterator = ArrayRef<const Expr *>::iterator; using privates_range = llvm::iterator_range<privates_iterator>; using privates_const_range = llvm::iterator_range<privates_const_iterator>; privates_range privates() { return privates_range(getPrivates().begin(), getPrivates().end()); } privates_const_range privates() const { return privates_const_range(getPrivates().begin(), getPrivates().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } using updates_iterator = MutableArrayRef<Expr *>::iterator; using updates_const_iterator = ArrayRef<const Expr *>::iterator; using updates_range = llvm::iterator_range<updates_iterator>; using updates_const_range = llvm::iterator_range<updates_const_iterator>; updates_range updates() { return updates_range(getUpdates().begin(), getUpdates().end()); } updates_const_range updates() const { return updates_const_range(getUpdates().begin(), getUpdates().end()); } using finals_iterator = MutableArrayRef<Expr *>::iterator; using finals_const_iterator = ArrayRef<const Expr *>::iterator; using finals_range = llvm::iterator_range<finals_iterator>; using finals_const_range = llvm::iterator_range<finals_const_iterator>; finals_range finals() { return finals_range(getFinals().begin(), getFinals().end()); } finals_const_range finals() const { return finals_const_range(getFinals().begin(), getFinals().end()); } using used_expressions_iterator = MutableArrayRef<Expr *>::iterator; using used_expressions_const_iterator = ArrayRef<const Expr *>::iterator; using used_expressions_range = llvm::iterator_range<used_expressions_iterator>; using used_expressions_const_range = llvm::iterator_range<used_expressions_const_iterator>; used_expressions_range used_expressions() { return finals_range(getUsedExprs().begin(), getUsedExprs().end()); } used_expressions_const_range used_expressions() const { return finals_const_range(getUsedExprs().begin(), getUsedExprs().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPLinearClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children(); const_child_range used_children() const { auto Children = const_cast<OMPLinearClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_linear; } }; /// This represents clause 'aligned' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp simd aligned(a,b : 8) /// \endcode /// In this example directive '#pragma omp simd' has clause 'aligned' /// with variables 'a', 'b' and alignment '8'. class OMPAlignedClause final : public OMPVarListClause<OMPAlignedClause>, private llvm::TrailingObjects<OMPAlignedClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':'. 
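// Storage note (inferred from setAlignment()/getAlignment() below; an
// illustrative sketch): the single optional alignment expression occupies one
// extra trailing slot right after the variable list, so the clause needs
// NumVars + 1 trailing Expr* slots.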
SourceLocation ColonLoc; /// Sets the alignment for clause. void setAlignment(Expr *A) { *varlist_end() = A; } /// Build 'aligned' clause with given number of variables \a NumVars. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param NumVars Number of variables. OMPAlignedClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, StartLoc, LParenLoc, EndLoc, NumVars), ColonLoc(ColonLoc) {} /// Build an empty clause. /// /// \param NumVars Number of variables. explicit OMPAlignedClause(unsigned NumVars) : OMPVarListClause<OMPAlignedClause>(llvm::omp::OMPC_aligned, SourceLocation(), SourceLocation(), SourceLocation(), NumVars) {} public: /// Creates clause with a list of variables \a VL and alignment \a A. /// /// \param C AST Context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param A Alignment. static OMPAlignedClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, Expr *A); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param NumVars Number of variables. static OMPAlignedClause *CreateEmpty(const ASTContext &C, unsigned NumVars); /// Sets the location of ':'. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Returns the location of ':'. SourceLocation getColonLoc() const { return ColonLoc; } /// Returns alignment. Expr *getAlignment() { return *varlist_end(); } /// Returns alignment. const Expr *getAlignment() const { return *varlist_end(); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPAlignedClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_aligned; } }; /// This represents clause 'copyin' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp parallel copyin(a,b) /// \endcode /// In this example directive '#pragma omp parallel' has clause 'copyin' /// with the variables 'a' and 'b'. class OMPCopyinClause final : public OMPVarListClause<OMPCopyinClause>, private llvm::TrailingObjects<OMPCopyinClause, Expr *> { // Class has 3 additional tail allocated arrays: // 1. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents sources. // 2. List of helper expressions for proper generation of assignment operation // required for copyin clause. This list represents destinations. // 3. 
List of helper expressions that represents assignment operation: // \code // DstExprs = SrcExprs; // \endcode // Required for proper codegen of propagation of master's thread values of // threadprivate variables to local instances of that variables in other // implicit threads. friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPCopyinClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPCopyinClause(unsigned N) : OMPVarListClause<OMPCopyinClause>(llvm::omp::OMPC_copyin, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyin clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyin clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyin clause. This list represents /// destinations. 
/// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of propagation of master's thread values of /// threadprivate variables to local instances of that variables in other /// implicit threads. static OMPCopyinClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPCopyinClause *CreateEmpty(const ASTContext &C, unsigned N); using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPCopyinClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_copyin; } }; /// This represents clause 'copyprivate' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp single copyprivate(a,b) /// \endcode /// In this example directive '#pragma omp single' has clause 'copyprivate' /// with the variables 'a' and 'b'. class OMPCopyprivateClause final : public OMPVarListClause<OMPCopyprivateClause>, private llvm::TrailingObjects<OMPCopyprivateClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPCopyprivateClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPCopyprivateClause>(llvm::omp::OMPC_copyprivate, StartLoc, LParenLoc, EndLoc, N) { } /// Build an empty clause. /// /// \param N Number of variables. 
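  // Illustrative sketch (not part of the original header): for a built
  // OMPCopyinClause *C, the helper ranges declared in its public section
  // above are parallel to the variable list; e.g. one can walk the
  // generated assignments directly:
  //
  //   for (Expr *AssignOp : C->assignment_ops()) {
  //     // Each entry models "DstExpr = SrcExpr;" for one clause variable,
  //     // in the same order as source_exprs() and destination_exprs().
  //   }
  //
  // 'C' is hypothetical; only the accessors named here are declared above.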
explicit OMPCopyprivateClause(unsigned N) : OMPVarListClause<OMPCopyprivateClause>( llvm::omp::OMPC_copyprivate, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent source expression in the final /// assignment statement performed by the copyprivate clause. void setSourceExprs(ArrayRef<Expr *> SrcExprs); /// Get the list of helper source expressions. MutableArrayRef<Expr *> getSourceExprs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getSourceExprs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Set list of helper expressions, required for proper codegen of the /// clause. These expressions represent destination expression in the final /// assignment statement performed by the copyprivate clause. void setDestinationExprs(ArrayRef<Expr *> DstExprs); /// Get the list of helper destination expressions. MutableArrayRef<Expr *> getDestinationExprs() { return MutableArrayRef<Expr *>(getSourceExprs().end(), varlist_size()); } ArrayRef<const Expr *> getDestinationExprs() const { return llvm::makeArrayRef(getSourceExprs().end(), varlist_size()); } /// Set list of helper assignment expressions, required for proper /// codegen of the clause. These expressions are assignment expressions that /// assign source helper expressions to destination helper expressions /// correspondingly. void setAssignmentOps(ArrayRef<Expr *> AssignmentOps); /// Get the list of helper assignment expressions. MutableArrayRef<Expr *> getAssignmentOps() { return MutableArrayRef<Expr *>(getDestinationExprs().end(), varlist_size()); } ArrayRef<const Expr *> getAssignmentOps() const { return llvm::makeArrayRef(getDestinationExprs().end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. /// \param SrcExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// sources. /// \param DstExprs List of helper expressions for proper generation of /// assignment operation required for copyprivate clause. This list represents /// destinations. /// \param AssignmentOps List of helper expressions that represents assignment /// operation: /// \code /// DstExprs = SrcExprs; /// \endcode /// Required for proper codegen of final assignment performed by the /// copyprivate clause. static OMPCopyprivateClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL, ArrayRef<Expr *> SrcExprs, ArrayRef<Expr *> DstExprs, ArrayRef<Expr *> AssignmentOps); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPCopyprivateClause *CreateEmpty(const ASTContext &C, unsigned N); using helper_expr_iterator = MutableArrayRef<Expr *>::iterator; using helper_expr_const_iterator = ArrayRef<const Expr *>::iterator; using helper_expr_range = llvm::iterator_range<helper_expr_iterator>; using helper_expr_const_range = llvm::iterator_range<helper_expr_const_iterator>; helper_expr_const_range source_exprs() const { return helper_expr_const_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_range source_exprs() { return helper_expr_range(getSourceExprs().begin(), getSourceExprs().end()); } helper_expr_const_range destination_exprs() const { return helper_expr_const_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_range destination_exprs() { return helper_expr_range(getDestinationExprs().begin(), getDestinationExprs().end()); } helper_expr_const_range assignment_ops() const { return helper_expr_const_range(getAssignmentOps().begin(), getAssignmentOps().end()); } helper_expr_range assignment_ops() { return helper_expr_range(getAssignmentOps().begin(), getAssignmentOps().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPCopyprivateClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_copyprivate; } }; /// This represents implicit clause 'flush' for the '#pragma omp flush' /// directive. /// This clause does not exist by itself, it can be only as a part of 'omp /// flush' directive. This clause is introduced to keep the original structure /// of \a OMPExecutableDirective class and its derivatives and to use the /// existing infrastructure of clauses with the list of variables. /// /// \code /// #pragma omp flush(a,b) /// \endcode /// In this example directive '#pragma omp flush' has implicit clause 'flush' /// with the variables 'a' and 'b'. class OMPFlushClause final : public OMPVarListClause<OMPFlushClause>, private llvm::TrailingObjects<OMPFlushClause, Expr *> { friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPFlushClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPFlushClause(unsigned N) : OMPVarListClause<OMPFlushClause>(llvm::omp::OMPC_flush, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. 
  static OMPFlushClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                SourceLocation LParenLoc, SourceLocation EndLoc,
                                ArrayRef<Expr *> VL);

  /// Creates an empty clause with \a N variables.
  ///
  /// \param C AST context.
  /// \param N The number of variables.
  static OMPFlushClause *CreateEmpty(const ASTContext &C, unsigned N);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPFlushClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_flush;
  }
};

/// This represents the implicit clause 'depobj' for the '#pragma omp depobj'
/// directive.
/// This clause does not exist by itself; it can only appear as part of the
/// 'omp depobj' directive. It is introduced to keep the original structure of
/// \a OMPExecutableDirective class and its derivatives and to use the existing
/// infrastructure of clauses with the list of variables.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has implicit clause 'depobj'
/// with the depobj 'a'.
class OMPDepobjClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Dependency object expression associated with the clause.
  Expr *Depobj = nullptr;

  /// Build 'depobj' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDepobjClause(SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_depobj, StartLoc, EndLoc),
        LParenLoc(LParenLoc) {}

  /// Build an empty clause.
  explicit OMPDepobjClause()
      : OMPClause(llvm::omp::OMPC_depobj, SourceLocation(), SourceLocation()) {}

  void setDepobj(Expr *E) { Depobj = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Creates clause.
  ///
  /// \param C AST context.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param Depobj depobj expression associated with the 'depobj' directive.
  static OMPDepobjClause *Create(const ASTContext &C, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation EndLoc, Expr *Depobj);

  /// Creates an empty clause.
  ///
  /// \param C AST context.
  static OMPDepobjClause *CreateEmpty(const ASTContext &C);

  /// Returns depobj expression associated with the clause.
  Expr *getDepobj() { return Depobj; }
  const Expr *getDepobj() const { return Depobj; }

  /// Returns the location of '('.
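  // Illustrative sketch (not part of the original header): building a
  // 'depobj' clause and reading the dependency object back, using only the
  // members declared above. 'Ctx', 'Loc' and 'DepobjExpr' are hypothetical.
  //
  //   OMPDepobjClause *DC =
  //       OMPDepobjClause::Create(Ctx, Loc, Loc, Loc, DepobjExpr);
  //   Expr *D = DC->getDepobj(); // the 'a' in '#pragma omp depobj(a) destroy'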
SourceLocation getLParenLoc() const { return LParenLoc; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&Depobj), reinterpret_cast<Stmt **>(&Depobj) + 1); } const_child_range children() const { auto Children = const_cast<OMPDepobjClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_depobj; } }; /// This represents implicit clause 'depend' for the '#pragma omp task' /// directive. /// /// \code /// #pragma omp task depend(in:a,b) /// \endcode /// In this example directive '#pragma omp task' with clause 'depend' with the /// variables 'a' and 'b' with dependency 'in'. class OMPDependClause final : public OMPVarListClause<OMPDependClause>, private llvm::TrailingObjects<OMPDependClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Dependency type (one of in, out, inout). OpenMPDependClauseKind DepKind = OMPC_DEPEND_unknown; /// Dependency type location. SourceLocation DepLoc; /// Colon location. SourceLocation ColonLoc; /// Number of loops, associated with the depend clause. unsigned NumLoops = 0; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. /// \param NumLoops Number of loops that is associated with this depend /// clause. OMPDependClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N, unsigned NumLoops) : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, StartLoc, LParenLoc, EndLoc, N), NumLoops(NumLoops) {} /// Build an empty clause. /// /// \param N Number of variables. /// \param NumLoops Number of loops that is associated with this depend /// clause. explicit OMPDependClause(unsigned N, unsigned NumLoops) : OMPVarListClause<OMPDependClause>(llvm::omp::OMPC_depend, SourceLocation(), SourceLocation(), SourceLocation(), N), NumLoops(NumLoops) {} /// Set dependency kind. void setDependencyKind(OpenMPDependClauseKind K) { DepKind = K; } /// Set dependency kind and its location. void setDependencyLoc(SourceLocation Loc) { DepLoc = Loc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Sets optional dependency modifier. void setModifier(Expr *DepModifier); public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param DepKind Dependency type. /// \param DepLoc Location of the dependency type. /// \param ColonLoc Colon location. /// \param VL List of references to the variables. /// \param NumLoops Number of loops that is associated with this depend /// clause. static OMPDependClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, Expr *DepModifier, OpenMPDependClauseKind DepKind, SourceLocation DepLoc, SourceLocation ColonLoc, ArrayRef<Expr *> VL, unsigned NumLoops); /// Creates an empty clause with \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
  /// \param NumLoops Number of loops that is associated with this depend
  /// clause.
  static OMPDependClause *CreateEmpty(const ASTContext &C, unsigned N,
                                      unsigned NumLoops);

  /// Get dependency type.
  OpenMPDependClauseKind getDependencyKind() const { return DepKind; }

  /// Return optional depend modifier.
  Expr *getModifier();
  const Expr *getModifier() const {
    return const_cast<OMPDependClause *>(this)->getModifier();
  }

  /// Get dependency type location.
  SourceLocation getDependencyLoc() const { return DepLoc; }

  /// Get colon location.
  SourceLocation getColonLoc() const { return ColonLoc; }

  /// Get number of loops associated with the clause.
  unsigned getNumLoops() const { return NumLoops; }

  /// Set the loop data for the depend clauses with 'sink|source' kind of
  /// dependency.
  void setLoopData(unsigned NumLoop, Expr *Cnt);

  /// Get the loop data.
  Expr *getLoopData(unsigned NumLoop);
  const Expr *getLoopData(unsigned NumLoop) const;

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPDependClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }
  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_depend;
  }
};

/// This represents 'device' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp target device(a)
/// \endcode
/// In this example directive '#pragma omp target' has clause 'device'
/// with single expression 'a'.
class OMPDeviceClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Device clause modifier.
  OpenMPDeviceClauseModifier Modifier = OMPC_DEVICE_unknown;

  /// Location of the modifier.
  SourceLocation ModifierLoc;

  /// Device number.
  Stmt *Device = nullptr;

  /// Set the device number.
  ///
  /// \param E Device number.
  void setDevice(Expr *E) { Device = E; }

  /// Sets modifier.
  void setModifier(OpenMPDeviceClauseModifier M) { Modifier = M; }

  /// Sets modifier location.
  void setModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; }

public:
  /// Build 'device' clause.
  ///
  /// \param Modifier Clause modifier.
  /// \param E Expression associated with this clause.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param ModifierLoc Modifier location.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDeviceClause(OpenMPDeviceClauseModifier Modifier, Expr *E, Stmt *HelperE,
                  OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                  SourceLocation LParenLoc, SourceLocation ModifierLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_device, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Modifier(Modifier),
        ModifierLoc(ModifierLoc), Device(E) {
    setPreInitStmt(HelperE, CaptureRegion);
  }

  /// Build an empty clause.
  OMPDeviceClause()
      : OMPClause(llvm::omp::OMPC_device, SourceLocation(), SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
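  // Illustrative sketch (not part of the original header): reading the device
  // modifier and device-number expression back from a built clause. 'DC' is a
  // hypothetical OMPDeviceClause *; the getters used are declared just below.
  //
  //   Expr *Dev = DC->getDevice();                  // 'a' in 'device(a)'
  //   bool HasModifier = DC->getModifier() != OMPC_DEVICE_unknown;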
SourceLocation getLParenLoc() const { return LParenLoc; } /// Return device number. Expr *getDevice() { return cast<Expr>(Device); } /// Return device number. Expr *getDevice() const { return cast<Expr>(Device); } /// Gets modifier. OpenMPDeviceClauseModifier getModifier() const { return Modifier; } /// Gets modifier location. SourceLocation getModifierLoc() const { return ModifierLoc; } child_range children() { return child_range(&Device, &Device + 1); } const_child_range children() const { return const_child_range(&Device, &Device + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_device; } }; /// This represents 'threads' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp ordered threads /// \endcode /// In this example directive '#pragma omp ordered' has simple 'threads' clause. class OMPThreadsClause : public OMPClause { public: /// Build 'threads' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPThreadsClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_threads, StartLoc, EndLoc) {} /// Build an empty clause. OMPThreadsClause() : OMPClause(llvm::omp::OMPC_threads, SourceLocation(), SourceLocation()) { } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_threads; } }; /// This represents 'simd' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp ordered simd /// \endcode /// In this example directive '#pragma omp ordered' has simple 'simd' clause. class OMPSIMDClause : public OMPClause { public: /// Build 'simd' clause. /// /// \param StartLoc Starting location of the clause. /// \param EndLoc Ending location of the clause. OMPSIMDClause(SourceLocation StartLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_simd, StartLoc, EndLoc) {} /// Build an empty clause. OMPSIMDClause() : OMPClause(llvm::omp::OMPC_simd, SourceLocation(), SourceLocation()) {} child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_simd; } }; /// Struct that defines common infrastructure to handle mappable /// expressions used in OpenMP clauses. class OMPClauseMappableExprCommon { public: /// Class that represents a component of a mappable expression. E.g. /// for an expression S.a, the first component is a declaration reference /// expression associated with 'S' and the second is a member expression /// associated with the field declaration 'a'. 
  /// If the expression is an array subscript it may not have any associated
  /// declaration. In that case the associated declaration is set to nullptr.
  class MappableComponent {
    /// Pair of the associated expression and a flag that is true if the
    /// component is non-contiguous.
    llvm::PointerIntPair<Expr *, 1, bool> AssociatedExpressionNonContiguousPr;

    /// Declaration associated with the component. If the component does
    /// not have a declaration (e.g. array subscripts or section), this is set
    /// to nullptr.
    ValueDecl *AssociatedDeclaration = nullptr;

  public:
    explicit MappableComponent() = default;
    explicit MappableComponent(Expr *AssociatedExpression,
                               ValueDecl *AssociatedDeclaration,
                               bool IsNonContiguous)
        : AssociatedExpressionNonContiguousPr(AssociatedExpression,
                                              IsNonContiguous),
          AssociatedDeclaration(
              AssociatedDeclaration
                  ? cast<ValueDecl>(AssociatedDeclaration->getCanonicalDecl())
                  : nullptr) {}

    Expr *getAssociatedExpression() const {
      return AssociatedExpressionNonContiguousPr.getPointer();
    }

    bool isNonContiguous() const {
      return AssociatedExpressionNonContiguousPr.getInt();
    }

    ValueDecl *getAssociatedDeclaration() const {
      return AssociatedDeclaration;
    }
  };

  // List of components of an expression. The first one is the whole
  // expression and the last one is the base expression.
  using MappableExprComponentList = SmallVector<MappableComponent, 8>;
  using MappableExprComponentListRef = ArrayRef<MappableComponent>;

  // List of all component lists associated with the same base declaration.
  // E.g. if both 'S.a' and 'S.b' are mappable expressions, each will have
  // their own component list but the same base declaration 'S'.
  using MappableExprComponentLists = SmallVector<MappableExprComponentList, 8>;
  using MappableExprComponentListsRef = ArrayRef<MappableExprComponentList>;

protected:
  // Return the total number of elements in a list of component lists.
  static unsigned
  getComponentsTotalNumber(MappableExprComponentListsRef ComponentLists);

  // Return the total number of elements in a list of declarations. All
  // declarations are expected to be canonical.
  static unsigned
  getUniqueDeclarationsTotalNumber(ArrayRef<const ValueDecl *> Declarations);
};

/// This structure contains all sizes needed by an
/// OMPMappableExprListClause.
struct OMPMappableExprListSizeTy {
  /// Number of expressions listed.
  unsigned NumVars;
  /// Number of unique base declarations.
  unsigned NumUniqueDeclarations;
  /// Number of component lists.
  unsigned NumComponentLists;
  /// Total number of expression components.
  unsigned NumComponents;
  OMPMappableExprListSizeTy() = default;
  OMPMappableExprListSizeTy(unsigned NumVars, unsigned NumUniqueDeclarations,
                            unsigned NumComponentLists, unsigned NumComponents)
      : NumVars(NumVars), NumUniqueDeclarations(NumUniqueDeclarations),
        NumComponentLists(NumComponentLists), NumComponents(NumComponents) {}
};

/// This represents clauses with a list of expressions that are mappable.
/// Examples of these clauses are 'map' in
/// '#pragma omp target [enter|exit] [data]...' directives, and 'to' and 'from'
/// in '#pragma omp target update...' directives.
template <class T>
class OMPMappableExprListClause : public OMPVarListClause<T>,
                                  public OMPClauseMappableExprCommon {
  friend class OMPClauseReader;

  /// Number of unique declarations in this clause.
  unsigned NumUniqueDeclarations;

  /// Number of component lists in this clause.
  unsigned NumComponentLists;

  /// Total number of components in this clause.
  unsigned NumComponents;

  /// Whether this clause is possible to have user-defined mappers associated.
/// It should be true for map, to, and from clauses, and false for /// use_device_ptr and is_device_ptr. const bool SupportsMapper; /// C++ nested name specifier for the associated user-defined mapper. NestedNameSpecifierLoc MapperQualifierLoc; /// The associated user-defined mapper identifier information. DeclarationNameInfo MapperIdInfo; protected: /// Build a clause for \a NumUniqueDeclarations declarations, \a /// NumComponentLists total component lists, and \a NumComponents total /// components. /// /// \param K Kind of the clause. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. /// \param SupportsMapper Indicates whether this clause is possible to have /// user-defined mappers associated. /// \param MapperQualifierLocPtr C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfoPtr The identifier of associated user-defined mapper. OMPMappableExprListClause( OpenMPClauseKind K, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes, bool SupportsMapper = false, NestedNameSpecifierLoc *MapperQualifierLocPtr = nullptr, DeclarationNameInfo *MapperIdInfoPtr = nullptr) : OMPVarListClause<T>(K, Locs.StartLoc, Locs.LParenLoc, Locs.EndLoc, Sizes.NumVars), NumUniqueDeclarations(Sizes.NumUniqueDeclarations), NumComponentLists(Sizes.NumComponentLists), NumComponents(Sizes.NumComponents), SupportsMapper(SupportsMapper) { if (MapperQualifierLocPtr) MapperQualifierLoc = *MapperQualifierLocPtr; if (MapperIdInfoPtr) MapperIdInfo = *MapperIdInfoPtr; } /// Get the unique declarations that are in the trailing objects of the /// class. MutableArrayRef<ValueDecl *> getUniqueDeclsRef() { return MutableArrayRef<ValueDecl *>( static_cast<T *>(this)->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Get the unique declarations that are in the trailing objects of the /// class. ArrayRef<ValueDecl *> getUniqueDeclsRef() const { return ArrayRef<ValueDecl *>( static_cast<const T *>(this) ->template getTrailingObjects<ValueDecl *>(), NumUniqueDeclarations); } /// Set the unique declarations that are in the trailing objects of the /// class. void setUniqueDecls(ArrayRef<ValueDecl *> UDs) { assert(UDs.size() == NumUniqueDeclarations && "Unexpected amount of unique declarations."); std::copy(UDs.begin(), UDs.end(), getUniqueDeclsRef().begin()); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. MutableArrayRef<unsigned> getDeclNumListsRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Get the number of lists per declaration that are in the trailing /// objects of the class. ArrayRef<unsigned> getDeclNumListsRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>(), NumUniqueDeclarations); } /// Set the number of lists per declaration that are in the trailing /// objects of the class. 
void setDeclNumLists(ArrayRef<unsigned> DNLs) { assert(DNLs.size() == NumUniqueDeclarations && "Unexpected amount of list numbers."); std::copy(DNLs.begin(), DNLs.end(), getDeclNumListsRef().begin()); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. MutableArrayRef<unsigned> getComponentListSizesRef() { return MutableArrayRef<unsigned>( static_cast<T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Get the cumulative component lists sizes that are in the trailing /// objects of the class. They are appended after the number of lists. ArrayRef<unsigned> getComponentListSizesRef() const { return ArrayRef<unsigned>( static_cast<const T *>(this)->template getTrailingObjects<unsigned>() + NumUniqueDeclarations, NumComponentLists); } /// Set the cumulative component lists sizes that are in the trailing /// objects of the class. void setComponentListSizes(ArrayRef<unsigned> CLSs) { assert(CLSs.size() == NumComponentLists && "Unexpected amount of component lists."); std::copy(CLSs.begin(), CLSs.end(), getComponentListSizesRef().begin()); } /// Get the components that are in the trailing objects of the class. MutableArrayRef<MappableComponent> getComponentsRef() { return MutableArrayRef<MappableComponent>( static_cast<T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Get the components that are in the trailing objects of the class. ArrayRef<MappableComponent> getComponentsRef() const { return ArrayRef<MappableComponent>( static_cast<const T *>(this) ->template getTrailingObjects<MappableComponent>(), NumComponents); } /// Set the components that are in the trailing objects of the class. /// This requires the list sizes so that it can also fill the original /// expressions, which are the first component of each list. void setComponents(ArrayRef<MappableComponent> Components, ArrayRef<unsigned> CLSs) { assert(Components.size() == NumComponents && "Unexpected amount of component lists."); assert(CLSs.size() == NumComponentLists && "Unexpected amount of list sizes."); std::copy(Components.begin(), Components.end(), getComponentsRef().begin()); } /// Fill the clause information from the list of declarations and /// associated component lists. void setClauseInfo(ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists) { // Perform some checks to make sure the data sizes are consistent with the // information available when the clause was created. assert(getUniqueDeclarationsTotalNumber(Declarations) == NumUniqueDeclarations && "Unexpected number of mappable expression info entries!"); assert(getComponentsTotalNumber(ComponentLists) == NumComponents && "Unexpected total number of components!"); assert(Declarations.size() == ComponentLists.size() && "Declaration and component lists size is not consistent!"); assert(Declarations.size() == NumComponentLists && "Unexpected declaration and component lists size!"); // Organize the components by declaration and retrieve the original // expression. Original expressions are always the first component of the // mappable component list. 
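    // The map built below groups each base declaration's component lists; the
    // loop that follows then fills the clause's parallel trailing arrays:
    // getUniqueDeclsRef() receives each declaration once, getDeclNumListsRef()
    // the number of lists per declaration, getComponentListSizesRef() the
    // cumulative component counts, and getComponentsRef() the flattened
    // components themselves.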
llvm::MapVector<ValueDecl *, SmallVector<MappableExprComponentListRef, 8>> ComponentListMap; { auto CI = ComponentLists.begin(); for (auto DI = Declarations.begin(), DE = Declarations.end(); DI != DE; ++DI, ++CI) { assert(!CI->empty() && "Invalid component list!"); ComponentListMap[*DI].push_back(*CI); } } // Iterators of the target storage. auto UniqueDeclarations = getUniqueDeclsRef(); auto UDI = UniqueDeclarations.begin(); auto DeclNumLists = getDeclNumListsRef(); auto DNLI = DeclNumLists.begin(); auto ComponentListSizes = getComponentListSizesRef(); auto CLSI = ComponentListSizes.begin(); auto Components = getComponentsRef(); auto CI = Components.begin(); // Variable to compute the accumulation of the number of components. unsigned PrevSize = 0u; // Scan all the declarations and associated component lists. for (auto &M : ComponentListMap) { // The declaration. auto *D = M.first; // The component lists. auto CL = M.second; // Initialize the entry. *UDI = D; ++UDI; *DNLI = CL.size(); ++DNLI; // Obtain the cumulative sizes and concatenate all the components in the // reserved storage. for (auto C : CL) { // Accumulate with the previous size. PrevSize += C.size(); // Save the size. *CLSI = PrevSize; ++CLSI; // Append components after the current components iterator. CI = std::copy(C.begin(), C.end(), CI); } } } /// Set the nested name specifier of associated user-defined mapper. void setMapperQualifierLoc(NestedNameSpecifierLoc NNSL) { MapperQualifierLoc = NNSL; } /// Set the name of associated user-defined mapper. void setMapperIdInfo(DeclarationNameInfo MapperId) { MapperIdInfo = MapperId; } /// Get the user-defined mapper references that are in the trailing objects of /// the class. MutableArrayRef<Expr *> getUDMapperRefs() { assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); return llvm::makeMutableArrayRef<Expr *>( static_cast<T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Get the user-defined mappers references that are in the trailing objects /// of the class. ArrayRef<Expr *> getUDMapperRefs() const { assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); return llvm::makeArrayRef<Expr *>( static_cast<const T *>(this)->template getTrailingObjects<Expr *>() + OMPVarListClause<T>::varlist_size(), OMPVarListClause<T>::varlist_size()); } /// Set the user-defined mappers that are in the trailing objects of the /// class. void setUDMapperRefs(ArrayRef<Expr *> DMDs) { assert(DMDs.size() == OMPVarListClause<T>::varlist_size() && "Unexpected number of user-defined mappers."); assert(SupportsMapper && "Must be a clause that is possible to have user-defined mappers"); std::copy(DMDs.begin(), DMDs.end(), getUDMapperRefs().begin()); } public: /// Return the number of unique base declarations in this clause. unsigned getUniqueDeclarationsNum() const { return NumUniqueDeclarations; } /// Return the number of lists derived from the clause expressions. unsigned getTotalComponentListNum() const { return NumComponentLists; } /// Return the total number of components in all lists derived from the /// clause. unsigned getTotalComponentsNum() const { return NumComponents; } /// Gets the nested name specifier for associated user-defined mapper. NestedNameSpecifierLoc getMapperQualifierLoc() const { return MapperQualifierLoc; } /// Gets the name info for associated user-defined mapper. 
const DeclarationNameInfo &getMapperIdInfo() const { return MapperIdInfo; } /// Iterator that browse the components by lists. It also allows /// browsing components of a single declaration. class const_component_lists_iterator : public llvm::iterator_adaptor_base< const_component_lists_iterator, MappableExprComponentListRef::const_iterator, std::forward_iterator_tag, MappableComponent, ptrdiff_t, MappableComponent, MappableComponent> { // The declaration the iterator currently refers to. ArrayRef<ValueDecl *>::iterator DeclCur; // The list number associated with the current declaration. ArrayRef<unsigned>::iterator NumListsCur; // Whether this clause is possible to have user-defined mappers associated. const bool SupportsMapper; // The user-defined mapper associated with the current declaration. ArrayRef<Expr *>::iterator MapperCur; // Remaining lists for the current declaration. unsigned RemainingLists = 0; // The cumulative size of the previous list, or zero if there is no previous // list. unsigned PrevListSize = 0; // The cumulative sizes of the current list - it will delimit the remaining // range of interest. ArrayRef<unsigned>::const_iterator ListSizeCur; ArrayRef<unsigned>::const_iterator ListSizeEnd; // Iterator to the end of the components storage. MappableExprComponentListRef::const_iterator End; public: /// Construct an iterator that scans all lists. explicit const_component_lists_iterator( ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes, MappableExprComponentListRef Components, bool SupportsMapper, ArrayRef<Expr *> Mappers) : const_component_lists_iterator::iterator_adaptor_base( Components.begin()), DeclCur(UniqueDecls.begin()), NumListsCur(DeclsListNum.begin()), SupportsMapper(SupportsMapper), ListSizeCur(CumulativeListSizes.begin()), ListSizeEnd(CumulativeListSizes.end()), End(Components.end()) { assert(UniqueDecls.size() == DeclsListNum.size() && "Inconsistent number of declarations and list sizes!"); if (!DeclsListNum.empty()) RemainingLists = *NumListsCur; if (SupportsMapper) MapperCur = Mappers.begin(); } /// Construct an iterator that scan lists for a given declaration \a /// Declaration. explicit const_component_lists_iterator( const ValueDecl *Declaration, ArrayRef<ValueDecl *> UniqueDecls, ArrayRef<unsigned> DeclsListNum, ArrayRef<unsigned> CumulativeListSizes, MappableExprComponentListRef Components, bool SupportsMapper, ArrayRef<Expr *> Mappers) : const_component_lists_iterator(UniqueDecls, DeclsListNum, CumulativeListSizes, Components, SupportsMapper, Mappers) { // Look for the desired declaration. While we are looking for it, we // update the state so that we know the component where a given list // starts. for (; DeclCur != UniqueDecls.end(); ++DeclCur, ++NumListsCur) { if (*DeclCur == Declaration) break; assert(*NumListsCur > 0 && "No lists associated with declaration??"); // Skip the lists associated with the current declaration, but save the // last list size that was skipped. std::advance(ListSizeCur, *NumListsCur - 1); PrevListSize = *ListSizeCur; ++ListSizeCur; if (SupportsMapper) ++MapperCur; } // If we didn't find any declaration, advance the iterator to after the // last component and set remaining lists to zero. if (ListSizeCur == CumulativeListSizes.end()) { this->I = End; RemainingLists = 0u; return; } // Set the remaining lists with the total number of lists of the current // declaration. RemainingLists = *NumListsCur; // Adjust the list size end iterator to the end of the relevant range. 
ListSizeEnd = ListSizeCur; std::advance(ListSizeEnd, RemainingLists); // Given that the list sizes are cumulative, the index of the component // that start the list is the size of the previous list. std::advance(this->I, PrevListSize); } // Return the array with the current list. The sizes are cumulative, so the // array size is the difference between the current size and previous one. std::tuple<const ValueDecl *, MappableExprComponentListRef, const ValueDecl *> operator*() const { assert(ListSizeCur != ListSizeEnd && "Invalid iterator!"); const ValueDecl *Mapper = nullptr; if (SupportsMapper && *MapperCur) Mapper = cast<ValueDecl>(cast<DeclRefExpr>(*MapperCur)->getDecl()); return std::make_tuple( *DeclCur, MappableExprComponentListRef(&*this->I, *ListSizeCur - PrevListSize), Mapper); } std::tuple<const ValueDecl *, MappableExprComponentListRef, const ValueDecl *> operator->() const { return **this; } // Skip the components of the current list. const_component_lists_iterator &operator++() { assert(ListSizeCur != ListSizeEnd && RemainingLists && "Invalid iterator!"); // If we don't have more lists just skip all the components. Otherwise, // advance the iterator by the number of components in the current list. if (std::next(ListSizeCur) == ListSizeEnd) { this->I = End; RemainingLists = 0; } else { std::advance(this->I, *ListSizeCur - PrevListSize); PrevListSize = *ListSizeCur; // We are done with a declaration, move to the next one. if (!(--RemainingLists)) { ++DeclCur; ++NumListsCur; if (SupportsMapper) ++MapperCur; RemainingLists = *NumListsCur; assert(RemainingLists && "No lists in the following declaration??"); } } ++ListSizeCur; return *this; } }; using const_component_lists_range = llvm::iterator_range<const_component_lists_iterator>; /// Iterators for all component lists. const_component_lists_iterator component_lists_begin() const { return const_component_lists_iterator( getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(), getComponentsRef(), SupportsMapper, SupportsMapper ? getUDMapperRefs() : llvm::None); } const_component_lists_iterator component_lists_end() const { return const_component_lists_iterator( ArrayRef<ValueDecl *>(), ArrayRef<unsigned>(), ArrayRef<unsigned>(), MappableExprComponentListRef(getComponentsRef().end(), getComponentsRef().end()), SupportsMapper, llvm::None); } const_component_lists_range component_lists() const { return {component_lists_begin(), component_lists_end()}; } /// Iterators for component lists associated with the provided /// declaration. const_component_lists_iterator decl_component_lists_begin(const ValueDecl *VD) const { return const_component_lists_iterator( VD, getUniqueDeclsRef(), getDeclNumListsRef(), getComponentListSizesRef(), getComponentsRef(), SupportsMapper, SupportsMapper ? getUDMapperRefs() : llvm::None); } const_component_lists_iterator decl_component_lists_end() const { return component_lists_end(); } const_component_lists_range decl_component_lists(const ValueDecl *VD) const { return {decl_component_lists_begin(VD), decl_component_lists_end()}; } /// Iterators to access all the declarations, number of lists, list sizes, and /// components. 
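  // Illustrative sketch (not part of the original header): walking all
  // component lists of a mappable clause through the iterators declared
  // above. 'Clause' is a hypothetical reference to a class derived from
  // OMPMappableExprListClause.
  //
  //   for (const auto &L : Clause.component_lists()) {
  //     const ValueDecl *Base = std::get<0>(L);        // base declaration
  //     MappableExprComponentListRef Components = std::get<1>(L);
  //     const ValueDecl *Mapper = std::get<2>(L);      // user-defined mapper
  //   }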
using const_all_decls_iterator = ArrayRef<ValueDecl *>::iterator; using const_all_decls_range = llvm::iterator_range<const_all_decls_iterator>; const_all_decls_range all_decls() const { auto A = getUniqueDeclsRef(); return const_all_decls_range(A.begin(), A.end()); } using const_all_num_lists_iterator = ArrayRef<unsigned>::iterator; using const_all_num_lists_range = llvm::iterator_range<const_all_num_lists_iterator>; const_all_num_lists_range all_num_lists() const { auto A = getDeclNumListsRef(); return const_all_num_lists_range(A.begin(), A.end()); } using const_all_lists_sizes_iterator = ArrayRef<unsigned>::iterator; using const_all_lists_sizes_range = llvm::iterator_range<const_all_lists_sizes_iterator>; const_all_lists_sizes_range all_lists_sizes() const { auto A = getComponentListSizesRef(); return const_all_lists_sizes_range(A.begin(), A.end()); } using const_all_components_iterator = ArrayRef<MappableComponent>::iterator; using const_all_components_range = llvm::iterator_range<const_all_components_iterator>; const_all_components_range all_components() const { auto A = getComponentsRef(); return const_all_components_range(A.begin(), A.end()); } using mapperlist_iterator = MutableArrayRef<Expr *>::iterator; using mapperlist_const_iterator = ArrayRef<const Expr *>::iterator; using mapperlist_range = llvm::iterator_range<mapperlist_iterator>; using mapperlist_const_range = llvm::iterator_range<mapperlist_const_iterator>; mapperlist_iterator mapperlist_begin() { return getUDMapperRefs().begin(); } mapperlist_iterator mapperlist_end() { return getUDMapperRefs().end(); } mapperlist_const_iterator mapperlist_begin() const { return getUDMapperRefs().begin(); } mapperlist_const_iterator mapperlist_end() const { return getUDMapperRefs().end(); } mapperlist_range mapperlists() { return mapperlist_range(mapperlist_begin(), mapperlist_end()); } mapperlist_const_range mapperlists() const { return mapperlist_const_range(mapperlist_begin(), mapperlist_end()); } }; /// This represents clause 'map' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target map(a,b) /// \endcode /// In this example directive '#pragma omp target' has clause 'map' /// with the variables 'a' and 'b'. class OMPMapClause final : public OMPMappableExprListClause<OMPMapClause>, private llvm::TrailingObjects< OMPMapClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { // There are varlist_size() of expressions, and varlist_size() of // user-defined mappers. return 2 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } private: /// Map-type-modifiers for the 'map' clause. OpenMPMapModifierKind MapTypeModifiers[NumberOfOMPMapClauseModifiers] = { OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown, OMPC_MAP_MODIFIER_unknown}; /// Location of map-type-modifiers for the 'map' clause. SourceLocation MapTypeModifiersLoc[NumberOfOMPMapClauseModifiers]; /// Map type for the 'map' clause. 
OpenMPMapClauseKind MapType = OMPC_MAP_unknown; /// Is this an implicit map type or not. bool MapTypeIsImplicit = false; /// Location of the map type. SourceLocation MapLoc; /// Colon location. SourceLocation ColonLoc; /// Build a clause for \a NumVars listed expressions, \a /// NumUniqueDeclarations declarations, \a NumComponentLists total component /// lists, and \a NumComponents total expression components. /// /// \param MapModifiers Map-type-modifiers. /// \param MapModifiersLoc Locations of map-type-modifiers. /// \param MapperQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfo The identifier of associated user-defined mapper. /// \param MapType Map type. /// \param MapTypeIsImplicit Map type is inferred implicitly. /// \param MapLoc Location of the map type. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPMapClause(ArrayRef<OpenMPMapModifierKind> MapModifiers, ArrayRef<SourceLocation> MapModifiersLoc, NestedNameSpecifierLoc MapperQualifierLoc, DeclarationNameInfo MapperIdInfo, OpenMPMapClauseKind MapType, bool MapTypeIsImplicit, SourceLocation MapLoc, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_map, Locs, Sizes, /*SupportsMapper=*/true, &MapperQualifierLoc, &MapperIdInfo), MapType(MapType), MapTypeIsImplicit(MapTypeIsImplicit), MapLoc(MapLoc) { assert(llvm::array_lengthof(MapTypeModifiers) == MapModifiers.size() && "Unexpected number of map type modifiers."); llvm::copy(MapModifiers, std::begin(MapTypeModifiers)); assert(llvm::array_lengthof(MapTypeModifiersLoc) == MapModifiersLoc.size() && "Unexpected number of map type modifier locations."); llvm::copy(MapModifiersLoc, std::begin(MapTypeModifiersLoc)); } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPMapClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_map, OMPVarListLocTy(), Sizes, /*SupportsMapper=*/true) {} /// Set map-type-modifier for the clause. /// /// \param I index for map-type-modifier. /// \param T map-type-modifier for the clause. void setMapTypeModifier(unsigned I, OpenMPMapModifierKind T) { assert(I < NumberOfOMPMapClauseModifiers && "Unexpected index to store map type modifier, exceeds array size."); MapTypeModifiers[I] = T; } /// Set location for the map-type-modifier. /// /// \param I index for map-type-modifier location. /// \param TLoc map-type-modifier location. 
void setMapTypeModifierLoc(unsigned I, SourceLocation TLoc) { assert(I < NumberOfOMPMapClauseModifiers && "Index to store map type modifier location exceeds array size."); MapTypeModifiersLoc[I] = TLoc; } /// Set type for the clause. /// /// \param T Type for the clause. void setMapType(OpenMPMapClauseKind T) { MapType = T; } /// Set type location. /// /// \param TLoc Type location. void setMapLoc(SourceLocation TLoc) { MapLoc = TLoc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. /// \param UDMapperRefs References to user-defined mappers associated with /// expressions used in the clause. /// \param MapModifiers Map-type-modifiers. /// \param MapModifiersLoc Location of map-type-modifiers. /// \param UDMQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperId The identifier of associated user-defined mapper. /// \param Type Map type. /// \param TypeIsImplicit Map type is inferred implicitly. /// \param TypeLoc Location of the map type. static OMPMapClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs, ArrayRef<OpenMPMapModifierKind> MapModifiers, ArrayRef<SourceLocation> MapModifiersLoc, NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId, OpenMPMapClauseKind Type, bool TypeIsImplicit, SourceLocation TypeLoc); /// Creates an empty clause with the place for \a NumVars original /// expressions, \a NumUniqueDeclarations declarations, \NumComponentLists /// lists, and \a NumComponents expression components. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPMapClause *CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); /// Fetches mapping kind for the clause. OpenMPMapClauseKind getMapType() const LLVM_READONLY { return MapType; } /// Is this an implicit map type? /// We have to capture 'IsMapTypeImplicit' from the parser for more /// informative error messages. It helps distinguish map(r) from /// map(tofrom: r), which is important to print more helpful error /// messages for some target directives. bool isImplicitMapType() const LLVM_READONLY { return MapTypeIsImplicit; } /// Fetches the map-type-modifier at 'Cnt' index of array of modifiers. /// /// \param Cnt index for map-type-modifier. OpenMPMapModifierKind getMapTypeModifier(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMapClauseModifiers && "Requested modifier exceeds the total number of modifiers."); return MapTypeModifiers[Cnt]; } /// Fetches the map-type-modifier location at 'Cnt' index of array of /// modifiers' locations. 
/// /// \param Cnt index for map-type-modifier location. SourceLocation getMapTypeModifierLoc(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMapClauseModifiers && "Requested modifier location exceeds total number of modifiers."); return MapTypeModifiersLoc[Cnt]; } /// Fetches ArrayRef of map-type-modifiers. ArrayRef<OpenMPMapModifierKind> getMapTypeModifiers() const LLVM_READONLY { return llvm::makeArrayRef(MapTypeModifiers); } /// Fetches ArrayRef of location of map-type-modifiers. ArrayRef<SourceLocation> getMapTypeModifiersLoc() const LLVM_READONLY { return llvm::makeArrayRef(MapTypeModifiersLoc); } /// Fetches location of clause mapping kind. SourceLocation getMapLoc() const LLVM_READONLY { return MapLoc; } /// Get colon location. SourceLocation getColonLoc() const { return ColonLoc; } child_range children() { return child_range( reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPMapClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { if (MapType == OMPC_MAP_to || MapType == OMPC_MAP_tofrom) return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { auto Children = const_cast<OMPMapClause *>(this)->used_children(); return const_child_range(Children.begin(), Children.end()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_map; } }; /// This represents 'num_teams' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp teams num_teams(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'num_teams' /// with single expression 'n'. class OMPNumTeamsClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// NumTeams number. Stmt *NumTeams = nullptr; /// Set the NumTeams number. /// /// \param E NumTeams number. void setNumTeams(Expr *E) { NumTeams = E; } public: /// Build 'num_teams' clause. /// /// \param E Expression associated with this clause. /// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPNumTeamsClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_num_teams, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTeams(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPNumTeamsClause() : OMPClause(llvm::omp::OMPC_num_teams, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return NumTeams number. Expr *getNumTeams() { return cast<Expr>(NumTeams); } /// Return NumTeams number. 
Expr *getNumTeams() const { return cast<Expr>(NumTeams); } child_range children() { return child_range(&NumTeams, &NumTeams + 1); } const_child_range children() const { return const_child_range(&NumTeams, &NumTeams + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_num_teams; } }; /// This represents 'thread_limit' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp teams thread_limit(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'thread_limit' /// with single expression 'n'. class OMPThreadLimitClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// ThreadLimit number. Stmt *ThreadLimit = nullptr; /// Set the ThreadLimit number. /// /// \param E ThreadLimit number. void setThreadLimit(Expr *E) { ThreadLimit = E; } public: /// Build 'thread_limit' clause. /// /// \param E Expression associated with this clause. /// \param HelperE Helper Expression associated with this clause. /// \param CaptureRegion Innermost OpenMP region where expressions in this /// clause must be captured. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. OMPThreadLimitClause(Expr *E, Stmt *HelperE, OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc) : OMPClause(llvm::omp::OMPC_thread_limit, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), ThreadLimit(E) { setPreInitStmt(HelperE, CaptureRegion); } /// Build an empty clause. OMPThreadLimitClause() : OMPClause(llvm::omp::OMPC_thread_limit, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Return ThreadLimit number. Expr *getThreadLimit() { return cast<Expr>(ThreadLimit); } /// Return ThreadLimit number. Expr *getThreadLimit() const { return cast<Expr>(ThreadLimit); } child_range children() { return child_range(&ThreadLimit, &ThreadLimit + 1); } const_child_range children() const { return const_child_range(&ThreadLimit, &ThreadLimit + 1); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_thread_limit; } }; /// This represents 'priority' clause in the '#pragma omp ...' /// directive. /// /// \code /// #pragma omp task priority(n) /// \endcode /// In this example directive '#pragma omp teams' has clause 'priority' with /// single expression 'n'. class OMPPriorityClause : public OMPClause, public OMPClauseWithPreInit { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Priority number. Stmt *Priority = nullptr; /// Set the Priority number. /// /// \param E Priority number. void setPriority(Expr *E) { Priority = E; } public: /// Build 'priority' clause. /// /// \param Priority Expression associated with this clause. 
  /// \param HelperPriority Helper priority for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPPriorityClause(Expr *Priority, Stmt *HelperPriority,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_priority, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Priority(Priority) {
    setPreInitStmt(HelperPriority, CaptureRegion);
  }

  /// Build an empty clause.
  OMPPriorityClause()
      : OMPClause(llvm::omp::OMPC_priority, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return Priority number.
  Expr *getPriority() { return cast<Expr>(Priority); }

  /// Return Priority number.
  Expr *getPriority() const { return cast<Expr>(Priority); }

  child_range children() { return child_range(&Priority, &Priority + 1); }

  const_child_range children() const {
    return const_child_range(&Priority, &Priority + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPPriorityClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_priority;
  }
};

/// This represents 'grainsize' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop grainsize(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'grainsize'
/// with single expression '4'.
class OMPGrainsizeClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Grainsize expression.
  Stmt *Grainsize = nullptr;

  /// Set grainsize expression.
  void setGrainsize(Expr *Size) { Grainsize = Size; }

public:
  /// Build 'grainsize' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper grainsize for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPGrainsizeClause(Expr *Size, Stmt *HelperSize,
                     OpenMPDirectiveKind CaptureRegion,
                     SourceLocation StartLoc, SourceLocation LParenLoc,
                     SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_grainsize, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Grainsize(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPGrainsizeClause()
      : OMPClause(llvm::omp::OMPC_grainsize, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the grainsize expression.
  Expr *getGrainsize() const { return cast_or_null<Expr>(Grainsize); }

  child_range children() { return child_range(&Grainsize, &Grainsize + 1); }

  const_child_range children() const {
    return const_child_range(&Grainsize, &Grainsize + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPGrainsizeClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_grainsize;
  }
};

/// This represents 'nogroup' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp taskloop nogroup
/// \endcode
/// In this example directive '#pragma omp taskloop' has 'nogroup' clause.
class OMPNogroupClause : public OMPClause {
public:
  /// Build 'nogroup' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPNogroupClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_nogroup, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPNogroupClause()
      : OMPClause(llvm::omp::OMPC_nogroup, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nogroup;
  }
};

/// This represents 'num_tasks' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp taskloop num_tasks(4)
/// \endcode
/// In this example directive '#pragma omp taskloop' has clause 'num_tasks'
/// with single expression '4'.
class OMPNumTasksClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Number of tasks.
  Stmt *NumTasks = nullptr;

  /// Set the number-of-tasks expression.
  void setNumTasks(Expr *Size) { NumTasks = Size; }

public:
  /// Build 'num_tasks' clause.
  ///
  /// \param Size Expression associated with this clause.
  /// \param HelperSize Helper expression for the construct.
  /// \param CaptureRegion Innermost OpenMP region where expressions in this
  /// clause must be captured.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPNumTasksClause(Expr *Size, Stmt *HelperSize,
                    OpenMPDirectiveKind CaptureRegion, SourceLocation StartLoc,
                    SourceLocation LParenLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_num_tasks, StartLoc, EndLoc),
        OMPClauseWithPreInit(this), LParenLoc(LParenLoc), NumTasks(Size) {
    setPreInitStmt(HelperSize, CaptureRegion);
  }

  /// Build an empty clause.
  explicit OMPNumTasksClause()
      : OMPClause(llvm::omp::OMPC_num_tasks, SourceLocation(),
                  SourceLocation()),
        OMPClauseWithPreInit(this) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Return the number-of-tasks expression.
  Expr *getNumTasks() const { return cast_or_null<Expr>(NumTasks); }

  child_range children() { return child_range(&NumTasks, &NumTasks + 1); }

  const_child_range children() const {
    return const_child_range(&NumTasks, &NumTasks + 1);
  }

  child_range used_children();
  const_child_range used_children() const {
    auto Children = const_cast<OMPNumTasksClause *>(this)->used_children();
    return const_child_range(Children.begin(), Children.end());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_num_tasks;
  }
};

/// This represents 'hint' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp critical (name) hint(6)
/// \endcode
/// In this example directive '#pragma omp critical' has name 'name' and clause
/// 'hint' with argument '6'.
class OMPHintClause : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Hint expression of the 'hint' clause.
  Stmt *Hint = nullptr;

  /// Set hint expression.
  void setHint(Expr *H) { Hint = H; }

public:
  /// Build 'hint' clause with expression \a Hint.
  ///
  /// \param Hint Hint expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPHintClause(Expr *Hint, SourceLocation StartLoc, SourceLocation LParenLoc,
                SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_hint, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Hint(Hint) {}

  /// Build an empty clause.
  OMPHintClause()
      : OMPClause(llvm::omp::OMPC_hint, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns the hint expression of the clause.
  Expr *getHint() const { return cast_or_null<Expr>(Hint); }

  child_range children() { return child_range(&Hint, &Hint + 1); }

  const_child_range children() const {
    return const_child_range(&Hint, &Hint + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_hint;
  }
};

/// This represents 'dist_schedule' clause in the '#pragma omp ...'
/// directive.
///
/// \code
/// #pragma omp distribute dist_schedule(static, 3)
/// \endcode
/// In this example directive '#pragma omp distribute' has 'dist_schedule'
/// clause with arguments 'static' and '3'.
class OMPDistScheduleClause : public OMPClause, public OMPClauseWithPreInit {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'dist_schedule' clause.
  OpenMPDistScheduleClauseKind Kind = OMPC_DIST_SCHEDULE_unknown;

  /// Start location of the schedule kind in source code.
  SourceLocation KindLoc;

  /// Location of ',' (if any).
  SourceLocation CommaLoc;

  /// Chunk size.
  Expr *ChunkSize = nullptr;

  /// Set schedule kind.
  ///
  /// \param K Schedule kind.
  void setDistScheduleKind(OpenMPDistScheduleClauseKind K) { Kind = K; }

  /// Sets the location of '('.
  ///
  /// \param Loc Location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Set schedule kind start location.
  ///
  /// \param KLoc Schedule kind location.
  void setDistScheduleKindLoc(SourceLocation KLoc) { KindLoc = KLoc; }

  /// Set location of ','.
  ///
  /// \param Loc Location of ','.
  void setCommaLoc(SourceLocation Loc) { CommaLoc = Loc; }

  /// Set chunk size.
/// /// \param E Chunk size. void setChunkSize(Expr *E) { ChunkSize = E; } public: /// Build 'dist_schedule' clause with schedule kind \a Kind and chunk /// size expression \a ChunkSize. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param CommaLoc Location of ','. /// \param EndLoc Ending location of the clause. /// \param Kind DistSchedule kind. /// \param ChunkSize Chunk size. /// \param HelperChunkSize Helper chunk size for combined directives. OMPDistScheduleClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KLoc, SourceLocation CommaLoc, SourceLocation EndLoc, OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize, Stmt *HelperChunkSize) : OMPClause(llvm::omp::OMPC_dist_schedule, StartLoc, EndLoc), OMPClauseWithPreInit(this), LParenLoc(LParenLoc), Kind(Kind), KindLoc(KLoc), CommaLoc(CommaLoc), ChunkSize(ChunkSize) { setPreInitStmt(HelperChunkSize); } /// Build an empty clause. explicit OMPDistScheduleClause() : OMPClause(llvm::omp::OMPC_dist_schedule, SourceLocation(), SourceLocation()), OMPClauseWithPreInit(this) {} /// Get kind of the clause. OpenMPDistScheduleClauseKind getDistScheduleKind() const { return Kind; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getDistScheduleKindLoc() { return KindLoc; } /// Get location of ','. SourceLocation getCommaLoc() { return CommaLoc; } /// Get chunk size. Expr *getChunkSize() { return ChunkSize; } /// Get chunk size. const Expr *getChunkSize() const { return ChunkSize; } child_range children() { return child_range(reinterpret_cast<Stmt **>(&ChunkSize), reinterpret_cast<Stmt **>(&ChunkSize) + 1); } const_child_range children() const { auto Children = const_cast<OMPDistScheduleClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_dist_schedule; } }; /// This represents 'defaultmap' clause in the '#pragma omp ...' directive. /// /// \code /// #pragma omp target defaultmap(tofrom: scalar) /// \endcode /// In this example directive '#pragma omp target' has 'defaultmap' clause of kind /// 'scalar' with modifier 'tofrom'. class OMPDefaultmapClause : public OMPClause { friend class OMPClauseReader; /// Location of '('. SourceLocation LParenLoc; /// Modifiers for 'defaultmap' clause. OpenMPDefaultmapClauseModifier Modifier = OMPC_DEFAULTMAP_MODIFIER_unknown; /// Locations of modifiers. SourceLocation ModifierLoc; /// A kind of the 'defaultmap' clause. OpenMPDefaultmapClauseKind Kind = OMPC_DEFAULTMAP_unknown; /// Start location of the defaultmap kind in source code. SourceLocation KindLoc; /// Set defaultmap kind. /// /// \param K Defaultmap kind. void setDefaultmapKind(OpenMPDefaultmapClauseKind K) { Kind = K; } /// Set the defaultmap modifier. /// /// \param M Defaultmap modifier. void setDefaultmapModifier(OpenMPDefaultmapClauseModifier M) { Modifier = M; } /// Set location of the defaultmap modifier. void setDefaultmapModifierLoc(SourceLocation Loc) { ModifierLoc = Loc; } /// Sets the location of '('. /// /// \param Loc Location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Set defaultmap kind start location. 
/// /// \param KLoc Defaultmap kind location. void setDefaultmapKindLoc(SourceLocation KLoc) { KindLoc = KLoc; } public: /// Build 'defaultmap' clause with defaultmap kind \a Kind /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param KLoc Starting location of the argument. /// \param EndLoc Ending location of the clause. /// \param Kind Defaultmap kind. /// \param M The modifier applied to 'defaultmap' clause. /// \param MLoc Location of the modifier OMPDefaultmapClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc, SourceLocation KLoc, SourceLocation EndLoc, OpenMPDefaultmapClauseKind Kind, OpenMPDefaultmapClauseModifier M) : OMPClause(llvm::omp::OMPC_defaultmap, StartLoc, EndLoc), LParenLoc(LParenLoc), Modifier(M), ModifierLoc(MLoc), Kind(Kind), KindLoc(KLoc) {} /// Build an empty clause. explicit OMPDefaultmapClause() : OMPClause(llvm::omp::OMPC_defaultmap, SourceLocation(), SourceLocation()) {} /// Get kind of the clause. OpenMPDefaultmapClauseKind getDefaultmapKind() const { return Kind; } /// Get the modifier of the clause. OpenMPDefaultmapClauseModifier getDefaultmapModifier() const { return Modifier; } /// Get location of '('. SourceLocation getLParenLoc() { return LParenLoc; } /// Get kind location. SourceLocation getDefaultmapKindLoc() { return KindLoc; } /// Get the modifier location. SourceLocation getDefaultmapModifierLoc() const { return ModifierLoc; } child_range children() { return child_range(child_iterator(), child_iterator()); } const_child_range children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_defaultmap; } }; /// This represents clause 'to' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target update to(a,b) /// \endcode /// In this example directive '#pragma omp target update' has clause 'to' /// with the variables 'a' and 'b'. class OMPToClause final : public OMPMappableExprListClause<OMPToClause>, private llvm::TrailingObjects< OMPToClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Motion-modifiers for the 'to' clause. OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = { OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown}; /// Location of motion-modifiers for the 'to' clause. SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers]; /// Colon location. SourceLocation ColonLoc; /// Build clause with number of variables \a NumVars. /// /// \param TheMotionModifiers Motion-modifiers. /// \param TheMotionModifiersLoc Locations of motion-modifiers. /// \param MapperQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfo The identifier of associated user-defined mapper. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. 
It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPToClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers, ArrayRef<SourceLocation> TheMotionModifiersLoc, NestedNameSpecifierLoc MapperQualifierLoc, DeclarationNameInfo MapperIdInfo, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_to, Locs, Sizes, /*SupportsMapper=*/true, &MapperQualifierLoc, &MapperIdInfo) { assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() && "Unexpected number of motion modifiers."); llvm::copy(TheMotionModifiers, std::begin(MotionModifiers)); assert(llvm::array_lengthof(MotionModifiersLoc) == TheMotionModifiersLoc.size() && "Unexpected number of motion modifier locations."); llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc)); } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPToClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_to, OMPVarListLocTy(), Sizes, /*SupportsMapper=*/true) {} /// Set motion-modifier for the clause. /// /// \param I index for motion-modifier. /// \param T motion-modifier for the clause. void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) { assert(I < NumberOfOMPMotionModifiers && "Unexpected index to store motion modifier, exceeds array size."); MotionModifiers[I] = T; } /// Set location for the motion-modifier. /// /// \param I index for motion-modifier location. /// \param TLoc motion-modifier location. void setMotionModifierLoc(unsigned I, SourceLocation TLoc) { assert(I < NumberOfOMPMotionModifiers && "Index to store motion modifier location exceeds array size."); MotionModifiersLoc[I] = TLoc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { // There are varlist_size() of expressions, and varlist_size() of // user-defined mappers. return 2 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. /// \param MotionModifiers Motion-modifiers. /// \param MotionModifiersLoc Location of motion-modifiers. 
/// \param UDMapperRefs References to user-defined mappers associated with /// expressions used in the clause. /// \param UDMQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperId The identifier of associated user-defined mapper. static OMPToClause *Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPToClause *CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); /// Fetches the motion-modifier at 'Cnt' index of array of modifiers. /// /// \param Cnt index for motion-modifier. OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier exceeds the total number of modifiers."); return MotionModifiers[Cnt]; } /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers' /// locations. /// /// \param Cnt index for motion-modifier location. SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier location exceeds total number of modifiers."); return MotionModifiersLoc[Cnt]; } /// Fetches ArrayRef of motion-modifiers. ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiers); } /// Fetches ArrayRef of location of motion-modifiers. ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiersLoc); } /// Get colon location. SourceLocation getColonLoc() const { return ColonLoc; } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPToClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_to; } }; /// This represents clause 'from' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target update from(a,b) /// \endcode /// In this example directive '#pragma omp target update' has clause 'from' /// with the variables 'a' and 'b'. class OMPFromClause final : public OMPMappableExprListClause<OMPFromClause>, private llvm::TrailingObjects< OMPFromClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Motion-modifiers for the 'from' clause. 
OpenMPMotionModifierKind MotionModifiers[NumberOfOMPMotionModifiers] = { OMPC_MOTION_MODIFIER_unknown, OMPC_MOTION_MODIFIER_unknown}; /// Location of motion-modifiers for the 'from' clause. SourceLocation MotionModifiersLoc[NumberOfOMPMotionModifiers]; /// Colon location. SourceLocation ColonLoc; /// Build clause with number of variables \a NumVars. /// /// \param TheMotionModifiers Motion-modifiers. /// \param TheMotionModifiersLoc Locations of motion-modifiers. /// \param MapperQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperIdInfo The identifier of associated user-defined mapper. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPFromClause(ArrayRef<OpenMPMotionModifierKind> TheMotionModifiers, ArrayRef<SourceLocation> TheMotionModifiersLoc, NestedNameSpecifierLoc MapperQualifierLoc, DeclarationNameInfo MapperIdInfo, const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_from, Locs, Sizes, /*SupportsMapper=*/true, &MapperQualifierLoc, &MapperIdInfo) { assert(llvm::array_lengthof(MotionModifiers) == TheMotionModifiers.size() && "Unexpected number of motion modifiers."); llvm::copy(TheMotionModifiers, std::begin(MotionModifiers)); assert(llvm::array_lengthof(MotionModifiersLoc) == TheMotionModifiersLoc.size() && "Unexpected number of motion modifier locations."); llvm::copy(TheMotionModifiersLoc, std::begin(MotionModifiersLoc)); } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPFromClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_from, OMPVarListLocTy(), Sizes, /*SupportsMapper=*/true) {} /// Set motion-modifier for the clause. /// /// \param I index for motion-modifier. /// \param T motion-modifier for the clause. void setMotionModifier(unsigned I, OpenMPMotionModifierKind T) { assert(I < NumberOfOMPMotionModifiers && "Unexpected index to store motion modifier, exceeds array size."); MotionModifiers[I] = T; } /// Set location for the motion-modifier. /// /// \param I index for motion-modifier location. /// \param TLoc motion-modifier location. void setMotionModifierLoc(unsigned I, SourceLocation TLoc) { assert(I < NumberOfOMPMotionModifiers && "Index to store motion modifier location exceeds array size."); MotionModifiersLoc[I] = TLoc; } /// Set colon location. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. 
size_t numTrailingObjects(OverloadToken<Expr *>) const { // There are varlist_size() of expressions, and varlist_size() of // user-defined mappers. return 2 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. /// \param MotionModifiers Motion-modifiers. /// \param MotionModifiersLoc Location of motion-modifiers. /// \param UDMapperRefs References to user-defined mappers associated with /// expressions used in the clause. /// \param UDMQualifierLoc C++ nested name specifier for the associated /// user-defined mapper. /// \param MapperId The identifier of associated user-defined mapper. static OMPFromClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists, ArrayRef<Expr *> UDMapperRefs, ArrayRef<OpenMPMotionModifierKind> MotionModifiers, ArrayRef<SourceLocation> MotionModifiersLoc, NestedNameSpecifierLoc UDMQualifierLoc, DeclarationNameInfo MapperId); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPFromClause *CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); /// Fetches the motion-modifier at 'Cnt' index of array of modifiers. /// /// \param Cnt index for motion-modifier. OpenMPMotionModifierKind getMotionModifier(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier exceeds the total number of modifiers."); return MotionModifiers[Cnt]; } /// Fetches the motion-modifier location at 'Cnt' index of array of modifiers' /// locations. /// /// \param Cnt index for motion-modifier location. SourceLocation getMotionModifierLoc(unsigned Cnt) const LLVM_READONLY { assert(Cnt < NumberOfOMPMotionModifiers && "Requested modifier location exceeds total number of modifiers."); return MotionModifiersLoc[Cnt]; } /// Fetches ArrayRef of motion-modifiers. ArrayRef<OpenMPMotionModifierKind> getMotionModifiers() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiers); } /// Fetches ArrayRef of location of motion-modifiers. ArrayRef<SourceLocation> getMotionModifiersLoc() const LLVM_READONLY { return llvm::makeArrayRef(MotionModifiersLoc); } /// Get colon location. 
SourceLocation getColonLoc() const { return ColonLoc; } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPFromClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_from; } }; /// This represents clause 'use_device_ptr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target data use_device_ptr(a,b) /// \endcode /// In this example directive '#pragma omp target data' has clause /// 'use_device_ptr' with the variables 'a' and 'b'. class OMPUseDevicePtrClause final : public OMPMappableExprListClause<OMPUseDevicePtrClause>, private llvm::TrailingObjects< OMPUseDevicePtrClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a NumVars. /// /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPUseDevicePtrClause(const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, Locs, Sizes) { } /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPUseDevicePtrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_ptr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return 3 * varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } /// Sets the list of references to private copies with initializers for new /// private variables. /// \param VL List of references. void setPrivateCopies(ArrayRef<Expr *> VL); /// Gets the list of references to private copies with initializers for new /// private variables. 
MutableArrayRef<Expr *> getPrivateCopies() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateCopies() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } /// Sets the list of references to initializer variables for new private /// variables. /// \param VL List of references. void setInits(ArrayRef<Expr *> VL); /// Gets the list of references to initializer variables for new private /// variables. MutableArrayRef<Expr *> getInits() { return MutableArrayRef<Expr *>(getPrivateCopies().end(), varlist_size()); } ArrayRef<const Expr *> getInits() const { return llvm::makeArrayRef(getPrivateCopies().end(), varlist_size()); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param PrivateVars Expressions referring to private copies. /// \param Inits Expressions referring to private copy initializers. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPUseDevicePtrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<Expr *> PrivateVars, ArrayRef<Expr *> Inits, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. 
static OMPUseDevicePtrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); using private_copies_iterator = MutableArrayRef<Expr *>::iterator; using private_copies_const_iterator = ArrayRef<const Expr *>::iterator; using private_copies_range = llvm::iterator_range<private_copies_iterator>; using private_copies_const_range = llvm::iterator_range<private_copies_const_iterator>; private_copies_range private_copies() { return private_copies_range(getPrivateCopies().begin(), getPrivateCopies().end()); } private_copies_const_range private_copies() const { return private_copies_const_range(getPrivateCopies().begin(), getPrivateCopies().end()); } using inits_iterator = MutableArrayRef<Expr *>::iterator; using inits_const_iterator = ArrayRef<const Expr *>::iterator; using inits_range = llvm::iterator_range<inits_iterator>; using inits_const_range = llvm::iterator_range<inits_const_iterator>; inits_range inits() { return inits_range(getInits().begin(), getInits().end()); } inits_const_range inits() const { return inits_const_range(getInits().begin(), getInits().end()); } child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPUseDevicePtrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_use_device_ptr; } }; /// This represents clause 'use_device_addr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target data use_device_addr(a,b) /// \endcode /// In this example directive '#pragma omp target data' has clause /// 'use_device_addr' with the variables 'a' and 'b'. class OMPUseDeviceAddrClause final : public OMPMappableExprListClause<OMPUseDeviceAddrClause>, private llvm::TrailingObjects< OMPUseDeviceAddrClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a NumVars. /// /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPUseDeviceAddrClause(const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, Locs, Sizes) {} /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. 
It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPUseDeviceAddrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_use_device_addr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPUseDeviceAddrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. static OMPUseDeviceAddrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPUseDeviceAddrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_use_device_addr; } }; /// This represents clause 'is_device_ptr' in the '#pragma omp ...' /// directives. /// /// \code /// #pragma omp target is_device_ptr(a,b) /// \endcode /// In this example directive '#pragma omp target' has clause /// 'is_device_ptr' with the variables 'a' and 'b'. class OMPIsDevicePtrClause final : public OMPMappableExprListClause<OMPIsDevicePtrClause>, private llvm::TrailingObjects< OMPIsDevicePtrClause, Expr *, ValueDecl *, unsigned, OMPClauseMappableExprCommon::MappableComponent> { friend class OMPClauseReader; friend OMPMappableExprListClause; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a NumVars. /// /// \param Locs Locations needed to build a mappable clause. 
It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPIsDevicePtrClause(const OMPVarListLocTy &Locs, const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, Locs, Sizes) {} /// Build an empty clause. /// /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. explicit OMPIsDevicePtrClause(const OMPMappableExprListSizeTy &Sizes) : OMPMappableExprListClause(llvm::omp::OMPC_is_device_ptr, OMPVarListLocTy(), Sizes) {} /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. size_t numTrailingObjects(OverloadToken<Expr *>) const { return varlist_size(); } size_t numTrailingObjects(OverloadToken<ValueDecl *>) const { return getUniqueDeclarationsNum(); } size_t numTrailingObjects(OverloadToken<unsigned>) const { return getUniqueDeclarationsNum() + getTotalComponentListNum(); } public: /// Creates clause with a list of variables \a Vars. /// /// \param C AST context. /// \param Locs Locations needed to build a mappable clause. It includes 1) /// StartLoc: starting location of the clause (the clause keyword); 2) /// LParenLoc: location of '('; 3) EndLoc: ending location of the clause. /// \param Vars The original expression used in the clause. /// \param Declarations Declarations used in the clause. /// \param ComponentLists Component lists used in the clause. static OMPIsDevicePtrClause * Create(const ASTContext &C, const OMPVarListLocTy &Locs, ArrayRef<Expr *> Vars, ArrayRef<ValueDecl *> Declarations, MappableExprComponentListsRef ComponentLists); /// Creates an empty clause with the place for \a NumVars variables. /// /// \param C AST context. /// \param Sizes All required sizes to build a mappable clause. It includes 1) /// NumVars: number of expressions listed in this clause; 2) /// NumUniqueDeclarations: number of unique base declarations in this clause; /// 3) NumComponentLists: number of component lists in this clause; and 4) /// NumComponents: total number of expression components in the clause. 
static OMPIsDevicePtrClause * CreateEmpty(const ASTContext &C, const OMPMappableExprListSizeTy &Sizes); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPIsDevicePtrClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_is_device_ptr; } }; /// This represents clause 'nontemporal' in the '#pragma omp ...' directives. /// /// \code /// #pragma omp simd nontemporal(a) /// \endcode /// In this example directive '#pragma omp simd' has clause 'nontemporal' for /// the variable 'a'. class OMPNontemporalClause final : public OMPVarListClause<OMPNontemporalClause>, private llvm::TrailingObjects<OMPNontemporalClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPNontemporalClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPNontemporalClause>(llvm::omp::OMPC_nontemporal, StartLoc, LParenLoc, EndLoc, N) { } /// Build an empty clause. /// /// \param N Number of variables. explicit OMPNontemporalClause(unsigned N) : OMPVarListClause<OMPNontemporalClause>( llvm::omp::OMPC_nontemporal, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Get the list of privatied copies if the member expression was captured by /// one of the privatization clauses. MutableArrayRef<Expr *> getPrivateRefs() { return MutableArrayRef<Expr *>(varlist_end(), varlist_size()); } ArrayRef<const Expr *> getPrivateRefs() const { return llvm::makeArrayRef(varlist_end(), varlist_size()); } public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the variables. static OMPNontemporalClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPNontemporalClause *CreateEmpty(const ASTContext &C, unsigned N); /// Sets the list of references to private copies created in private clauses. /// \param VL List of references. 
  void setPrivateRefs(ArrayRef<Expr *> VL);

  child_range children() {
    return child_range(reinterpret_cast<Stmt **>(varlist_begin()),
                       reinterpret_cast<Stmt **>(varlist_end()));
  }

  const_child_range children() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->children();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range private_refs() {
    return child_range(reinterpret_cast<Stmt **>(getPrivateRefs().begin()),
                       reinterpret_cast<Stmt **>(getPrivateRefs().end()));
  }

  const_child_range private_refs() const {
    auto Children = const_cast<OMPNontemporalClause *>(this)->private_refs();
    return const_child_range(Children.begin(), Children.end());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_nontemporal;
  }
};

/// This represents 'order' clause in the '#pragma omp ...' directive.
///
/// \code
/// #pragma omp simd order(concurrent)
/// \endcode
/// In this example directive '#pragma omp simd' has simple 'order'
/// clause with kind 'concurrent'.
class OMPOrderClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// A kind of the 'order' clause.
  OpenMPOrderClauseKind Kind = OMPC_ORDER_unknown;

  /// Start location of the kind in source code.
  SourceLocation KindKwLoc;

  /// Set kind of the clause.
  ///
  /// \param K Argument of clause.
  void setKind(OpenMPOrderClauseKind K) { Kind = K; }

  /// Set argument location.
  ///
  /// \param KLoc Argument location.
  void setKindKwLoc(SourceLocation KLoc) { KindKwLoc = KLoc; }

public:
  /// Build 'order' clause with argument \p A ('concurrent').
  ///
  /// \param A Argument of the clause ('concurrent').
  /// \param ALoc Starting location of the argument.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPOrderClause(OpenMPOrderClauseKind A, SourceLocation ALoc,
                 SourceLocation StartLoc, SourceLocation LParenLoc,
                 SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_order, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Kind(A), KindKwLoc(ALoc) {}

  /// Build an empty clause.
  OMPOrderClause()
      : OMPClause(llvm::omp::OMPC_order, SourceLocation(), SourceLocation()) {}

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns kind of the clause.
  OpenMPOrderClauseKind getKind() const { return Kind; }

  /// Returns location of clause kind.
  SourceLocation getKindKwLoc() const { return KindKwLoc; }

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_order;
  }
};

/// This represents 'destroy' clause in the '#pragma omp depobj'
/// directive.
///
/// \code
/// #pragma omp depobj(a) destroy
/// \endcode
/// In this example directive '#pragma omp depobj' has 'destroy' clause.
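///
/// A minimal construction sketch (illustrative only; 'Ctx', 'StartLoc', and
/// 'EndLoc' are assumed to be supplied by the caller, and placement new on
/// the ASTContext follows the usual Clang AST allocation pattern):
/// \code
///   OMPDestroyClause *DC = new (Ctx) OMPDestroyClause(StartLoc, EndLoc);
///   // Generic code recovers the clause kind via getClauseKind(), which is
///   // also what classof()/isa<> dispatch relies on.
///   assert(DC->getClauseKind() == llvm::omp::OMPC_destroy);
/// \endcode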
class OMPDestroyClause final : public OMPClause {
public:
  /// Build 'destroy' clause.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param EndLoc Ending location of the clause.
  OMPDestroyClause(SourceLocation StartLoc, SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_destroy, StartLoc, EndLoc) {}

  /// Build an empty clause.
  OMPDestroyClause()
      : OMPClause(llvm::omp::OMPC_destroy, SourceLocation(),
                  SourceLocation()) {}

  child_range children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_destroy;
  }
};

/// This represents 'detach' clause in the '#pragma omp task' directive.
///
/// \code
/// #pragma omp task detach(evt)
/// \endcode
/// In this example directive '#pragma omp task' has simple 'detach' clause
/// with the variable 'evt'.
class OMPDetachClause final : public OMPClause {
  friend class OMPClauseReader;

  /// Location of '('.
  SourceLocation LParenLoc;

  /// Expression of the 'detach' clause.
  Stmt *Evt = nullptr;

  /// Set event-handler expression.
  void setEventHandler(Expr *E) { Evt = E; }

  /// Sets the location of '('.
  void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; }

public:
  /// Build 'detach' clause with event-handler \a Evt.
  ///
  /// \param Evt Event handler expression.
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  OMPDetachClause(Expr *Evt, SourceLocation StartLoc, SourceLocation LParenLoc,
                  SourceLocation EndLoc)
      : OMPClause(llvm::omp::OMPC_detach, StartLoc, EndLoc),
        LParenLoc(LParenLoc), Evt(Evt) {}

  /// Build an empty clause.
  OMPDetachClause()
      : OMPClause(llvm::omp::OMPC_detach, SourceLocation(),
                  SourceLocation()) {}

  /// Returns the location of '('.
  SourceLocation getLParenLoc() const { return LParenLoc; }

  /// Returns event-handler expression.
  Expr *getEventHandler() const { return cast_or_null<Expr>(Evt); }

  child_range children() { return child_range(&Evt, &Evt + 1); }

  const_child_range children() const {
    return const_child_range(&Evt, &Evt + 1);
  }

  child_range used_children() {
    return child_range(child_iterator(), child_iterator());
  }

  const_child_range used_children() const {
    return const_child_range(const_child_iterator(), const_child_iterator());
  }

  static bool classof(const OMPClause *T) {
    return T->getClauseKind() == llvm::omp::OMPC_detach;
  }
};

/// This represents clause 'inclusive' in the '#pragma omp scan' directive.
///
/// \code
/// #pragma omp scan inclusive(a,b)
/// \endcode
/// In this example directive '#pragma omp scan' has clause 'inclusive'
/// with the variables 'a' and 'b'.
class OMPInclusiveClause final
    : public OMPVarListClause<OMPInclusiveClause>,
      private llvm::TrailingObjects<OMPInclusiveClause, Expr *> {
  friend class OMPClauseReader;
  friend OMPVarListClause;
  friend TrailingObjects;

  /// Build clause with number of variables \a N.
  ///
  /// \param StartLoc Starting location of the clause.
  /// \param LParenLoc Location of '('.
  /// \param EndLoc Ending location of the clause.
  /// \param N Number of the variables in the clause.
OMPInclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPInclusiveClause(unsigned N) : OMPVarListClause<OMPInclusiveClause>(llvm::omp::OMPC_inclusive, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. static OMPInclusiveClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. static OMPInclusiveClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPInclusiveClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_inclusive; } }; /// This represents clause 'exclusive' in the '#pragma omp scan' directive. /// /// \code /// #pragma omp scan exclusive(a,b) /// \endcode /// In this example directive '#pragma omp scan' has clause 'exclusive' /// with the variables 'a' and 'b'. class OMPExclusiveClause final : public OMPVarListClause<OMPExclusiveClause>, private llvm::TrailingObjects<OMPExclusiveClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Build clause with number of variables \a N. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of the variables in the clause. OMPExclusiveClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// /// \param N Number of variables. explicit OMPExclusiveClause(unsigned N) : OMPVarListClause<OMPExclusiveClause>(llvm::omp::OMPC_exclusive, SourceLocation(), SourceLocation(), SourceLocation(), N) {} public: /// Creates clause with a list of variables \a VL. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param VL List of references to the original variables. static OMPExclusiveClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<Expr *> VL); /// Creates an empty clause with the place for \a N variables. /// /// \param C AST context. /// \param N The number of variables. 
static OMPExclusiveClause *CreateEmpty(const ASTContext &C, unsigned N); child_range children() { return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end())); } const_child_range children() const { auto Children = const_cast<OMPExclusiveClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_exclusive; } }; /// This represents clause 'uses_allocators' in the '#pragma omp target'-based /// directives. /// /// \code /// #pragma omp target uses_allocators(default_allocator, my_allocator(traits)) /// \endcode /// In this example directive '#pragma omp target' has clause 'uses_allocators' /// with the allocators 'default_allocator' and user-defined 'my_allocator'. class OMPUsesAllocatorsClause final : public OMPClause, private llvm::TrailingObjects<OMPUsesAllocatorsClause, Expr *, SourceLocation> { public: /// Data for list of allocators. struct Data { /// Allocator. Expr *Allocator = nullptr; /// Allocator traits. Expr *AllocatorTraits = nullptr; /// Locations of '(' and ')' symbols. SourceLocation LParenLoc, RParenLoc; }; private: friend class OMPClauseReader; friend TrailingObjects; enum class ExprOffsets { Allocator, AllocatorTraits, Total, }; enum class ParenLocsOffsets { LParen, RParen, Total, }; /// Location of '('. SourceLocation LParenLoc; /// Total number of allocators in the clause. unsigned NumOfAllocators = 0; /// Build clause. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param N Number of allocators asssociated with the clause. OMPUsesAllocatorsClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, unsigned N) : OMPClause(llvm::omp::OMPC_uses_allocators, StartLoc, EndLoc), LParenLoc(LParenLoc), NumOfAllocators(N) {} /// Build an empty clause. /// \param N Number of allocators asssociated with the clause. /// explicit OMPUsesAllocatorsClause(unsigned N) : OMPClause(llvm::omp::OMPC_uses_allocators, SourceLocation(), SourceLocation()), NumOfAllocators(N) {} unsigned numTrailingObjects(OverloadToken<Expr *>) const { return NumOfAllocators * static_cast<int>(ExprOffsets::Total); } /// Sets the location of '('. void setLParenLoc(SourceLocation Loc) { LParenLoc = Loc; } /// Sets the allocators data for the clause. void setAllocatorsData(ArrayRef<OMPUsesAllocatorsClause::Data> Data); public: /// Creates clause with a list of allocators \p Data. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param EndLoc Ending location of the clause. /// \param Data List of allocators. static OMPUsesAllocatorsClause * Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc, ArrayRef<OMPUsesAllocatorsClause::Data> Data); /// Creates an empty clause with the place for \p N allocators. /// /// \param C AST context. /// \param N The number of allocators. static OMPUsesAllocatorsClause *CreateEmpty(const ASTContext &C, unsigned N); /// Returns the location of '('. SourceLocation getLParenLoc() const { return LParenLoc; } /// Returns number of allocators associated with the clause. 
unsigned getNumberOfAllocators() const { return NumOfAllocators; } /// Returns data for the specified allocator. OMPUsesAllocatorsClause::Data getAllocatorData(unsigned I) const; // Iterators child_range children() { Stmt **Begin = reinterpret_cast<Stmt **>(getTrailingObjects<Expr *>()); return child_range(Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total)); } const_child_range children() const { Stmt *const *Begin = reinterpret_cast<Stmt *const *>(getTrailingObjects<Expr *>()); return const_child_range( Begin, Begin + NumOfAllocators * static_cast<int>(ExprOffsets::Total)); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_uses_allocators; } }; /// This represents clause 'affinity' in the '#pragma omp task'-based /// directives. /// /// \code /// #pragma omp task affinity(iterator(i = 0:n) : ([3][n])a, b[:n], c[i]) /// \endcode /// In this example directive '#pragma omp task' has clause 'affinity' with the /// affinity modifer 'iterator(i = 0:n)' and locator items '([3][n])a', 'b[:n]' /// and 'c[i]'. class OMPAffinityClause final : public OMPVarListClause<OMPAffinityClause>, private llvm::TrailingObjects<OMPAffinityClause, Expr *> { friend class OMPClauseReader; friend OMPVarListClause; friend TrailingObjects; /// Location of ':' symbol. SourceLocation ColonLoc; /// Build clause. /// /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param N Number of locators asssociated with the clause. OMPAffinityClause(SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, StartLoc, LParenLoc, EndLoc, N) {} /// Build an empty clause. /// \param N Number of locators asssociated with the clause. /// explicit OMPAffinityClause(unsigned N) : OMPVarListClause<OMPAffinityClause>(llvm::omp::OMPC_affinity, SourceLocation(), SourceLocation(), SourceLocation(), N) {} /// Sets the affinity modifier for the clause, if any. void setModifier(Expr *E) { getTrailingObjects<Expr *>()[varlist_size()] = E; } /// Sets the location of ':' symbol. void setColonLoc(SourceLocation Loc) { ColonLoc = Loc; } public: /// Creates clause with a modifier a list of locator items. /// /// \param C AST context. /// \param StartLoc Starting location of the clause. /// \param LParenLoc Location of '('. /// \param ColonLoc Location of ':'. /// \param EndLoc Ending location of the clause. /// \param Locators List of locator items. static OMPAffinityClause *Create(const ASTContext &C, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, Expr *Modifier, ArrayRef<Expr *> Locators); /// Creates an empty clause with the place for \p N locator items. /// /// \param C AST context. /// \param N The number of locator items. static OMPAffinityClause *CreateEmpty(const ASTContext &C, unsigned N); /// Gets affinity modifier. Expr *getModifier() { return getTrailingObjects<Expr *>()[varlist_size()]; } Expr *getModifier() const { return getTrailingObjects<Expr *>()[varlist_size()]; } /// Gets the location of ':' symbol. 
SourceLocation getColonLoc() const { return ColonLoc; } // Iterators child_range children() { int Offset = getModifier() ? 1 : 0; return child_range(reinterpret_cast<Stmt **>(varlist_begin()), reinterpret_cast<Stmt **>(varlist_end() + Offset)); } const_child_range children() const { auto Children = const_cast<OMPAffinityClause *>(this)->children(); return const_child_range(Children.begin(), Children.end()); } child_range used_children() { return child_range(child_iterator(), child_iterator()); } const_child_range used_children() const { return const_child_range(const_child_iterator(), const_child_iterator()); } static bool classof(const OMPClause *T) { return T->getClauseKind() == llvm::omp::OMPC_affinity; } }; /// This class implements a simple visitor for OMPClause /// subclasses. template<class ImplClass, template <typename> class Ptr, typename RetTy> class OMPClauseVisitorBase { public: #define PTR(CLASS) Ptr<CLASS> #define DISPATCH(CLASS) \ return static_cast<ImplClass*>(this)->Visit##CLASS(static_cast<PTR(CLASS)>(S)) #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) \ RetTy Visit##Class(PTR(Class) S) { DISPATCH(Class); } #include "llvm/Frontend/OpenMP/OMP.inc" RetTy Visit(PTR(OMPClause) S) { // Top switch clause: visit each OMPClause. switch (S->getClauseKind()) { #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) \ case llvm::omp::Clause::Enum: \ return Visit##Class(static_cast<PTR(Class)>(S)); #define CLAUSE_NO_CLASS(Enum, Str) \ case llvm::omp::Clause::Enum: \ break; #include "llvm/Frontend/OpenMP/OMP.inc" } } // Base case, ignore it. :) RetTy VisitOMPClause(PTR(OMPClause) Node) { return RetTy(); } #undef PTR #undef DISPATCH }; template <typename T> using const_ptr = std::add_pointer_t<std::add_const_t<T>>; template <class ImplClass, typename RetTy = void> class OMPClauseVisitor : public OMPClauseVisitorBase<ImplClass, std::add_pointer_t, RetTy> {}; template<class ImplClass, typename RetTy = void> class ConstOMPClauseVisitor : public OMPClauseVisitorBase <ImplClass, const_ptr, RetTy> {}; class OMPClausePrinter final : public OMPClauseVisitor<OMPClausePrinter> { raw_ostream &OS; const PrintingPolicy &Policy; /// Process clauses with list of variables. template <typename T> void VisitOMPClauseList(T *Node, char StartSym); /// Process motion clauses. template <typename T> void VisitOMPMotionClause(T *Node); public: OMPClausePrinter(raw_ostream &OS, const PrintingPolicy &Policy) : OS(OS), Policy(Policy) {} #define GEN_CLANG_CLAUSE_CLASS #define CLAUSE_CLASS(Enum, Str, Class) void Visit##Class(Class *S); #include "llvm/Frontend/OpenMP/OMP.inc" }; struct OMPTraitProperty { llvm::omp::TraitProperty Kind = llvm::omp::TraitProperty::invalid; /// The raw string as we parsed it. This is needed for the `isa` trait set /// (which accepts anything) and (later) extensions. StringRef RawString; }; struct OMPTraitSelector { Expr *ScoreOrCondition = nullptr; llvm::omp::TraitSelector Kind = llvm::omp::TraitSelector::invalid; llvm::SmallVector<OMPTraitProperty, 1> Properties; }; struct OMPTraitSet { llvm::omp::TraitSet Kind = llvm::omp::TraitSet::invalid; llvm::SmallVector<OMPTraitSelector, 2> Selectors; }; /// Helper data structure representing the traits in a match clause of an /// `declare variant` or `metadirective`. The outer level is an ordered /// collection of selector sets, each with an associated kind and an ordered /// collection of selectors. A selector has a kind, an optional score/condition, /// and an ordered collection of properties. 
class OMPTraitInfo { /// Private constructor accesible only by ASTContext. OMPTraitInfo() {} friend class ASTContext; public: /// Reconstruct a (partial) OMPTraitInfo object from a mangled name. OMPTraitInfo(StringRef MangledName); /// The outermost level of selector sets. llvm::SmallVector<OMPTraitSet, 2> Sets; bool anyScoreOrCondition( llvm::function_ref<bool(Expr *&, bool /* IsScore */)> Cond) { return llvm::any_of(Sets, [&](OMPTraitSet &Set) { return llvm::any_of( Set.Selectors, [&](OMPTraitSelector &Selector) { return Cond(Selector.ScoreOrCondition, /* IsScore */ Selector.Kind != llvm::omp::TraitSelector::user_condition); }); }); } /// Create a variant match info object from this trait info object. While the /// former is a flat representation the actual main difference is that the /// latter uses clang::Expr to store the score/condition while the former is /// independent of clang. Thus, expressions and conditions are evaluated in /// this method. void getAsVariantMatchInfo(ASTContext &ASTCtx, llvm::omp::VariantMatchInfo &VMI) const; /// Return a string representation identifying this context selector. std::string getMangledName() const; /// Check the extension trait \p TP is active. bool isExtensionActive(llvm::omp::TraitProperty TP) { for (const OMPTraitSet &Set : Sets) { if (Set.Kind != llvm::omp::TraitSet::implementation) continue; for (const OMPTraitSelector &Selector : Set.Selectors) { if (Selector.Kind != llvm::omp::TraitSelector::implementation_extension) continue; for (const OMPTraitProperty &Property : Selector.Properties) { if (Property.Kind == TP) return true; } } } return false; } /// Print a human readable representation into \p OS. void print(llvm::raw_ostream &OS, const PrintingPolicy &Policy) const; }; llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo &TI); llvm::raw_ostream &operator<<(llvm::raw_ostream &OS, const OMPTraitInfo *TI); /// Clang specific specialization of the OMPContext to lookup target features. struct TargetOMPContext final : public llvm::omp::OMPContext { TargetOMPContext(ASTContext &ASTCtx, std::function<void(StringRef)> &&DiagUnknownTrait, const FunctionDecl *CurrentFunctionDecl); virtual ~TargetOMPContext() = default; /// See llvm::omp::OMPContext::matchesISATrait bool matchesISATrait(StringRef RawString) const override; private: std::function<bool(StringRef)> FeatureValidityCheck; std::function<void(StringRef)> DiagUnknownTrait; llvm::StringMap<bool> FeatureMap; }; /// Contains data for OpenMP directives: clauses, children /// expressions/statements (helpers for codegen) and associated statement, if /// any. class OMPChildren final : private llvm::TrailingObjects<OMPChildren, OMPClause *, Stmt *> { friend TrailingObjects; friend class OMPClauseReader; friend class OMPExecutableDirective; template <typename T> friend class OMPDeclarativeDirective; /// Numbers of clauses. unsigned NumClauses = 0; /// Number of child expressions/stmts. unsigned NumChildren = 0; /// true if the directive has associated statement. bool HasAssociatedStmt = false; /// Define the sizes of each trailing object array except the last one. This /// is required for TrailingObjects to work properly. 
size_t numTrailingObjects(OverloadToken<OMPClause *>) const { return NumClauses; } OMPChildren() = delete; OMPChildren(unsigned NumClauses, unsigned NumChildren, bool HasAssociatedStmt) : NumClauses(NumClauses), NumChildren(NumChildren), HasAssociatedStmt(HasAssociatedStmt) {} static size_t size(unsigned NumClauses, bool HasAssociatedStmt, unsigned NumChildren); static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses); static OMPChildren *Create(void *Mem, ArrayRef<OMPClause *> Clauses, Stmt *S, unsigned NumChildren = 0); static OMPChildren *CreateEmpty(void *Mem, unsigned NumClauses, bool HasAssociatedStmt = false, unsigned NumChildren = 0); public: unsigned getNumClauses() const { return NumClauses; } unsigned getNumChildren() const { return NumChildren; } bool hasAssociatedStmt() const { return HasAssociatedStmt; } /// Set associated statement. void setAssociatedStmt(Stmt *S) { getTrailingObjects<Stmt *>()[NumChildren] = S; } void setChildren(ArrayRef<Stmt *> Children); /// Sets the list of variables for this clause. /// /// \param Clauses The list of clauses for the directive. /// void setClauses(ArrayRef<OMPClause *> Clauses); /// Returns statement associated with the directive. const Stmt *getAssociatedStmt() const { return const_cast<OMPChildren *>(this)->getAssociatedStmt(); } Stmt *getAssociatedStmt() { assert(HasAssociatedStmt && "Expected directive with the associated statement."); return getTrailingObjects<Stmt *>()[NumChildren]; } /// Get the clauses storage. MutableArrayRef<OMPClause *> getClauses() { return llvm::makeMutableArrayRef(getTrailingObjects<OMPClause *>(), NumClauses); } ArrayRef<OMPClause *> getClauses() const { return const_cast<OMPChildren *>(this)->getClauses(); } /// Returns the captured statement associated with the /// component region within the (combined) directive. /// /// \param RegionKind Component region kind. const CapturedStmt * getCapturedStmt(OpenMPDirectiveKind RegionKind, ArrayRef<OpenMPDirectiveKind> CaptureRegions) const { assert(llvm::any_of( CaptureRegions, [=](const OpenMPDirectiveKind K) { return K == RegionKind; }) && "RegionKind not found in OpenMP CaptureRegions."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (auto ThisCaptureRegion : CaptureRegions) { if (ThisCaptureRegion == RegionKind) return CS; CS = cast<CapturedStmt>(CS->getCapturedStmt()); } llvm_unreachable("Incorrect RegionKind specified for directive."); } /// Get innermost captured statement for the construct. 
CapturedStmt * getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) { assert(hasAssociatedStmt() && "Must have associated captured statement."); assert(!CaptureRegions.empty() && "At least one captured statement must be provided."); auto *CS = cast<CapturedStmt>(getAssociatedStmt()); for (unsigned Level = CaptureRegions.size(); Level > 1; --Level) CS = cast<CapturedStmt>(CS->getCapturedStmt()); return CS; } const CapturedStmt * getInnermostCapturedStmt(ArrayRef<OpenMPDirectiveKind> CaptureRegions) const { return const_cast<OMPChildren *>(this)->getInnermostCapturedStmt( CaptureRegions); } MutableArrayRef<Stmt *> getChildren(); ArrayRef<Stmt *> getChildren() const { return const_cast<OMPChildren *>(this)->getChildren(); } Stmt *getRawStmt() { assert(HasAssociatedStmt && "Expected directive with the associated statement."); if (auto *CS = dyn_cast<CapturedStmt>(getAssociatedStmt())) { Stmt *S = nullptr; do { S = CS->getCapturedStmt(); CS = dyn_cast<CapturedStmt>(S); } while (CS); return S; } return getAssociatedStmt(); } const Stmt *getRawStmt() const { return const_cast<OMPChildren *>(this)->getRawStmt(); } Stmt::child_range getAssociatedStmtAsRange() { if (!HasAssociatedStmt) return Stmt::child_range(Stmt::child_iterator(), Stmt::child_iterator()); return Stmt::child_range(&getTrailingObjects<Stmt *>()[NumChildren], &getTrailingObjects<Stmt *>()[NumChildren + 1]); } }; } // namespace clang #endif // LLVM_CLANG_AST_OPENMPCLAUSE_H
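// Illustrative sketch, not part of the header above: one way the classof() and
// children() interfaces declared in OpenMPClause.h are typically consumed.
// Only llvm::dyn_cast, OMPClause::children(), OMPNontemporalClause and its
// varlist_size() member are taken from the real API; the two function names
// below are hypothetical helpers, not clang entry points.
static unsigned countNonNullClauseChildren(clang::OMPClause *C) {
  // children() may yield null child slots for clauses that were built empty.
  unsigned N = 0;
  for (clang::Stmt *Child : C->children())
    if (Child)
      ++N;
  return N;
}

static unsigned countNontemporalVars(clang::OMPClause *C) {
  // classof() (defined per clause above) is what lets the LLVM RTTI casts
  // resolve the dynamic clause kind.
  if (auto *NT = llvm::dyn_cast<clang::OMPNontemporalClause>(C))
    return NT->varlist_size();
  return 0;
}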
bfs_custom.c
/* Copyright (C) 2010-2011 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #include "common.h" #include "oned_csr.h" #include <mpi.h> #include <stdint.h> #include <inttypes.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include <limits.h> #include <assert.h> char IMPLEMENTATION[] = "MPI BFS_CUSTOM"; /* Add your own BFS code into this file (or a copy of it). */ /* Data structure definitions: customize these for your own data distribution * and temporary data structures. */ static oned_csr_graph g; void make_graph_data_structure(const tuple_graph* const tg) { convert_graph_to_oned_csr(tg, &g); } void free_graph_data_structure(void) { free_oned_csr_graph(&g); } int bfs_writes_depth_map(void) { /* Change to 1 if high 16 bits of each entry of pred are the (zero-based) BFS * level number, with UINT16_MAX for unreachable vertices. */ return 0; } /* BFS implementation. */ void run_bfs(int64_t root, int64_t* pred) { /* Predefined entities you can use in your BFS (from common.h and oned_csr.h): * + rank: global variable containing MPI rank * + size: global variable containing MPI size * + DIV_SIZE: single-parameter macro that divides by size (using a shift * when properly set up) * + MOD_SIZE: single-parameter macro that reduces modulo size (using a * mask when properly set up) * + VERTEX_OWNER: single-parameter macro returning the owner of a global * vertex number * + VERTEX_LOCAL: single-parameter macro returning the local offset of a * global vertex number * + VERTEX_TO_GLOBAL: single-parameter macro converting a local vertex * offset to a global number * + g.nlocalverts: number of vertices stored on the local rank * + g.nglobalverts: total number of vertices in the graph * + g.nlocaledges: number of graph edges stored locally * + g.rowstarts, g.column: zero-based compressed sparse row data * structure for the local part of the graph * * All macros documented above evaluate their arguments exactly once. * * The graph is stored using a 1-D, cyclic distribution: all edges incident * to vertex v are stored on rank (v % size) (aka VERTEX_OWNER(v)). Edges * that are not self-loops are stored twice, once for each endpoint; * duplicates edges are kept. The neighbors of vertex v can be obtained on * rank VERTEX_OWNER(v); they are stored in elements * {g.rowstarts[VERTEX_LOCAL(v)] ... g.rowstarts[VERTEX_LOCAL(v) + 1] - 1} * (inclusive) of g.column. * * Upon exit, your BFS must have filled in: * + pred (an array of size g.nlocalverts): * - The predecessor of vertex v in the BFS tree should go into * pred[VERTEX_LOCAL(v)] on rank VERTEX_OWNER(v) * - The predecessor of root is root * - The predecessor of any unreachable vertex is -1 * * The validator will check this for correctness. */ } void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p) { const int64_t* restrict vertex = vertex_p; int* restrict owner = owner_p; size_t* restrict local = local_p; ptrdiff_t i; #pragma omp parallel for for (i = 0; i < (ptrdiff_t)count; ++i) { owner[i] = VERTEX_OWNER(vertex[i]); local[i] = VERTEX_LOCAL(vertex[i]); } } int64_t vertex_to_global_for_pred(int v_rank, size_t v_local) { return VERTEX_TO_GLOBAL(v_rank, v_local); } size_t get_nlocalverts_for_pred(void) { return g.nlocalverts; }
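/* Illustrative sketch, not part of the skeleton above: one way the run_bfs()
 * contract documented in the comment block could be satisfied when the job
 * runs on a single rank (size == 1), using only the g.rowstarts/g.column CSR
 * arrays. It is not called by the harness, omits the MPI communication a real
 * multi-rank BFS needs, and skips allocation error handling. */
static void bfs_single_rank_sketch(int64_t root, int64_t* pred) {
  size_t i, head = 0, tail = 0;
  int64_t* queue = (int64_t*)malloc(g.nlocalverts * sizeof(int64_t));
  for (i = 0; i < g.nlocalverts; ++i) pred[i] = -1; /* unreachable by default */
  pred[VERTEX_LOCAL(root)] = root;                  /* root is its own predecessor */
  queue[tail++] = root;
  while (head < tail) {
    int64_t v = queue[head++];
    size_t ei = g.rowstarts[VERTEX_LOCAL(v)];
    size_t ei_end = g.rowstarts[VERTEX_LOCAL(v) + 1];
    for (; ei < ei_end; ++ei) {
      int64_t w = g.column[ei];
      if (pred[VERTEX_LOCAL(w)] == -1) { /* first visit: record the parent */
        pred[VERTEX_LOCAL(w)] = v;
        queue[tail++] = w;
      }
    }
  }
  free(queue);
}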
CBitVolume.h
/////////////////////////////////////////////////////////////////////////////// // $Id$ // // 3DimViewer // Lightweight 3D DICOM viewer. // // Copyright 2008-2016 3Dim Laboratory s.r.o. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. // /////////////////////////////////////////////////////////////////////////////// #ifndef CBitVolume_H #define CBitVolume_H /////////////////////////////////////////////////////////////////////////////// // include files #include <VPL/Image/Volume.h> #include <data/CBitOperations.h> namespace data { //! Functor gets the value of bit at given position and set this value to bit at target position. Clears the source bit. template <typename T> class CBitReplace { public: //! Default constructor. CBitReplace(const T& bitToClear, const T& bitToSet) : m_bitToClear(bitToClear), m_bitToSet(bitToSet) {} //! Checks and eventually replaces value of a given parameter. void operator ()(T& value) { // check if the bit is set int bit = data::getBitFromValue<T>(value, m_bitToClear); if (bit > 0) { // clear bit data::clearBitInValue<T>(value, m_bitToClear); // set bit if (m_bitToSet > 0) { data::setBitInValue<T>(value, m_bitToSet); } } } protected: //! bit indexes T m_bitToClear, m_bitToSet; }; //! Functor moves bit to the target position with shifting the rest of the bits. template <typename T> class CBitMove { public: //! Default constructor. CBitMove(const T& sourceBitIndex, const T& destinationBitIndex) : m_sourceBitIndex(sourceBitIndex), m_destinationBitIndex(destinationBitIndex) {} //! Moves bit. void operator ()(T& value) { if (value == 0) { return; } // save the bit value int sourceBit = data::getBitFromValue<T>(value, m_sourceBitIndex); if (m_sourceBitIndex < m_destinationBitIndex) { for (T i = m_sourceBitIndex; i < m_destinationBitIndex; ++i) { // get the bit on the left int leftBit = data::getBitFromValue<T>(value, i + 1); if (leftBit > 0) { // set bit data::setBitInValue<T>(value, i); } else { // clear bit data::clearBitInValue<T>(value, i); } } } else { for (T i = m_sourceBitIndex; i > m_destinationBitIndex; --i) { // get the bit on the right int rightBit = data::getBitFromValue<T>(value, i - 1); if (rightBit > 0) { // set bit data::setBitInValue<T>(value, i); } else { // clear bit data::clearBitInValue<T>(value, i); } } } if (sourceBit > 0) { // set bit data::setBitInValue<T>(value, m_destinationBitIndex); } else { // clear bit data::clearBitInValue<T>(value, m_destinationBitIndex); } } protected: //! bit indexes T m_sourceBitIndex, m_destinationBitIndex; }; //! Functor clears bit at given position and shifts higher bits to the left. template <typename T> class CBitClearAndShift { public: //! Default constructor. CBitClearAndShift(const T& clearBitIndex) : m_clearBitIndex(clearBitIndex) {} //! Clears given bit and shifts all higher bits. 
void operator ()(T& value) { if (value == 0) { return; } T valueCopy = value; T leftMostSetBitIndex = 0; while (valueCopy > 1) { ++leftMostSetBitIndex; valueCopy = valueCopy >> 1; } if (leftMostSetBitIndex >= m_clearBitIndex) { for (T i = m_clearBitIndex; i < leftMostSetBitIndex; ++i) { // get the bit on the left int leftBit = data::getBitFromValue<T>(value, i + 1); if (leftBit > 0) { // set bit data::setBitInValue<T>(value, i); } else { // clear bit data::clearBitInValue<T>(value, i); } } data::clearBitInValue<T>(value, leftMostSetBitIndex); } } protected: //! bit indexes T m_clearBitIndex; }; //! Functor copies bit value at given position to bit at the target position. template <typename T> class CBitCopy { public: //! Default constructor. CBitCopy(const T& sourceBitIndex, const T& destinationBitIndex) : m_sourceBitIndex(sourceBitIndex), m_destinationBitIndex(destinationBitIndex) {} //! Copies bit. void operator ()(T& value) { if (value == 0) { return; } // get the bit value int sourceBit = data::getBitFromValue<T>(value, m_sourceBitIndex); if (sourceBit > 0) { // set bit data::setBitInValue<T>(value, m_destinationBitIndex); } else { // clear bit data::clearBitInValue<T>(value, m_destinationBitIndex); } } protected: //! bit indexes T m_sourceBitIndex, m_destinationBitIndex; }; //! Volume, which works with bits in each voxel. //! Every bit in voxel represent one region, so there can be maximum of sizeof(tVoxel) overlapping regions. template <typename tVoxel> class CBitVolume : public vpl::img::CVolume<tVoxel> { public: //! Default constructor creates volume of zero size. CBitVolume() : vpl::img::CVolume<tVoxel>() {} //! Constructor that allocates volume data. CBitVolume(vpl::tSize XSize, vpl::tSize YSize, vpl::tSize ZSize, vpl::tSize Margin = 0) : vpl::img::CVolume<tVoxel>(XSize, YSize, ZSize, Margin) {} ~CBitVolume() {} using vpl::img::CVolume<tVoxel>::at; //! Is bit set? //! \param i Index to volume to get voxel. //! \param bitIndex Index of bit in voxel. bool at(vpl::tSize i, vpl::tSize bitIndex) { vpl::tSize bit = data::getBitFromValue<tVoxel>(at(i), bitIndex); return bit > 0; } //! Is bit set? //! \param x, y, z Coordinates of voxel. //! \param bitIndex Index of bit in voxel. bool at(vpl::tSize x, vpl::tSize y, vpl::tSize z, vpl::tSize bitIndex) { vpl::tSize bit = data::getBitFromValue<tVoxel>(at(x, y, z), bitIndex); return bit > 0; } //! Sets the subscripted bit in voxel (to 1). //! \param i Index to volume to get voxel. //! \param bitIndex Index of bit in voxel. CBitVolume& setBit(vpl::tSize i, vpl::tSize bitIndex) { tVoxel& value = at(i); data::setBitInValue<tVoxel>(value, bitIndex); vpl::img::CVolume<tVoxel>::set(i, value); return *this; } //! Sets the subscripted bit in voxel (to 1). //! \param x, y, z Coordinates of voxel. //! \param bitIndex Index of bit in voxel. CBitVolume& setBit(vpl::tSize x, vpl::tSize y, vpl::tSize z, vpl::tSize bitIndex) { tVoxel& value = at(x, y, z); data::setBitInValue<tVoxel>(value, bitIndex); return *this; } //! Clears the subscripted bit in voxel. (to 0) //! \param i Index to volume to get voxel. //! \param bitIndex Index of bit in voxel. CBitVolume& clearBit(vpl::tSize i, vpl::tSize bitIndex) { tVoxel& value = at(i); data::clearBitInValue<tVoxel>(value, bitIndex); vpl::img::CVolume<tVoxel>::set(i, value); return *this; } //! Clears the subscripted bit in voxel. (to 0) //! \param x, y, z Coordinates of voxel. //! \param bitIndex Index of bit in voxel. 
CBitVolume& clearBit(vpl::tSize x, vpl::tSize y, vpl::tSize z, vpl::tSize bitIndex) { tVoxel& value = at(x, y, z); data::clearBitInValue<tVoxel>(value, bitIndex); return *this; } //! Moves bit on sourceBitIndex to destinationBitIndex with shifting all bits in between. //! \param sourceBitIndex Index of bit, which will be moved. //! \param destinationBitIndex Index of bit, where the value of source bit will be moved. void moveBitInVolume(tVoxel sourceBitIndex, tVoxel destinationBitIndex) { vpl::img::CVolume<tVoxel>::pforEach(CBitMove<tVoxel>(sourceBitIndex, destinationBitIndex)); } //! Extract one bit from volume and return it in new volume. //! Creates new volume and returns it. //! \param bitIndex Index of bit, which values will be copied to new volume. All the other bits will be 0. CBitVolume getMaskedVolume(vpl::tSize bitIndex) { CBitVolume volume; volume.resize(vpl::img::CVolume<tVoxel>::m_Size, vpl::img::CVolume<tVoxel>::m_Margin); volume.fillEntire(0); const vpl::tSize sx = vpl::img::CVolume<tVoxel>::m_Size.x(); const vpl::tSize sy = vpl::img::CVolume<tVoxel>::m_Size.y(); const vpl::tSize sz = vpl::img::CVolume<tVoxel>::m_Size.z(); const vpl::tSize Offset = vpl::img::CVolume<tVoxel>::getXOffset(); #pragma omp parallel for for (vpl::tSize k = 0; k < sz; ++k) { for (vpl::tSize j = 0; j < sy; ++j) { vpl::tSize idx = vpl::img::CVolume<tVoxel>::getIdx(0, j, k); for (vpl::tSize i = 0; i < sx; ++i, idx += Offset) { if (at(idx, bitIndex)) { volume.setBit(idx, bitIndex); } } } } return volume; } //! Replace value of one bit with values from given volume. //! Volumes must be of the same size! //! \param volume Volume from which the bit value will be read. //! \param dstBitIndex Index of bit, which will be set. //! \param inputVolumeBitIndex Index of bit in given volume, which value will be read. CBitVolume& setBitFromVolume(CBitVolume& volume, vpl::tSize dstBitIndex, vpl::tSize inputVolumeBitIndex) { const vpl::tSize sx = vpl::img::CVolume<tVoxel>::m_Size.x(); const vpl::tSize sy = vpl::img::CVolume<tVoxel>::m_Size.y(); const vpl::tSize sz = vpl::img::CVolume<tVoxel>::m_Size.z(); const vpl::tSize Offset = vpl::img::CVolume<tVoxel>::getXOffset(); #pragma omp parallel for for (vpl::tSize k = 0; k < sz; ++k) { for (vpl::tSize j = 0; j < sy; ++j) { vpl::tSize idx = vpl::img::CVolume<tVoxel>::getIdx(0, j, k); for (vpl::tSize i = 0; i < sx; ++i, idx += Offset) { if (volume.at(idx, inputVolumeBitIndex)) { setBit(idx, dstBitIndex); } else { clearBit(idx, dstBitIndex); } } } } return *this; } //! Appends given amount of bits from given volume from given bit index. //! Volumes must be of the same size! //! \param volume Volume from which the bits value will be read. //! \param fromBitIndex Index of first bit, which will be appended. //! \param appendBitCnt Number of bits, which will be appended. fromBitIndex + appendBitIndex must be smaller than voxel bits count (sizeof(tVoxel))! 
CBitVolume& appendVolume(CBitVolume& volume, vpl::tSize fromBitIndex, vpl::tSize appendBitCnt) { const vpl::tSize sx = vpl::img::CVolume<tVoxel>::m_Size.x(); const vpl::tSize sy = vpl::img::CVolume<tVoxel>::m_Size.y(); const vpl::tSize sz = vpl::img::CVolume<tVoxel>::m_Size.z(); const vpl::tSize Offset = vpl::img::CVolume<tVoxel>::getXOffset(); #pragma omp parallel for for (vpl::tSize k = 0; k < sz; ++k) { for (vpl::tSize j = 0; j < sy; ++j) { vpl::tSize idx = vpl::img::CVolume<tVoxel>::getIdx(0, j, k); for (vpl::tSize i = 0; i < sx; ++i, idx += Offset) { for (int b = 0; b < appendBitCnt; ++b) { if (volume.at(idx, b)) { setBit(idx, fromBitIndex + b); } else { clearBit(idx, fromBitIndex + b); } } } } } return *this; } //! Performs bits union. Takes bit value from volume and bit value from given volume, performs union and set the value to output bit. //! Volumes must be of the same size! //! \param maskVolume Volume, with which the union will be performed. //! \param sourceBitIndex Index of bit in current volume. //! \param maskSourceBitIndex Index of bit in given volume. //! \param outputBitIndex Index of bit in current volume, in which the result of the union will be written. CBitVolume& performUnion(CBitVolume& maskVolume, vpl::tSize sourceBitIndex, vpl::tSize maskSourceBitIndex, vpl::tSize outputBitIndex) { const vpl::tSize sx = vpl::img::CVolume<tVoxel>::m_Size.x(); const vpl::tSize sy = vpl::img::CVolume<tVoxel>::m_Size.y(); const vpl::tSize sz = vpl::img::CVolume<tVoxel>::m_Size.z(); const vpl::tSize Offset = vpl::img::CVolume<tVoxel>::getXOffset(); #pragma omp parallel for for (vpl::tSize k = 0; k < sz; ++k) { for (vpl::tSize j = 0; j < sy; ++j) { vpl::tSize idx = vpl::img::CVolume<tVoxel>::getIdx(0, j, k); for (vpl::tSize i = 0; i < sx; ++i, idx += Offset) { if (at(idx, sourceBitIndex) || maskVolume.at(idx, maskSourceBitIndex)) { setBit(idx, outputBitIndex); } else { clearBit(idx, outputBitIndex); } } } } return *this; } //! Performs bits intersection. Takes bit value from volume and bit value from given volume, performs intersection and set the value to output bit. //! Volumes must be of the same size! //! \param maskVolume Volume, with which the intersection will be performed. //! \param sourceBitIndex Index of bit in current volume. //! \param maskSourceBitIndex Index of bit in given volume. //! \param outputBitIndex Index of bit in current volume, in which the result of the intersection will be written. CBitVolume& performIntersection(CBitVolume& maskVolume, vpl::tSize sourceBitIndex, vpl::tSize maskSourceBitIndex, vpl::tSize outputBitIndex) { const vpl::tSize sx = vpl::img::CVolume<tVoxel>::m_Size.x(); const vpl::tSize sy = vpl::img::CVolume<tVoxel>::m_Size.y(); const vpl::tSize sz = vpl::img::CVolume<tVoxel>::m_Size.z(); const vpl::tSize Offset = vpl::img::CVolume<tVoxel>::getXOffset(); #pragma omp parallel for for (vpl::tSize k = 0; k < sz; ++k) { for (vpl::tSize j = 0; j < sy; ++j) { vpl::tSize idx = vpl::img::CVolume<tVoxel>::getIdx(0, j, k); for (vpl::tSize i = 0; i < sx; ++i, idx += Offset) { if (at(idx, sourceBitIndex)) { if (maskVolume.at(idx, maskSourceBitIndex)) { setBit(idx, outputBitIndex); } else { clearBit(idx, outputBitIndex); } } } } } return *this; } //! Performs bits difference. Takes bit value from volume and bit value from given volume, performs difference and set the value to output bit. //! Volumes must be of the same size! //! \param maskVolume Volume, with which the difference will be performed. //! \param sourceBitIndex Index of bit in current volume. 
//! \param maskSourceBitIndex Index of bit in given volume. //! \param outputBitIndex Index of bit in current volume, in which the result of the difference will be written. CBitVolume& performDifference(CBitVolume& maskVolume, vpl::tSize sourceBitIndex, vpl::tSize maskSourceBitIndex, vpl::tSize outputBitIndex) { const vpl::tSize sx = vpl::img::CVolume<tVoxel>::m_Size.x(); const vpl::tSize sy = vpl::img::CVolume<tVoxel>::m_Size.y(); const vpl::tSize sz = vpl::img::CVolume<tVoxel>::m_Size.z(); const vpl::tSize Offset = vpl::img::CVolume<tVoxel>::getXOffset(); #pragma omp parallel for for (vpl::tSize k = 0; k < sz; ++k) { for (vpl::tSize j = 0; j < sy; ++j) { vpl::tSize idx = vpl::img::CVolume<tVoxel>::getIdx(0, j, k); for (vpl::tSize i = 0; i < sx; ++i, idx += Offset) { if (at(idx, sourceBitIndex)) { if (maskVolume.at(idx, maskSourceBitIndex)) { clearBit(idx, outputBitIndex); } else { setBit(idx, outputBitIndex); } } } } } return *this; } }; } // namespace data #endif // CBitVolume_H /////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////////////////////////////
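// Illustrative sketch, not part of CBitVolume.h: how the per-voxel region bits
// are meant to be combined through the interface above. The voxel type is left
// as a template parameter, the coordinates and bit indices are arbitrary
// example values, and the function name is hypothetical.
namespace data
{

template <typename tVoxel>
void exampleCombineRegions(CBitVolume<tVoxel>& volume)
{
    // Put one voxel into region 0 and a neighbouring voxel into region 1.
    volume.setBit(0, 0, 0, 0);
    volume.setBit(1, 0, 0, 1);

    // Region 2 becomes the per-voxel union of region 0 (this volume) and
    // region 1 (the mask volume, here the same volume).
    volume.performUnion(volume, 0, 1, 2);

    // Query a single voxel: true, because (0,0,0) belongs to region 0.
    bool inUnion = volume.at(0, 0, 0, 2);
    (void)inUnion;

    // Move region bit 2 down to bit 0, shifting the bits in between.
    volume.moveBitInVolume(2, 0);
}

} // namespace data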
RelativeNeighborhoodGraph.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_RNG_H_ #define _SPTAG_COMMON_RNG_H_ #include "NeighborhoodGraph.h" namespace SPTAG { namespace COMMON { class RelativeNeighborhoodGraph: public NeighborhoodGraph { public: RelativeNeighborhoodGraph() { m_pNeighborhoodGraph.SetName("RNG"); } void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) { DimensionType count = 0; for (int j = 0; j < numResults && count < m_iNeighborhoodSize; j++) { const BasicResult& item = queryResults[j]; if (item.VID < 0) break; if (item.VID == node) continue; bool good = true; for (DimensionType k = 0; k < count; k++) { if (index->ComputeDistance(index->GetSample(nodes[k]), index->GetSample(item.VID)) <= item.Dist) { good = false; break; } } if (good) nodes[count++] = item.VID; } for (DimensionType j = count; j < m_iNeighborhoodSize; j++) nodes[j] = -1; } void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) { std::lock_guard<std::mutex> lock(m_dataUpdateLock[node]); SizeType* nodes = m_pNeighborhoodGraph[node]; SizeType tmpNode; float tmpDist; for (DimensionType k = 0; k < m_iNeighborhoodSize; k++) { tmpNode = nodes[k]; if (tmpNode < -1) break; if (tmpNode < 0 || (tmpDist = index->ComputeDistance(index->GetSample(node), index->GetSample(tmpNode))) > insertDist || (insertDist == tmpDist && insertNode < tmpNode)) { bool good = true; for (DimensionType t = 0; t < k; t++) { if (index->ComputeDistance(index->GetSample(insertNode), index->GetSample(nodes[t])) < insertDist) { good = false; break; } } if (good) { nodes[k] = insertNode; while (tmpNode >= 0 && ++k < m_iNeighborhoodSize && nodes[k] >= -1 && index->ComputeDistance(index->GetSample(tmpNode), index->GetSample(insertNode)) >= index->ComputeDistance(index->GetSample(node), index->GetSample(tmpNode))) { std::swap(tmpNode, nodes[k]); } } break; } } } float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { DimensionType* correct = new DimensionType[samples]; #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < samples; i++) { SizeType x = COMMON::Utils::rand(m_iGraphSize); //int x = i; COMMON::QueryResultSet<void> query(nullptr, m_iCEF); for (SizeType y = 0; y < m_iGraphSize; y++) { if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue; float dist = index->ComputeDistance(index->GetSample(x), index->GetSample(y)); query.AddPoint(y, dist); } query.SortResult(); SizeType * exact_rng = new SizeType[m_iNeighborhoodSize]; RebuildNeighbors(index, x, exact_rng, query.GetResults(), m_iCEF); correct[i] = 0; for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) { if (exact_rng[j] == -1) { correct[i] += m_iNeighborhoodSize - j; break; } for (DimensionType k = 0; k < m_iNeighborhoodSize; k++) if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j]) { correct[i]++; break; } } delete[] exact_rng; } float acc = 0; for (SizeType i = 0; i < samples; i++) acc += float(correct[i]); acc = acc / samples / m_iNeighborhoodSize; delete[] correct; return acc; } }; } } #endif
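// Standalone sketch, not part of the SPTAG header above: the RNG pruning rule
// used by RebuildNeighbors(), restated over plain std::vector so the selection
// criterion is easier to follow. Candidates must arrive sorted by increasing
// distance to the query point; a candidate is kept only if every neighbor kept
// so far is farther from it than the candidate is from the query. All names
// below are illustrative.
#include <cstddef>
#include <functional>
#include <vector>

struct RngCandidate
{
    int id;            // vector id of the candidate
    float distToQuery; // distance from the query point to the candidate
};

inline std::vector<int> SelectRngNeighbors(
    const std::vector<RngCandidate>& sortedCandidates,
    const std::function<float(int, int)>& dist, // pairwise distance between two ids
    std::size_t maxNeighbors)
{
    std::vector<int> kept;
    for (const RngCandidate& c : sortedCandidates)
    {
        if (kept.size() >= maxNeighbors) break;
        bool good = true;
        for (int n : kept)
        {
            // Same occlusion test as RebuildNeighbors(): a previously kept
            // neighbor at least as close to the candidate as the query is
            // prunes the candidate.
            if (dist(n, c.id) <= c.distToQuery)
            {
                good = false;
                break;
            }
        }
        if (good) kept.push_back(c.id);
    }
    return kept;
}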
matrix.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M M AAA TTTTT RRRR IIIII X X % % MM MM A A T R R I X X % % M M M AAAAA T RRRR I X % % M M A A T R R I X X % % M M A A T R R IIIII X X % % % % % % MagickCore Matrix Methods % % % % Software Design % % Cristy % % August 2007 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image-private.h" #include "MagickCore/matrix.h" #include "MagickCore/matrix-private.h" #include "MagickCore/memory_.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" /* Typedef declaration. */ struct _MatrixInfo { CacheType type; size_t columns, rows, stride; MagickSizeType length; MagickBooleanType mapped, synchronize; char path[MagickPathExtent]; int file; void *elements; SemaphoreInfo *semaphore; size_t signature; }; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M a t r i x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMatrixInfo() allocates the ImageInfo structure. % % The format of the AcquireMatrixInfo method is: % % MatrixInfo *AcquireMatrixInfo(const size_t columns,const size_t rows, % const size_t stride,ExceptionInfo *exception) % % A description of each parameter follows: % % o columns: the matrix columns. % % o rows: the matrix rows. % % o stride: the matrix stride. % % o exception: return any errors or warnings in this structure. 
% */ #if defined(SIGBUS) static void MatrixSignalHandler(int status) { ThrowFatalException(CacheFatalError,"UnableToExtendMatrixCache"); } #endif static inline MagickOffsetType WriteMatrixElements( const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) LockSemaphoreInfo(matrix_info->semaphore); if (lseek(matrix_info->file,offset,SEEK_SET) < 0) { UnlockSemaphoreInfo(matrix_info->semaphore); return((MagickOffsetType) -1); } #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX)); #else count=pwrite(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } #if !defined(MAGICKCORE_HAVE_PWRITE) UnlockSemaphoreInfo(matrix_info->semaphore); #endif return(i); } static MagickBooleanType SetMatrixExtent( MatrixInfo *magick_restrict matrix_info, MagickSizeType length) { MagickOffsetType count, extent, offset; if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(matrix_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) return(MagickTrue); extent=(MagickOffsetType) length-1; count=WriteMatrixElements(matrix_info,extent,1,(const unsigned char *) ""); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (matrix_info->synchronize != MagickFalse) (void) posix_fallocate(matrix_info->file,offset+1,extent-offset); #endif #if defined(SIGBUS) (void) signal(SIGBUS,MatrixSignalHandler); #endif return(count != (MagickOffsetType) 1 ? 
MagickFalse : MagickTrue); } MagickExport MatrixInfo *AcquireMatrixInfo(const size_t columns, const size_t rows,const size_t stride,ExceptionInfo *exception) { char *synchronize; MagickBooleanType status; MatrixInfo *matrix_info; matrix_info=(MatrixInfo *) AcquireMagickMemory(sizeof(*matrix_info)); if (matrix_info == (MatrixInfo *) NULL) return((MatrixInfo *) NULL); (void) ResetMagickMemory(matrix_info,0,sizeof(*matrix_info)); matrix_info->signature=MagickCoreSignature; matrix_info->columns=columns; matrix_info->rows=rows; matrix_info->stride=stride; matrix_info->semaphore=AcquireSemaphoreInfo(); synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { matrix_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } matrix_info->length=(MagickSizeType) columns*rows*stride; if (matrix_info->columns != (size_t) (matrix_info->length/rows/stride)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'","matrix cache"); return(DestroyMatrixInfo(matrix_info)); } matrix_info->type=MemoryCache; status=AcquireMagickResource(AreaResource,matrix_info->length); if ((status != MagickFalse) && (matrix_info->length == (MagickSizeType) ((size_t) matrix_info->length))) { status=AcquireMagickResource(MemoryResource,matrix_info->length); if (status != MagickFalse) { matrix_info->mapped=MagickFalse; matrix_info->elements=AcquireMagickMemory((size_t) matrix_info->length); if (matrix_info->elements == NULL) { matrix_info->mapped=MagickTrue; matrix_info->elements=MapBlob(-1,IOMode,0,(size_t) matrix_info->length); } if (matrix_info->elements == (unsigned short *) NULL) RelinquishMagickResource(MemoryResource,matrix_info->length); } } matrix_info->file=(-1); if (matrix_info->elements == (unsigned short *) NULL) { status=AcquireMagickResource(DiskResource,matrix_info->length); if (status == MagickFalse) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'","matrix cache"); return(DestroyMatrixInfo(matrix_info)); } matrix_info->type=DiskCache; (void) AcquireMagickResource(MemoryResource,matrix_info->length); matrix_info->file=AcquireUniqueFileResource(matrix_info->path); if (matrix_info->file == -1) return(DestroyMatrixInfo(matrix_info)); status=AcquireMagickResource(MapResource,matrix_info->length); if (status != MagickFalse) { status=SetMatrixExtent(matrix_info,matrix_info->length); if (status != MagickFalse) { matrix_info->elements=(void *) MapBlob(matrix_info->file,IOMode,0, (size_t) matrix_info->length); if (matrix_info->elements != NULL) matrix_info->type=MapCache; else RelinquishMagickResource(MapResource,matrix_info->length); } } } return(matrix_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M a g i c k M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMagickMatrix() allocates and returns a matrix in the form of an % array of pointers to an array of doubles, with all values pre-set to zero. % % This used to generate the two dimensional matrix, and vectors required % for the GaussJordanElimination() method below, solving some system of % simultanious equations. 
% % The format of the AcquireMagickMatrix method is: % % double **AcquireMagickMatrix(const size_t number_rows, % const size_t size) % % A description of each parameter follows: % % o number_rows: the number pointers for the array of pointers % (first dimension). % % o size: the size of the array of doubles each pointer points to % (second dimension). % */ MagickExport double **AcquireMagickMatrix(const size_t number_rows, const size_t size) { double **matrix; register ssize_t i, j; matrix=(double **) AcquireQuantumMemory(number_rows,sizeof(*matrix)); if (matrix == (double **) NULL) return((double **) NULL); for (i=0; i < (ssize_t) number_rows; i++) { matrix[i]=(double *) AcquireQuantumMemory(size,sizeof(*matrix[i])); if (matrix[i] == (double *) NULL) { for (j=0; j < i; j++) matrix[j]=(double *) RelinquishMagickMemory(matrix[j]); matrix=(double **) RelinquishMagickMemory(matrix); return((double **) NULL); } for (j=0; j < (ssize_t) size; j++) matrix[i][j]=0.0; } return(matrix); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y M a t r i x I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyMatrixInfo() dereferences a matrix, deallocating memory associated % with the matrix. % % The format of the DestroyImage method is: % % MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport MatrixInfo *DestroyMatrixInfo(MatrixInfo *matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); LockSemaphoreInfo(matrix_info->semaphore); switch (matrix_info->type) { case MemoryCache: { if (matrix_info->mapped == MagickFalse) matrix_info->elements=RelinquishMagickMemory(matrix_info->elements); else { (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length); matrix_info->elements=(unsigned short *) NULL; } RelinquishMagickResource(MemoryResource,matrix_info->length); break; } case MapCache: { (void) UnmapBlob(matrix_info->elements,(size_t) matrix_info->length); matrix_info->elements=NULL; RelinquishMagickResource(MapResource,matrix_info->length); } case DiskCache: { if (matrix_info->file != -1) (void) close(matrix_info->file); (void) RelinquishUniqueFileResource(matrix_info->path); RelinquishMagickResource(DiskResource,matrix_info->length); break; } default: break; } UnlockSemaphoreInfo(matrix_info->semaphore); RelinquishSemaphoreInfo(&matrix_info->semaphore); return((MatrixInfo *) RelinquishMagickMemory(matrix_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G a u s s J o r d a n E l i m i n a t i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GaussJordanElimination() returns a matrix in reduced row echelon form, % while simultaneously reducing and thus solving the augumented results % matrix. % % See also http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % % The format of the GaussJordanElimination method is: % % MagickBooleanType GaussJordanElimination(double **matrix, % double **vectors,const size_t rank,const size_t number_vectors) % % A description of each parameter follows: % % o matrix: the matrix to be reduced, as an 'array of row pointers'. % % o vectors: the additional matrix argumenting the matrix for row reduction. % Producing an 'array of column vectors'. 
% % o rank: The size of the matrix (both rows and columns). % Also represents the number of terms that need to be solved. % % o number_vectors: Number of vector columns, augmenting the above matrix. % Usually 1, but can be more for more complex equation solving. % % Note that the 'matrix' is given as an 'array of row pointers' of rank size. % That is, values can be assigned as matrix[row][column] where 'row' is % typically the equation, and 'column' is the term of the equation. % That is, the matrix is in the form of a 'row first array'. % % However 'vectors' is an 'array of column pointers' which can have any number % of columns, with each column array the same 'rank' size as 'matrix'. % % This allows for simpler handling of the results, especially if only one % column 'vector' is all that is required to produce the desired solution. % % For example, the 'vectors' can consist of a pointer to a simple array of % doubles, when only one set of simultaneous equations is to be solved from % the given set of coefficient weighted terms. % % double **matrix = AcquireMagickMatrix(8UL,8UL); % double coefficients[8]; % ... % GaussJordanElimination(matrix, &coefficients, 8UL, 1UL); % % However by specifying more 'columns' (as an 'array of vector columns'), % you can use this function to solve a set of 'separable' equations. % % For example a distortion function where u = U(x,y) v = V(x,y) % And the functions U() and V() have separate coefficients, but are being % generated from a common x,y->u,v data set. % % Another example is generation of a color gradient from a set of colors at % specific coordinates, such as a list x,y -> r,g,b,a. % % You can also use the 'vectors' to generate an inverse of the given 'matrix' % though as a 'column first array' rather than a 'row first array'.
For % details see http://en.wikipedia.org/wiki/Gauss-Jordan_elimination % */ MagickPrivate MagickBooleanType GaussJordanElimination(double **matrix, double **vectors,const size_t rank,const size_t number_vectors) { #define GaussJordanSwap(x,y) \ { \ if ((x) != (y)) \ { \ (x)+=(y); \ (y)=(x)-(y); \ (x)=(x)-(y); \ } \ } double max, scale; register ssize_t i, j, k; ssize_t column, *columns, *pivots, row, *rows; columns=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*columns)); rows=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*rows)); pivots=(ssize_t *) AcquireQuantumMemory(rank,sizeof(*pivots)); if ((rows == (ssize_t *) NULL) || (columns == (ssize_t *) NULL) || (pivots == (ssize_t *) NULL)) { if (pivots != (ssize_t *) NULL) pivots=(ssize_t *) RelinquishMagickMemory(pivots); if (columns != (ssize_t *) NULL) columns=(ssize_t *) RelinquishMagickMemory(columns); if (rows != (ssize_t *) NULL) rows=(ssize_t *) RelinquishMagickMemory(rows); return(MagickFalse); } (void) ResetMagickMemory(columns,0,rank*sizeof(*columns)); (void) ResetMagickMemory(rows,0,rank*sizeof(*rows)); (void) ResetMagickMemory(pivots,0,rank*sizeof(*pivots)); column=0; row=0; for (i=0; i < (ssize_t) rank; i++) { max=0.0; for (j=0; j < (ssize_t) rank; j++) if (pivots[j] != 1) { for (k=0; k < (ssize_t) rank; k++) if (pivots[k] != 0) { if (pivots[k] > 1) return(MagickFalse); } else if (fabs(matrix[j][k]) >= max) { max=fabs(matrix[j][k]); row=j; column=k; } } pivots[column]++; if (row != column) { for (k=0; k < (ssize_t) rank; k++) GaussJordanSwap(matrix[row][k],matrix[column][k]); for (k=0; k < (ssize_t) number_vectors; k++) GaussJordanSwap(vectors[k][row],vectors[k][column]); } rows[i]=row; columns[i]=column; if (matrix[column][column] == 0.0) return(MagickFalse); /* sigularity */ scale=PerceptibleReciprocal(matrix[column][column]); matrix[column][column]=1.0; for (j=0; j < (ssize_t) rank; j++) matrix[column][j]*=scale; for (j=0; j < (ssize_t) number_vectors; j++) vectors[j][column]*=scale; for (j=0; j < (ssize_t) rank; j++) if (j != column) { scale=matrix[j][column]; matrix[j][column]=0.0; for (k=0; k < (ssize_t) rank; k++) matrix[j][k]-=scale*matrix[column][k]; for (k=0; k < (ssize_t) number_vectors; k++) vectors[k][j]-=scale*vectors[k][column]; } } for (j=(ssize_t) rank-1; j >= 0; j--) if (columns[j] != rows[j]) for (i=0; i < (ssize_t) rank; i++) GaussJordanSwap(matrix[i][rows[j]],matrix[i][columns[j]]); pivots=(ssize_t *) RelinquishMagickMemory(pivots); rows=(ssize_t *) RelinquishMagickMemory(rows); columns=(ssize_t *) RelinquishMagickMemory(columns); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x C o l u m n s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixColumns() returns the number of columns in the matrix. % % The format of the GetMatrixColumns method is: % % size_t GetMatrixColumns(const MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport size_t GetMatrixColumns(const MatrixInfo *matrix_info) { assert(matrix_info != (MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); return(matrix_info->columns); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x E l e m e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixElement() returns the specifed element in the matrix. 
% % The format of the GetMatrixElement method is: % % MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info, % const ssize_t x,const ssize_t y,void *value) % % A description of each parameter follows: % % o matrix_info: the matrix columns. % % o x: the matrix x-offset. % % o y: the matrix y-offset. % % o value: return the matrix element in this buffer. % */ static inline ssize_t EdgeX(const ssize_t x,const size_t columns) { if (x < 0L) return(0L); if (x >= (ssize_t) columns) return((ssize_t) (columns-1)); return(x); } static inline ssize_t EdgeY(const ssize_t y,const size_t rows) { if (y < 0L) return(0L); if (y >= (ssize_t) rows) return((ssize_t) (rows-1)); return(y); } static inline MagickOffsetType ReadMatrixElements( const MatrixInfo *magick_restrict matrix_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) LockSemaphoreInfo(matrix_info->semaphore); if (lseek(matrix_info->file,offset,SEEK_SET) < 0) { UnlockSemaphoreInfo(matrix_info->semaphore); return((MagickOffsetType) -1); } #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX)); #else count=pread(matrix_info->file,buffer+i,(size_t) MagickMin(length-i, (MagickSizeType) SSIZE_MAX),(off_t) (offset+i)); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } #if !defined(MAGICKCORE_HAVE_PREAD) UnlockSemaphoreInfo(matrix_info->semaphore); #endif return(i); } MagickExport MagickBooleanType GetMatrixElement(const MatrixInfo *matrix_info, const ssize_t x,const ssize_t y,void *value) { MagickOffsetType count, i; assert(matrix_info != (const MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); i=(MagickOffsetType) EdgeY(y,matrix_info->rows)*matrix_info->columns+ EdgeX(x,matrix_info->columns); if (matrix_info->type != DiskCache) { (void) memcpy(value,(unsigned char *) matrix_info->elements+i* matrix_info->stride,matrix_info->stride); return(MagickTrue); } count=ReadMatrixElements(matrix_info,i*matrix_info->stride, matrix_info->stride,(unsigned char *) value); if (count != (MagickOffsetType) matrix_info->stride) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a t r i x R o w s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMatrixRows() returns the number of rows in the matrix. % % The format of the GetMatrixRows method is: % % size_t GetMatrixRows(const MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport size_t GetMatrixRows(const MatrixInfo *matrix_info) { assert(matrix_info != (const MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); return(matrix_info->rows); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + L e a s t S q u a r e s A d d T e r m s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LeastSquaresAddTerms() adds one set of terms and associate results to the % given matrix and vectors for solving using least-squares function fitting. 
% % The format of the LeastSquaresAddTerms method is: % % void LeastSquaresAddTerms(double **matrix,double **vectors, % const double *terms,const double *results,const size_t rank, % const size_t number_vectors); % % A description of each parameter follows: % % o matrix: the square matrix to add given terms/results to. % % o vectors: the result vectors to add terms/results to. % % o terms: the pre-calculated terms (without the unknown coefficient % weights) that form the equation being added. % % o results: the result(s) that should be generated from the given terms % weighted by the yet-to-be-solved coefficients. % % o rank: the rank or size of the dimensions of the square matrix. % Also the length of vectors, and number of terms being added. % % o number_vectors: Number of result vectors, and number of results being % added. Also represents the number of separable systems of equations % that are being solved. % % Example of use... % % 2 dimensional Affine Equations (which are separable) % c0*x + c2*y + c4*1 => u % c1*x + c3*y + c5*1 => v % % double **matrix = AcquireMagickMatrix(3UL,3UL); % double **vectors = AcquireMagickMatrix(2UL,3UL); % double terms[3], results[2]; % ... % for each given x,y -> u,v % terms[0] = x; % terms[1] = y; % terms[2] = 1; % results[0] = u; % results[1] = v; % LeastSquaresAddTerms(matrix,vectors,terms,results,3UL,2UL); % ... % if ( GaussJordanElimination(matrix,vectors,3UL,2UL) ) { % c0 = vectors[0][0]; % c2 = vectors[0][1]; % c4 = vectors[0][2]; % c1 = vectors[1][0]; % c3 = vectors[1][1]; % c5 = vectors[1][2]; % } % else % printf("Matrix unsolvable\n"); % RelinquishMagickMatrix(matrix,3UL); % RelinquishMagickMatrix(vectors,2UL); % */ MagickPrivate void LeastSquaresAddTerms(double **matrix,double **vectors, const double *terms,const double *results,const size_t rank, const size_t number_vectors) { register ssize_t i, j; for (j=0; j < (ssize_t) rank; j++) { for (i=0; i < (ssize_t) rank; i++) matrix[i][j]+=terms[i]*terms[j]; for (i=0; i < (ssize_t) number_vectors; i++) vectors[i][j]+=results[i]*terms[j]; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a t r i x T o I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MatrixToImage() returns a matrix as an image. The matrix elements must be % of type double, otherwise nonsense is returned. % % The format of the MatrixToImage method is: % % Image *MatrixToImage(const MatrixInfo *matrix_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o matrix_info: the matrix. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MatrixToImage(const MatrixInfo *matrix_info, ExceptionInfo *exception) { CacheView *image_view; double max_value, min_value, scale_factor, value; Image *image; MagickBooleanType status; ssize_t y; assert(matrix_info != (const MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (matrix_info->stride < sizeof(double)) return((Image *) NULL); /* Determine range of matrix. 
*/ (void) GetMatrixElement(matrix_info,0,0,&value); min_value=value; max_value=value; for (y=0; y < (ssize_t) matrix_info->rows; y++) { register ssize_t x; for (x=0; x < (ssize_t) matrix_info->columns; x++) { if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse) continue; if (value < min_value) min_value=value; else if (value > max_value) max_value=value; } } if ((min_value == 0.0) && (max_value == 0.0)) scale_factor=0; else if (min_value == max_value) { scale_factor=(double) QuantumRange/min_value; min_value=0; } else scale_factor=(double) QuantumRange/(max_value-min_value); /* Convert matrix to image. */ image=AcquireImage((ImageInfo *) NULL,exception); image->columns=matrix_info->columns; image->rows=matrix_info->rows; image->colorspace=GRAYColorspace; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double value; register Quantum *q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetMatrixElement(matrix_info,x,y,&value) == MagickFalse) continue; value=scale_factor*(value-min_value); *q=ClampToQuantum(value); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N u l l M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NullMatrix() sets all elements of the matrix to zero. % % The format of the NullMatrix method is: % % MagickBooleanType NullMatrix(MatrixInfo *matrix_info) % % A description of each parameter follows: % % o matrix_info: the matrix. % */ MagickExport MagickBooleanType NullMatrix(MatrixInfo *matrix_info) { register ssize_t x; ssize_t count, y; unsigned char value; assert(matrix_info != (const MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); if (matrix_info->type != DiskCache) { (void) ResetMagickMemory(matrix_info->elements,0,(size_t) matrix_info->length); return(MagickTrue); } value=0; (void) lseek(matrix_info->file,0,SEEK_SET); for (y=0; y < (ssize_t) matrix_info->rows; y++) { for (x=0; x < (ssize_t) matrix_info->length; x++) { count=write(matrix_info->file,&value,sizeof(value)); if (count != (ssize_t) sizeof(value)) break; } if (x < (ssize_t) matrix_info->length) break; } return(y < (ssize_t) matrix_info->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e l i n q u i s h M a g i c k M a t r i x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RelinquishMagickMatrix() frees the previously acquired matrix (array of % pointers to arrays of doubles). 
% % The format of the RelinquishMagickMatrix method is: % % double **RelinquishMagickMatrix(double **matrix, % const size_t number_rows) % % A description of each parameter follows: % % o matrix: the matrix to relinquish % % o number_rows: the first dimension of the acquired matrix (number of % pointers) % */ MagickExport double **RelinquishMagickMatrix(double **matrix, const size_t number_rows) { register ssize_t i; if (matrix == (double **) NULL ) return(matrix); for (i=0; i < (ssize_t) number_rows; i++) matrix[i]=(double *) RelinquishMagickMemory(matrix[i]); matrix=(double **) RelinquishMagickMemory(matrix); return(matrix); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t M a t r i x E l e m e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetMatrixElement() sets the specified element in the matrix. % % The format of the SetMatrixElement method is: % % MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info, % const ssize_t x,const ssize_t y,const void *value) % % A description of each parameter follows: % % o matrix_info: the matrix. % % o x: the matrix x-offset. % % o y: the matrix y-offset. % % o value: set the matrix element to this value. % */ MagickExport MagickBooleanType SetMatrixElement(const MatrixInfo *matrix_info, const ssize_t x,const ssize_t y,const void *value) { MagickOffsetType count, i; assert(matrix_info != (const MatrixInfo *) NULL); assert(matrix_info->signature == MagickCoreSignature); i=(MagickOffsetType) y*matrix_info->columns+x; if ((i < 0) || ((MagickSizeType) (i*matrix_info->stride) >= matrix_info->length)) return(MagickFalse); if (matrix_info->type != DiskCache) { (void) memcpy((unsigned char *) matrix_info->elements+i* matrix_info->stride,value,matrix_info->stride); return(MagickTrue); } count=WriteMatrixElements(matrix_info,i*matrix_info->stride, matrix_info->stride,(unsigned char *) value); if (count != (MagickOffsetType) matrix_info->stride) return(MagickFalse); return(MagickTrue); }
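A minimal usage sketch of the least-squares helpers documented above (an illustration, not part of matrix.c): it fits a straight line y = c0 + c1*x to sample points by accumulating the normal equations with LeastSquaresAddTerms() and solving them with GaussJordanElimination(). The "matrix-private.h" include is an assumption about where the MagickPrivate declarations live, and error handling is elided.

#include <stdio.h>
#include "MagickCore/studio.h"
#include "MagickCore/matrix.h"
#include "MagickCore/matrix-private.h"  /* assumed header for the MagickPrivate declarations */

static void FitLine(const double *x,const double *y,const size_t n)
{
  double
    **matrix,
    **vectors,
    terms[2],
    results[1];

  size_t
    i;

  matrix=AcquireMagickMatrix(2UL,2UL);   /* 2x2 normal-equation matrix */
  vectors=AcquireMagickMatrix(1UL,2UL);  /* one solution vector of rank 2 */
  if ((matrix == (double **) NULL) || (vectors == (double **) NULL))
    return;
  for (i=0; i < n; i++)
  {
    terms[0]=1.0;       /* weight of the unknown intercept c0 */
    terms[1]=x[i];      /* weight of the unknown slope c1 */
    results[0]=y[i];    /* observed value for this sample */
    LeastSquaresAddTerms(matrix,vectors,terms,results,2UL,1UL);
  }
  if (GaussJordanElimination(matrix,vectors,2UL,1UL) != MagickFalse)
    (void) printf("y = %g + %g*x\n",vectors[0][0],vectors[0][1]);
  matrix=RelinquishMagickMatrix(matrix,2UL);
  vectors=RelinquishMagickMatrix(vectors,1UL);
}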
flexProxDualDataL2.h
#ifndef flexProxDualL2_H #define flexProxDualL2_H #include "flexProx.h" //! represents prox for a L2 data term /*! \f$ \frac{\alpha}{2}\|\cdot-f\|_2^2 \f$ */ template<typename T> class flexProxDualDataL2 : public flexProx<T> { #ifdef __CUDACC__ typedef thrust::device_vector<T> Tdata; #else typedef std::vector<T> Tdata; #endif public: flexProxDualDataL2() : flexProx<T>(dualL2DataProx) { } ~flexProxDualDataL2() { if (VERBOSE > 0) printf("Destructor prox\n!"); } void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers) { } #ifdef __CUDACC__ struct flexProxDualDataL2Functor { __host__ __device__ flexProxDualDataL2Functor(T _alpha) : alpha(_alpha){}; template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = alpha / (thrust::get<2>(t) + alpha) * (thrust::get<1>(t) - thrust::get<2>(t) * thrust::get<3>(t)); } const T alpha; }; #endif void applyProx(T alpha, flexBoxData<T>* data, const std::vector<int> &dualNumbers, const std::vector<int> &primalNumbers, std::vector<Tdata> &fList) { #ifdef __CUDACC__ for (int i = 0; i < dualNumbers.size(); i++) { auto startIterator = thrust::make_zip_iterator(thrust::make_tuple(data->y[dualNumbers[i]].begin(), data->yTilde[dualNumbers[i]].begin(), data->sigmaElt[dualNumbers[i]].begin(), fList[i].begin())); auto endIterator = thrust::make_zip_iterator( thrust::make_tuple(data->y[dualNumbers[i]].end(), data->yTilde[dualNumbers[i]].end(), data->sigmaElt[dualNumbers[i]].end(), fList[i].end())); thrust::for_each(startIterator,endIterator,flexProxDualDataL2Functor(alpha)); } #else for (int i = 0; i < dualNumbers.size(); i++) { T* ptrY = data->y[dualNumbers[i]].data(); T* ptrYtilde = data->yTilde[dualNumbers[i]].data(); T* ptrSigma = data->sigmaElt[dualNumbers[i]].data(); T* ptrF = fList[i].data(); int numElements = (int)data->yTilde[dualNumbers[i]].size(); #pragma omp parallel for for (int j = 0; j < numElements; j++) { ptrY[j] = alpha / (ptrSigma[j] + alpha) * (ptrYtilde[j] - ptrSigma[j] * ptrF[j]); } } #endif } }; #endif
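For reference, the update applied element-wise by both the CUDA functor and the OpenMP loop above is the closed-form dual proximal step for the data term (alpha/2)*||u - f||_2^2: y = alpha/(sigma + alpha) * (yTilde - sigma*f). A standalone sketch of the same formula over plain std::vector (illustrative names, not part of flexBox):

#include <cstddef>
#include <vector>

// Closed-form dual prox for the L2 data term: one element-wise pass.
template <typename T>
void proxDualDataL2(std::vector<T>& y, const std::vector<T>& yTilde,
                    const std::vector<T>& sigma, const std::vector<T>& f,
                    T alpha)
{
    for (std::size_t j = 0; j < y.size(); ++j)
        y[j] = alpha / (sigma[j] + alpha) * (yTilde[j] - sigma[j] * f[j]);
}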
GB_unop__log10_fc64_fc64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__log10_fc64_fc64 // op(A') function: GB_unop_tran__log10_fc64_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // cast: GxB_FC64_t cij = aij // unaryop: cij = GB_clog10 (aij) #define GB_ATYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_clog10 (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC64_t z = aij ; \ Cx [pC] = GB_clog10 (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LOG10 || GxB_NO_FC64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__log10_fc64_fc64 ( GxB_FC64_t *Cx, // Cx and Ax may be aliased const GxB_FC64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC64_t aij = Ax [p] ; GxB_FC64_t z = aij ; Cx [p] = GB_clog10 (z) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__log10_fc64_fc64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
declare_mapper_ast_print.c
// RUN: %clang_cc1 -verify -fopenmp -ast-print %s | FileCheck %s // RUN: %clang_cc1 -fopenmp -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s // RUN: %clang_cc1 -verify -fopenmp-simd -ast-print %s | FileCheck %s // RUN: %clang_cc1 -fopenmp-simd -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp-simd -include-pch %t -fsyntax-only -verify %s -ast-print | FileCheck %s // expected-no-diagnostics #ifndef HEADER #define HEADER // CHECK: struct vec { struct vec { int len; double *data; }; // CHECK: }; // CHECK: struct dat { struct dat { int i; double d; #pragma omp declare mapper(id: struct vec v) map(v.len) // CHECK: #pragma omp declare mapper (id : struct vec v) map(tofrom: v.len){{$}} }; // CHECK: }; #pragma omp declare mapper(id: struct vec v) map(v.len) // CHECK: #pragma omp declare mapper (id : struct vec v) map(tofrom: v.len){{$}} #pragma omp declare mapper(default : struct vec kk) map(kk.len) map(kk.data[0:2]) // CHECK: #pragma omp declare mapper (default : struct vec kk) map(tofrom: kk.len) map(tofrom: kk.data[0:2]){{$}} #pragma omp declare mapper(struct dat d) map(to: d.d) // CHECK: #pragma omp declare mapper (default : struct dat d) map(to: d.d){{$}} // CHECK: int main() { int main() { #pragma omp declare mapper(id: struct vec v) map(v.len) // CHECK: #pragma omp declare mapper (id : struct vec v) map(tofrom: v.len) { #pragma omp declare mapper(id: struct vec v) map(v.len) // CHECK: #pragma omp declare mapper (id : struct vec v) map(tofrom: v.len) } return 0; } // CHECK: } #endif
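A small usage sketch to accompany the test above (illustrative only, not additional CHECK-verified test input): once a mapper such as 'id' has been declared for struct vec, an OpenMP 5.0 map clause can request it by name with the mapper modifier, so only the members listed in the declare mapper directive are transferred.

// Hypothetical follow-on usage of the mapper "id" declared above (OpenMP 5.0 syntax).
void use_mapper(struct vec *v) {
// Only v->len is mapped, as specified by the named mapper "id".
#pragma omp target map(mapper(id), tofrom: v[0:1])
  v[0].len++;
}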
integrator.h
#ifndef _INTEGRATOR_H #define _INTEGRATOR_H #include <omp.h> #include <optional> #include "core.h" #include "photon_map.h" #include "scene.h" class Integrator { public: // do preliminary jobs before calling integrate virtual void build(const Scene& scene, Sampler& sampler) = 0; // compute radiance coming from the given ray virtual Vec3f integrate(const Ray& ray, const Scene& scene, Sampler& sampler) const = 0; // compute cosine term // NOTE: need to account for the asymmetry of BSDF when photon tracing // https://pbr-book.org/3ed-2018/Light_Transport_III_Bidirectional_Methods/The_Path-Space_Measurement_Equation#x3-Non-symmetryDuetoShadingNormals // Veach, Eric. Robust Monte Carlo methods for light transport simulation. // Stanford University, 1998. Section 5.3 static float cosTerm(const Vec3f& wo, const Vec3f& wi, const SurfaceInfo& surfaceInfo, const TransportDirection& transport_dir) { const float wi_ns = dot(wi, surfaceInfo.shadingNormal); const float wi_ng = dot(wi, surfaceInfo.geometricNormal); const float wo_ns = dot(wo, surfaceInfo.shadingNormal); const float wo_ng = dot(wo, surfaceInfo.geometricNormal); // prevent light leaks if (wi_ng * wi_ns <= 0 || wo_ng * wo_ns <= 0) { return 0; } if (transport_dir == TransportDirection::FROM_CAMERA) { return std::abs(wi_ns); } else if (transport_dir == TransportDirection::FROM_LIGHT) { return std::abs(wo_ns) * std::abs(wi_ng) / std::abs(wo_ng); } else { spdlog::error("invalid transport direction"); std::exit(EXIT_FAILURE); } } }; // implementation of path tracing // NOTE: for reference purpose class PathTracing : public Integrator { private: const int maxDepth; public: PathTracing(int maxDepth = 100) : maxDepth(maxDepth) {} void build(const Scene& scene, Sampler& sampler) override {} Vec3f integrate(const Ray& ray_in, const Scene& scene, Sampler& sampler) const override { Vec3f radiance(0); Ray ray = ray_in; Vec3f throughput(1, 1, 1); for (int k = 0; k < maxDepth; ++k) { IntersectInfo info; if (scene.intersect(ray, info)) { // russian roulette if (k > 0) { const float russian_roulette_prob = std::min( std::max(throughput[0], std::max(throughput[1], throughput[2])), 1.0f); if (sampler.getNext1D() >= russian_roulette_prob) { break; } throughput /= russian_roulette_prob; } // Le if (info.hitPrimitive->hasAreaLight()) { radiance += throughput * info.hitPrimitive->Le(info.surfaceInfo, -ray.direction); } // sample direction by BxDF Vec3f dir; float pdf_dir; Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_CAMERA, sampler, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_CAMERA) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } else { break; } } return radiance; } }; // implementation of photon mapping class PhotonMapping : public Integrator { private: // number of photons used for making global photon map const int nPhotonsGlobal; // number of photons used for radiance estimation by global photon map const int nEstimationGlobal; // number of photons for making caustics photon map const int nPhotonsCaustics; // number of photons used for radiance estimation by caustics photon map const int nEstimationCaustics; // maximum depth to estimate radiance by final gathering const int finalGatheringDepth; // maximum depth of photon tracing, eye tracing const int maxDepth; PhotonMap globalPhotonMap; PhotonMap causticsPhotonMap; // compute reflected radiance with global photon map Vec3f computeRadianceWithPhotonMap(const 
Vec3f& wo, const IntersectInfo& info) const { // get nearby photons float max_dist2; const std::vector<int> photon_indices = globalPhotonMap.queryKNearestPhotons(info.surfaceInfo.position, nEstimationGlobal, max_dist2); Vec3f Lo; for (const int photon_idx : photon_indices) { const Photon& photon = globalPhotonMap.getIthPhoton(photon_idx); const Vec3f f = info.hitPrimitive->evaluateBxDF( wo, photon.wi, info.surfaceInfo, TransportDirection::FROM_CAMERA); Lo += f * photon.throughput; } if (photon_indices.size() > 0) { Lo /= (nPhotonsGlobal * PI * max_dist2); } return Lo; } // compute reflected radiance with caustics photon map Vec3f computeCausticsWithPhotonMap(const Vec3f& wo, const IntersectInfo& info) const { // get nearby photons float max_dist2; const std::vector<int> photon_indices = causticsPhotonMap.queryKNearestPhotons(info.surfaceInfo.position, nEstimationGlobal, max_dist2); Vec3f Lo; for (const int photon_idx : photon_indices) { const Photon& photon = causticsPhotonMap.getIthPhoton(photon_idx); const Vec3f f = info.hitPrimitive->evaluateBxDF( wo, photon.wi, info.surfaceInfo, TransportDirection::FROM_CAMERA); Lo += f * photon.throughput; } if (photon_indices.size() > 0) { Lo /= (nPhotonsCaustics * PI * max_dist2); } return Lo; } // compute direct illumination with explicit light sampling(NEE) Vec3f computeDirectIllumination(const Scene& scene, const Vec3f& wo, const IntersectInfo& info, Sampler& sampler) const { Vec3f Ld; // sample light float pdf_choose_light; const std::shared_ptr<Light> light = scene.sampleLight(sampler, pdf_choose_light); // sample point on light float pdf_pos_light; const SurfaceInfo light_surf = light->samplePoint(sampler, pdf_pos_light); // convert positional pdf to directional pdf const Vec3f wi = normalize(light_surf.position - info.surfaceInfo.position); const float r = length(light_surf.position - info.surfaceInfo.position); const float pdf_dir = pdf_pos_light * r * r / std::abs(dot(-wi, light_surf.shadingNormal)); // create shadow ray Ray ray_shadow(info.surfaceInfo.position, wi); ray_shadow.tmax = r - RAY_EPS; // trace ray to the light IntersectInfo info_shadow; if (!scene.intersect(ray_shadow, info_shadow)) { const Vec3f Le = light->Le(light_surf, -wi); const Vec3f f = info.hitPrimitive->evaluateBxDF( wo, wi, info.surfaceInfo, TransportDirection::FROM_CAMERA); const float cos = std::abs(dot(wi, info.surfaceInfo.shadingNormal)); Ld = f * cos * Le / (pdf_choose_light * pdf_dir); } return Ld; } Vec3f computeIndirectIlluminationRecursive(const Scene& scene, const Vec3f& wo, const IntersectInfo& info, Sampler& sampler, int depth) const { if (depth >= maxDepth) return Vec3f(0); Vec3f Li; // sample direction by BxDF Vec3f dir; float pdf_dir; const Vec3f f = info.hitPrimitive->sampleBxDF( wo, info.surfaceInfo, TransportDirection::FROM_CAMERA, sampler, dir, pdf_dir); const float cos = std::abs(dot(info.surfaceInfo.shadingNormal, dir)); // trace final gathering ray Ray ray_fg(info.surfaceInfo.position, dir); IntersectInfo info_fg; if (scene.intersect(ray_fg, info_fg)) { const BxDFType bxdf_type = info_fg.hitPrimitive->getBxDFType(); // when hitting diffuse, compute radiance with photon map if (bxdf_type == BxDFType::DIFFUSE) { Li += f * cos * computeRadianceWithPhotonMap(-ray_fg.direction, info_fg) / pdf_dir; } // when hitting specular, recursively call this function // NOTE: to include the path like LSDSDE else if (bxdf_type == BxDFType::SPECULAR) { Li += f * cos * computeIndirectIlluminationRecursive( scene, -ray_fg.direction, info_fg, sampler, depth + 1) / 
pdf_dir; } } return Li; } // compute indirect illumination with final gathering Vec3f computeIndirectIllumination(const Scene& scene, const Vec3f& wo, const IntersectInfo& info, Sampler& sampler) const { return computeIndirectIlluminationRecursive(scene, wo, info, sampler, 0); } // sample initial ray from light and compute initial throughput Ray sampleRayFromLight(const Scene& scene, Sampler& sampler, Vec3f& throughput) { // sample light float light_choose_pdf; const std::shared_ptr<Light> light = scene.sampleLight(sampler, light_choose_pdf); // sample point on light float light_pos_pdf; const SurfaceInfo light_surf = light->samplePoint(sampler, light_pos_pdf); // sample direction on light float light_dir_pdf; const Vec3f dir = light->sampleDirection(light_surf, sampler, light_dir_pdf); // spawn ray Ray ray(light_surf.position, dir); throughput = light->Le(light_surf, dir) / (light_choose_pdf * light_pos_pdf * light_dir_pdf) * std::abs(dot(dir, light_surf.shadingNormal)); return ray; } Vec3f integrateRecursive(const Ray& ray, const Scene& scene, Sampler& sampler, int depth) const { if (depth >= maxDepth) return Vec3f(0); IntersectInfo info; if (scene.intersect(ray, info)) { // when directly hitting light if (info.hitPrimitive->hasAreaLight()) { return info.hitPrimitive->Le(info.surfaceInfo, -ray.direction); } const BxDFType bxdf_type = info.hitPrimitive->getBxDFType(); // if hitting diffuse surface, computed reflected radiance with photon // map if (bxdf_type == BxDFType::DIFFUSE) { if (depth >= finalGatheringDepth) { return computeRadianceWithPhotonMap(-ray.direction, info); } else { // compute direct illumination by explicit light sampling const Vec3f Ld = computeDirectIllumination(scene, -ray.direction, info, sampler); // compute caustics illumination with caustics photon map const Vec3f Lc = computeCausticsWithPhotonMap(-ray.direction, info); // compute indirect illumination with final gathering const Vec3f Li = computeIndirectIllumination(scene, -ray.direction, info, sampler); return (Ld + Lc + Li); } } // if hitting specular surface, generate next ray and continue // raytracing else if (bxdf_type == BxDFType::SPECULAR) { if (depth >= 3) { // sample direction by BxDF Vec3f dir; float pdf_dir; const Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_CAMERA, sampler, dir, pdf_dir); // recursively raytrace const Ray next_ray(info.surfaceInfo.position, dir); const Vec3f throughput = f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_CAMERA) / pdf_dir; return throughput * integrateRecursive(next_ray, scene, sampler, depth + 1); } // sample all direction at shallow depth // NOTE: to prevent noise at fresnel reflection else { // sample all direction const std::vector<DirectionPair> dir_pairs = info.hitPrimitive->sampleAllBxDF(-ray.direction, info.surfaceInfo, TransportDirection::FROM_CAMERA); // recursively raytrace Vec3f Lo; for (const auto& dp : dir_pairs) { const Vec3f dir = dp.first; const Vec3f f = dp.second; const Ray next_ray(info.surfaceInfo.position, dir); const Vec3f throughput = f * std::abs(dot(dir, info.surfaceInfo.shadingNormal)); Lo += throughput * integrateRecursive(next_ray, scene, sampler, depth + 1); } return Lo; } } else { spdlog::error("[PhotonMapping] invalid BxDF type"); return Vec3f(0); } } else { // ray goes out to the sky return Vec3f(0); } return Vec3f(0); } public: PhotonMapping(int nPhotonsGlobal, int nEstimationGlobal, float nPhotonsCausticsMultiplier, int nEstimationCaustics, int 
strictCalcDepth, int maxDepth) : nPhotonsGlobal(nPhotonsGlobal), nEstimationGlobal(nEstimationGlobal), nPhotonsCaustics(nPhotonsGlobal * nPhotonsCausticsMultiplier), nEstimationCaustics(nEstimationCaustics), finalGatheringDepth(strictCalcDepth), maxDepth(maxDepth) {} const PhotonMap* getPhotonMapPtr() const { return &globalPhotonMap; } // photon tracing and build photon map void build(const Scene& scene, Sampler& sampler) override { std::vector<Photon> photons; // init sampler for each thread std::vector<std::unique_ptr<Sampler>> samplers(omp_get_max_threads()); for (int i = 0; i < samplers.size(); ++i) { samplers[i] = sampler.clone(); samplers[i]->setSeed(samplers[i]->getSeed() * (i + 1)); } // build global photon map // photon tracing spdlog::info("[PhotonMapping] tracing photons to build global photon map"); #pragma omp parallel for for (int i = 0; i < nPhotonsGlobal; ++i) { auto& sampler_per_thread = *samplers[omp_get_thread_num()]; // sample initial ray from light and set initial throughput Vec3f throughput; Ray ray = sampleRayFromLight(scene, sampler_per_thread, throughput); // trace photons // whener hitting diffuse surface, add photon to the photon array // recursively tracing photon with russian roulette for (int k = 0; k < maxDepth; ++k) { if (std::isnan(throughput[0]) || std::isnan(throughput[1]) || std::isnan(throughput[2])) { spdlog::error("[PhotonMapping] photon throughput is NaN"); break; } else if (throughput[0] < 0 || throughput[1] < 0 || throughput[2] < 0) { spdlog::error("[PhotonMapping] photon throughput is minus"); break; } IntersectInfo info; if (scene.intersect(ray, info)) { const BxDFType bxdf_type = info.hitPrimitive->getBxDFType(); if (bxdf_type == BxDFType::DIFFUSE) { // TODO: remove lock to get more speed #pragma omp critical { photons.emplace_back(throughput, info.surfaceInfo.position, -ray.direction); } } // russian roulette if (k > 0) { const float russian_roulette_prob = std::min( std::max(throughput[0], std::max(throughput[1], throughput[2])), 1.0f); if (sampler_per_thread.getNext1D() >= russian_roulette_prob) { break; } throughput /= russian_roulette_prob; } // sample direction by BxDF Vec3f dir; float pdf_dir; const Vec3f f = info.hitPrimitive->sampleBxDF( -ray.direction, info.surfaceInfo, TransportDirection::FROM_LIGHT, sampler_per_thread, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_LIGHT) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } else { // photon goes to the sky break; } } } // build photon map spdlog::info("[PhotonMapping] building global photon map"); globalPhotonMap.setPhotons(photons); globalPhotonMap.build(); // build caustics photon map if (finalGatheringDepth > 0) { photons.clear(); // photon tracing spdlog::info( "[PhotonMapping] tracing photons to build caustics photon map"); #pragma omp parallel for for (int i = 0; i < nPhotonsCaustics; ++i) { auto& sampler_per_thread = *samplers[omp_get_thread_num()]; // sample initial ray from light and set initial throughput Vec3f throughput; Ray ray = sampleRayFromLight(scene, sampler_per_thread, throughput); // when hitting diffuse surface after specular, add photon to the photon // array bool prev_specular = false; for (int k = 0; k < maxDepth; ++k) { if (std::isnan(throughput[0]) || std::isnan(throughput[1]) || std::isnan(throughput[2])) { spdlog::error("[PhotonMapping] photon throughput is NaN"); break; } else if (throughput[0] < 0 || throughput[1] < 0 || throughput[2] < 0) { 
spdlog::error("[PhotonMapping] photon throughput is minus"); break; } IntersectInfo info; if (scene.intersect(ray, info)) { const BxDFType bxdf_type = info.hitPrimitive->getBxDFType(); // break when hitting diffuse surface without previous specular if (!prev_specular && bxdf_type == BxDFType::DIFFUSE) { break; } // add photon when hitting diffuse surface after specular if (prev_specular && bxdf_type == BxDFType::DIFFUSE) { // TODO: remove lock to get more speed #pragma omp critical { photons.emplace_back(throughput, info.surfaceInfo.position, -ray.direction); } break; } prev_specular = (bxdf_type == BxDFType::SPECULAR); // russian roulette if (k > 0) { const float russian_roulette_prob = std::min(std::max(throughput[0], std::max(throughput[1], throughput[2])), 1.0f); if (sampler_per_thread.getNext1D() >= russian_roulette_prob) { break; } throughput /= russian_roulette_prob; } // sample direction by BxDF Vec3f dir; float pdf_dir; const Vec3f f = info.hitPrimitive->sampleBxDF(-ray.direction, info.surfaceInfo, TransportDirection::FROM_LIGHT, sampler_per_thread, dir, pdf_dir); // update throughput and ray throughput *= f * cosTerm(-ray.direction, dir, info.surfaceInfo, TransportDirection::FROM_LIGHT) / pdf_dir; ray = Ray(info.surfaceInfo.position, dir); } else { // photon goes to the sky break; } } } spdlog::info("[PhotonMapping] building caustics photon map"); causticsPhotonMap.setPhotons(photons); causticsPhotonMap.build(); } } Vec3f integrate(const Ray& ray_in, const Scene& scene, Sampler& sampler) const override { return integrateRecursive(ray_in, scene, sampler, 0); } }; #endif
ASTMatchers.h
//===- ASTMatchers.h - Structural query framework ---------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file implements matchers to be used together with the MatchFinder to // match AST nodes. // // Matchers are created by generator functions, which can be combined in // a functional in-language DSL to express queries over the C++ AST. // // For example, to match a class with a certain name, one would call: // cxxRecordDecl(hasName("MyClass")) // which returns a matcher that can be used to find all AST nodes that declare // a class named 'MyClass'. // // For more complicated match expressions we're often interested in accessing // multiple parts of the matched AST nodes once a match is found. In that case, // call `.bind("name")` on match expressions that match the nodes you want to // access. // // For example, when we're interested in child classes of a certain class, we // would write: // cxxRecordDecl(hasName("MyClass"), has(recordDecl().bind("child"))) // When the match is found via the MatchFinder, a user provided callback will // be called with a BoundNodes instance that contains a mapping from the // strings that we provided for the `.bind()` calls to the nodes that were // matched. // In the given example, each time our matcher finds a match we get a callback // where "child" is bound to the RecordDecl node of the matching child // class declaration. // // See ASTMatchersInternal.h for a more in-depth explanation of the // implementation details of the matcher framework. // // See ASTMatchFinder.h for how to use the generated matchers to run over // an AST. 
// //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #define LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H #include "clang/AST/ASTContext.h" #include "clang/AST/ASTTypeTraits.h" #include "clang/AST/Attr.h" #include "clang/AST/CXXInheritance.h" #include "clang/AST/Decl.h" #include "clang/AST/DeclCXX.h" #include "clang/AST/DeclFriend.h" #include "clang/AST/DeclObjC.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/LambdaCapture.h" #include "clang/AST/NestedNameSpecifier.h" #include "clang/AST/OpenMPClause.h" #include "clang/AST/OperationKinds.h" #include "clang/AST/ParentMapContext.h" #include "clang/AST/Stmt.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/StmtObjC.h" #include "clang/AST/StmtOpenMP.h" #include "clang/AST/TemplateBase.h" #include "clang/AST/TemplateName.h" #include "clang/AST/Type.h" #include "clang/AST/TypeLoc.h" #include "clang/ASTMatchers/ASTMatchersInternal.h" #include "clang/ASTMatchers/ASTMatchersMacros.h" #include "clang/Basic/AttrKinds.h" #include "clang/Basic/ExceptionSpecificationType.h" #include "clang/Basic/FileManager.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceManager.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TypeTraits.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Regex.h" #include <cassert> #include <cstddef> #include <iterator> #include <limits> #include <string> #include <utility> #include <vector> namespace clang { namespace ast_matchers { /// Maps string IDs to AST nodes matched by parts of a matcher. /// /// The bound nodes are generated by calling \c bind("id") on the node matchers /// of the nodes we want to access later. /// /// The instances of BoundNodes are created by \c MatchFinder when the user's /// callbacks are executed every time a match is found. class BoundNodes { public: /// Returns the AST node bound to \c ID. /// /// Returns NULL if there was no node bound to \c ID or if there is a node but /// it cannot be converted to the specified type. template <typename T> const T *getNodeAs(StringRef ID) const { return MyBoundNodes.getNodeAs<T>(ID); } /// Type of mapping from binding identifiers to bound nodes. This type /// is an associative container with a key type of \c std::string and a value /// type of \c clang::DynTypedNode using IDToNodeMap = internal::BoundNodesMap::IDToNodeMap; /// Retrieve mapping from binding identifiers to bound nodes. const IDToNodeMap &getMap() const { return MyBoundNodes.getMap(); } private: friend class internal::BoundNodesTreeBuilder; /// Create BoundNodes from a pre-filled map of bindings. BoundNodes(internal::BoundNodesMap &MyBoundNodes) : MyBoundNodes(MyBoundNodes) {} internal::BoundNodesMap MyBoundNodes; }; /// Types of matchers for the top-level classes in the AST class /// hierarchy. 
/// @{ using DeclarationMatcher = internal::Matcher<Decl>; using StatementMatcher = internal::Matcher<Stmt>; using TypeMatcher = internal::Matcher<QualType>; using TypeLocMatcher = internal::Matcher<TypeLoc>; using NestedNameSpecifierMatcher = internal::Matcher<NestedNameSpecifier>; using NestedNameSpecifierLocMatcher = internal::Matcher<NestedNameSpecifierLoc>; using CXXCtorInitializerMatcher = internal::Matcher<CXXCtorInitializer>; using TemplateArgumentMatcher = internal::Matcher<TemplateArgument>; using TemplateArgumentLocMatcher = internal::Matcher<TemplateArgumentLoc>; /// @} /// Matches any node. /// /// Useful when another matcher requires a child matcher, but there's no /// additional constraint. This will often be used with an explicit conversion /// to an \c internal::Matcher<> type such as \c TypeMatcher. /// /// Example: \c DeclarationMatcher(anything()) matches all declarations, e.g., /// \code /// "int* p" and "void f()" in /// int* p; /// void f(); /// \endcode /// /// Usable as: Any Matcher inline internal::TrueMatcher anything() { return internal::TrueMatcher(); } /// Matches the top declaration context. /// /// Given /// \code /// int X; /// namespace NS { /// int Y; /// } // namespace NS /// \endcode /// decl(hasDeclContext(translationUnitDecl())) /// matches "int X", but not "int Y". extern const internal::VariadicDynCastAllOfMatcher<Decl, TranslationUnitDecl> translationUnitDecl; /// Matches typedef declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefDecl() /// matches "typedef int X", but not "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefDecl> typedefDecl; /// Matches typedef name declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typedefNameDecl() /// matches "typedef int X" and "using Y = int" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypedefNameDecl> typedefNameDecl; /// Matches type alias declarations. /// /// Given /// \code /// typedef int X; /// using Y = int; /// \endcode /// typeAliasDecl() /// matches "using Y = int", but not "typedef int X" extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasDecl> typeAliasDecl; /// Matches type alias template declarations. /// /// typeAliasTemplateDecl() matches /// \code /// template <typename T> /// using Y = X<T>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TypeAliasTemplateDecl> typeAliasTemplateDecl; /// Matches AST nodes that were expanded within the main-file. /// /// Example matches X but not Y /// (matcher = cxxRecordDecl(isExpansionInMainFile()) /// \code /// #include <Y.h> /// class X {}; /// \endcode /// Y.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInMainFile, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); return SourceManager.isInMainFile( SourceManager.getExpansionLoc(Node.getBeginLoc())); } /// Matches AST nodes that were expanded within system-header-files. 
/// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInSystemHeader()) /// \code /// #include <SystemHeader.h> /// class X {}; /// \endcode /// SystemHeader.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER(isExpansionInSystemHeader, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc)) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } return SourceManager.isInSystemHeader(ExpansionLoc); } /// Matches AST nodes that were expanded within files whose name is /// partially matching a given regex. /// /// Example matches Y but not X /// (matcher = cxxRecordDecl(isExpansionInFileMatching("AST.*")) /// \code /// #include "ASTMatcher.h" /// class X {}; /// \endcode /// ASTMatcher.h: /// \code /// class Y {}; /// \endcode /// /// Usable as: Matcher<Decl>, Matcher<Stmt>, Matcher<TypeLoc> AST_POLYMORPHIC_MATCHER_REGEX(isExpansionInFileMatching, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), RegExp) { auto &SourceManager = Finder->getASTContext().getSourceManager(); auto ExpansionLoc = SourceManager.getExpansionLoc(Node.getBeginLoc()); if (ExpansionLoc.isInvalid()) { return false; } auto FileEntry = SourceManager.getFileEntryForID(SourceManager.getFileID(ExpansionLoc)); if (!FileEntry) { return false; } auto Filename = FileEntry->getName(); return RegExp->match(Filename); } /// Matches statements that are (transitively) expanded from the named macro. /// Does not match if only part of the statement is expanded from that macro or /// if different parts of the the statement are expanded from different /// appearances of the macro. AST_POLYMORPHIC_MATCHER_P(isExpandedFromMacro, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, Stmt, TypeLoc), std::string, MacroName) { // Verifies that the statement' beginning and ending are both expanded from // the same instance of the given macro. auto& Context = Finder->getASTContext(); llvm::Optional<SourceLocation> B = internal::getExpansionLocOfMacro(MacroName, Node.getBeginLoc(), Context); if (!B) return false; llvm::Optional<SourceLocation> E = internal::getExpansionLocOfMacro(MacroName, Node.getEndLoc(), Context); if (!E) return false; return *B == *E; } /// Matches declarations. /// /// Examples matches \c X, \c C, and the friend declaration inside \c C; /// \code /// void X(); /// class C { /// friend X; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<Decl> decl; /// Matches decomposition-declarations. /// /// Examples matches the declaration node with \c foo and \c bar, but not /// \c number. /// (matcher = declStmt(has(decompositionDecl()))) /// /// \code /// int number = 42; /// auto [foo, bar] = std::make_pair{42, 42}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, DecompositionDecl> decompositionDecl; /// Matches a declaration of a linkage specification. /// /// Given /// \code /// extern "C" {} /// \endcode /// linkageSpecDecl() /// matches "extern "C" {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, LinkageSpecDecl> linkageSpecDecl; /// Matches a declaration of anything that could have a name. 
/// /// Example matches \c X, \c S, the anonymous union type, \c i, and \c U; /// \code /// typedef int X; /// struct S { /// union { /// int i; /// } U; /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, NamedDecl> namedDecl; /// Matches a declaration of label. /// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelDecl() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Decl, LabelDecl> labelDecl; /// Matches a declaration of a namespace. /// /// Given /// \code /// namespace {} /// namespace test {} /// \endcode /// namespaceDecl() /// matches "namespace {}" and "namespace test {}" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceDecl> namespaceDecl; /// Matches a declaration of a namespace alias. /// /// Given /// \code /// namespace test {} /// namespace alias = ::test; /// \endcode /// namespaceAliasDecl() /// matches "namespace alias" but not "namespace test" extern const internal::VariadicDynCastAllOfMatcher<Decl, NamespaceAliasDecl> namespaceAliasDecl; /// Matches class, struct, and union declarations. /// /// Example matches \c X, \c Z, \c U, and \c S /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, RecordDecl> recordDecl; /// Matches C++ class declarations. /// /// Example matches \c X, \c Z /// \code /// class X; /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXRecordDecl> cxxRecordDecl; /// Matches C++ class template declarations. /// /// Example matches \c Z /// \code /// template<class T> class Z {}; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ClassTemplateDecl> classTemplateDecl; /// Matches C++ class template specializations. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// \endcode /// classTemplateSpecializationDecl() /// matches the specializations \c A<int> and \c A<double> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplateSpecializationDecl> classTemplateSpecializationDecl; /// Matches C++ class template partial specializations. /// /// Given /// \code /// template<class T1, class T2, int I> /// class A {}; /// /// template<class T, int I> /// class A<T, T*, I> {}; /// /// template<> /// class A<int, int, 1> {}; /// \endcode /// classTemplatePartialSpecializationDecl() /// matches the specialization \c A<T,T*,I> but not \c A<int,int,1> extern const internal::VariadicDynCastAllOfMatcher< Decl, ClassTemplatePartialSpecializationDecl> classTemplatePartialSpecializationDecl; /// Matches declarator declarations (field, variable, function /// and non-type template parameter declarations). /// /// Given /// \code /// class X { int y; }; /// \endcode /// declaratorDecl() /// matches \c int y. extern const internal::VariadicDynCastAllOfMatcher<Decl, DeclaratorDecl> declaratorDecl; /// Matches parameter variable declarations. /// /// Given /// \code /// void f(int x); /// \endcode /// parmVarDecl() /// matches \c int x. extern const internal::VariadicDynCastAllOfMatcher<Decl, ParmVarDecl> parmVarDecl; /// Matches C++ access specifier declarations. /// /// Given /// \code /// class C { /// public: /// int a; /// }; /// \endcode /// accessSpecDecl() /// matches 'public:' extern const internal::VariadicDynCastAllOfMatcher<Decl, AccessSpecDecl> accessSpecDecl; /// Matches constructor initializers. 
/// /// Examples matches \c i(42). /// \code /// class C { /// C() : i(42) {} /// int i; /// }; /// \endcode extern const internal::VariadicAllOfMatcher<CXXCtorInitializer> cxxCtorInitializer; /// Matches template arguments. /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgument() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgument> templateArgument; /// Matches template arguments (with location info). /// /// Given /// \code /// template <typename T> struct C {}; /// C<int> c; /// \endcode /// templateArgumentLoc() /// matches 'int' in C<int>. extern const internal::VariadicAllOfMatcher<TemplateArgumentLoc> templateArgumentLoc; /// Matches template name. /// /// Given /// \code /// template <typename T> class X { }; /// X<int> xi; /// \endcode /// templateName() /// matches 'X' in X<int>. extern const internal::VariadicAllOfMatcher<TemplateName> templateName; /// Matches non-type template parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// nonTypeTemplateParmDecl() /// matches 'N', but not 'T'. extern const internal::VariadicDynCastAllOfMatcher<Decl, NonTypeTemplateParmDecl> nonTypeTemplateParmDecl; /// Matches template type parameter declarations. /// /// Given /// \code /// template <typename T, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'T', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTypeParmDecl> templateTypeParmDecl; /// Matches template template parameter declarations. /// /// Given /// \code /// template <template <typename> class Z, int N> struct C {}; /// \endcode /// templateTypeParmDecl() /// matches 'Z', but not 'N'. extern const internal::VariadicDynCastAllOfMatcher<Decl, TemplateTemplateParmDecl> templateTemplateParmDecl; /// Matches public C++ declarations and C++ base specifers that specify public /// inheritance. /// /// Examples: /// \code /// class C { /// public: int a; // fieldDecl(isPublic()) matches 'a' /// protected: int b; /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived1 : public Base {}; // matches 'Base' /// struct Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPublic, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_public; } /// Matches protected C++ declarations and C++ base specifers that specify /// protected inheritance. /// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; // fieldDecl(isProtected()) matches 'b' /// private: int c; /// }; /// \endcode /// /// \code /// class Base {}; /// class Derived : protected Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isProtected, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_protected; } /// Matches private C++ declarations and C++ base specifers that specify private /// inheritance. /// /// Examples: /// \code /// class C { /// public: int a; /// protected: int b; /// private: int c; // fieldDecl(isPrivate()) matches 'c' /// }; /// \endcode /// /// \code /// struct Base {}; /// struct Derived1 : private Base {}; // matches 'Base' /// class Derived2 : Base {}; // matches 'Base' /// \endcode AST_POLYMORPHIC_MATCHER(isPrivate, AST_POLYMORPHIC_SUPPORTED_TYPES(Decl, CXXBaseSpecifier)) { return getAccessSpecifier(Node) == AS_private; } /// Matches non-static data members that are bit-fields. 
/// /// Given /// \code /// class C { /// int a : 2; /// int b; /// }; /// \endcode /// fieldDecl(isBitField()) /// matches 'int a;' but not 'int b;'. AST_MATCHER(FieldDecl, isBitField) { return Node.isBitField(); } /// Matches non-static data members that are bit-fields of the specified /// bit width. /// /// Given /// \code /// class C { /// int a : 2; /// int b : 4; /// int c : 2; /// }; /// \endcode /// fieldDecl(hasBitWidth(2)) /// matches 'int a;' and 'int c;' but not 'int b;'. AST_MATCHER_P(FieldDecl, hasBitWidth, unsigned, Width) { return Node.isBitField() && Node.getBitWidthValue(Finder->getASTContext()) == Width; } /// Matches non-static data members that have an in-class initializer. /// /// Given /// \code /// class C { /// int a = 2; /// int b = 3; /// int c; /// }; /// \endcode /// fieldDecl(hasInClassInitializer(integerLiteral(equals(2)))) /// matches 'int a;' but not 'int b;'. /// fieldDecl(hasInClassInitializer(anything())) /// matches 'int a;' and 'int b;' but not 'int c;'. AST_MATCHER_P(FieldDecl, hasInClassInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getInClassInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// Determines whether the function is "main", which is the entry point /// into an executable program. AST_MATCHER(FunctionDecl, isMain) { return Node.isMain(); } /// Matches the specialized template of a specialization declaration. /// /// Given /// \code /// template<typename T> class A {}; #1 /// template<> class A<int> {}; #2 /// \endcode /// classTemplateSpecializationDecl(hasSpecializedTemplate(classTemplateDecl())) /// matches '#2' with classTemplateDecl() matching the class template /// declaration of 'A' at #1. AST_MATCHER_P(ClassTemplateSpecializationDecl, hasSpecializedTemplate, internal::Matcher<ClassTemplateDecl>, InnerMatcher) { const ClassTemplateDecl* Decl = Node.getSpecializedTemplate(); return (Decl != nullptr && InnerMatcher.matches(*Decl, Finder, Builder)); } /// Matches a declaration that has been implicitly added /// by the compiler (eg. implicit default/copy constructors). AST_MATCHER(Decl, isImplicit) { return Node.isImplicit(); } /// Matches classTemplateSpecializations, templateSpecializationType and /// functionDecl that have at least one TemplateArgument matching the given /// InnerMatcher. /// /// Given /// \code /// template<typename T> class A {}; /// template<> class A<double> {}; /// A<int> a; /// /// template<typename T> f() {}; /// void func() { f<int>(); }; /// \endcode /// /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToType(asString("int")))) /// matches the specialization \c A<int> /// /// functionDecl(hasAnyTemplateArgument(refersToType(asString("int")))) /// matches the specialization \c f<int> AST_POLYMORPHIC_MATCHER_P( hasAnyTemplateArgument, AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl, TemplateSpecializationType, FunctionDecl), internal::Matcher<TemplateArgument>, InnerMatcher) { ArrayRef<TemplateArgument> List = internal::getTemplateSpecializationArgs(Node); return matchesFirstInRange(InnerMatcher, List.begin(), List.end(), Finder, Builder) != List.end(); } /// Causes all nested matchers to be matched with the specified traversal kind. 
/// /// Given /// \code /// void foo() /// { /// int i = 3.0; /// } /// \endcode /// The matcher /// \code /// traverse(TK_IgnoreUnlessSpelledInSource, /// varDecl(hasInitializer(floatLiteral().bind("init"))) /// ) /// \endcode /// matches the variable declaration with "init" bound to the "3.0". template <typename T> internal::Matcher<T> traverse(TraversalKind TK, const internal::Matcher<T> &InnerMatcher) { return internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>(); } template <typename T> internal::BindableMatcher<T> traverse(TraversalKind TK, const internal::BindableMatcher<T> &InnerMatcher) { return internal::BindableMatcher<T>( internal::DynTypedMatcher::constructRestrictedWrapper( new internal::TraversalMatcher<T>(TK, InnerMatcher), InnerMatcher.getID().first) .template unconditionalConvertTo<T>()); } template <typename... T> internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>> traverse(TraversalKind TK, const internal::VariadicOperatorMatcher<T...> &InnerMatcher) { return internal::TraversalWrapper<internal::VariadicOperatorMatcher<T...>>( TK, InnerMatcher); } template <template <typename ToArg, typename FromArg> class ArgumentAdapterT, typename T, typename ToTypes> internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>> traverse(TraversalKind TK, const internal::ArgumentAdaptingMatcherFuncAdaptor< ArgumentAdapterT, T, ToTypes> &InnerMatcher) { return internal::TraversalWrapper< internal::ArgumentAdaptingMatcherFuncAdaptor<ArgumentAdapterT, T, ToTypes>>(TK, InnerMatcher); } template <template <typename T, typename P1> class MatcherT, typename P1, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam1< MatcherT, P1, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam1<MatcherT, P1, ReturnTypesF>>( TK, InnerMatcher); } template <template <typename T, typename P1, typename P2> class MatcherT, typename P1, typename P2, typename ReturnTypesF> internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>> traverse(TraversalKind TK, const internal::PolymorphicMatcherWithParam2< MatcherT, P1, P2, ReturnTypesF> &InnerMatcher) { return internal::TraversalWrapper< internal::PolymorphicMatcherWithParam2<MatcherT, P1, P2, ReturnTypesF>>( TK, InnerMatcher); } template <typename... T> internal::Matcher<typename internal::GetClade<T...>::Type> traverse(TraversalKind TK, const internal::MapAnyOfHelper<T...> &InnerMatcher) { return traverse(TK, InnerMatcher.with()); } /// Matches expressions that match InnerMatcher after any implicit AST /// nodes are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// class C {}; /// C a = C(); /// C b; /// C c = b; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImplicit(cxxConstructExpr()))) /// \endcode /// would match the declarations for a, b, and c. /// While /// \code /// varDecl(hasInitializer(cxxConstructExpr())) /// \endcode /// only match the declarations for b and c. 
AST_MATCHER_P(Expr, ignoringImplicit, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImplicit(), Finder, Builder); } /// Matches expressions that match InnerMatcher after any implicit casts /// are stripped off. /// /// Parentheses and explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = 0; /// const int c = a; /// int *d = arr; /// long e = (long) 0l; /// \endcode /// The matchers /// \code /// varDecl(hasInitializer(ignoringImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringImpCasts(declRefExpr()))) /// \endcode /// would match the declarations for a, b, c, and d, but not e. /// While /// \code /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// \endcode /// only match the declarations for b, c, and d. AST_MATCHER_P(Expr, ignoringImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreImpCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after parentheses and /// casts are stripped off. /// /// Implicit and non-C Style casts are also discarded. /// Given /// \code /// int a = 0; /// char b = (0); /// void* c = reinterpret_cast<char*>(0); /// char d = char(0); /// \endcode /// The matcher /// varDecl(hasInitializer(ignoringParenCasts(integerLiteral()))) /// would match the declarations for a, b, c, and d. /// while /// varDecl(hasInitializer(integerLiteral())) /// only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenCasts(), Finder, Builder); } /// Matches expressions that match InnerMatcher after implicit casts and /// parentheses are stripped off. /// /// Explicit casts are not discarded. /// Given /// \code /// int arr[5]; /// int a = 0; /// char b = (0); /// const int c = a; /// int *d = (arr); /// long e = ((long) 0l); /// \endcode /// The matchers /// varDecl(hasInitializer(ignoringParenImpCasts(integerLiteral()))) /// varDecl(hasInitializer(ignoringParenImpCasts(declRefExpr()))) /// would match the declarations for a, b, c, and d, but not e. /// while /// varDecl(hasInitializer(integerLiteral())) /// varDecl(hasInitializer(declRefExpr())) /// would only match the declaration for a. AST_MATCHER_P(Expr, ignoringParenImpCasts, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.IgnoreParenImpCasts(), Finder, Builder); } /// Matches types that match InnerMatcher after any parens are stripped. /// /// Given /// \code /// void (*fp)(void); /// \endcode /// The matcher /// \code /// varDecl(hasType(pointerType(pointee(ignoringParens(functionType()))))) /// \endcode /// would match the declaration for fp. AST_MATCHER_P_OVERLOAD(QualType, ignoringParens, internal::Matcher<QualType>, InnerMatcher, 0) { return InnerMatcher.matches(Node.IgnoreParens(), Finder, Builder); } /// Overload \c ignoringParens for \c Expr. /// /// Given /// \code /// const char* str = ("my-string"); /// \endcode /// The matcher /// \code /// implicitCastExpr(hasSourceExpression(ignoringParens(stringLiteral()))) /// \endcode /// would match the implicit cast resulting from the assignment. AST_MATCHER_P_OVERLOAD(Expr, ignoringParens, internal::Matcher<Expr>, InnerMatcher, 1) { const Expr *E = Node.IgnoreParens(); return InnerMatcher.matches(*E, Finder, Builder); } /// Matches expressions that are instantiation-dependent even if it is /// neither type- nor value-dependent. 
///
/// In the following example, the expression sizeof(sizeof(T() + T()))
/// is instantiation-dependent (since it involves a template parameter T),
/// but is neither type- nor value-dependent, since the type of the inner
/// sizeof is known (std::size_t) and therefore the size of the outer
/// sizeof is known.
/// \code
///   template<typename T>
///   void f(T x, T y) { sizeof(sizeof(T() + T())); }
/// \endcode
/// expr(isInstantiationDependent()) matches sizeof(sizeof(T() + T()))
AST_MATCHER(Expr, isInstantiationDependent) {
  return Node.isInstantiationDependent();
}

/// Matches expressions that are type-dependent because the template type
/// is not yet instantiated.
///
/// For example, the expressions "x" and "x + y" are type-dependent in
/// the following code, but "y" is not type-dependent:
/// \code
///   template<typename T>
///   void add(T x, int y) {
///     x + y;
///   }
/// \endcode
/// expr(isTypeDependent()) matches x + y
AST_MATCHER(Expr, isTypeDependent) { return Node.isTypeDependent(); }

/// Matches expressions that are value-dependent because they contain a
/// non-type template parameter.
///
/// For example, the array bound of "Chars" in the following example is
/// value-dependent.
/// \code
///   template<int Size> int f() { return Size; }
/// \endcode
/// expr(isValueDependent()) matches return Size
AST_MATCHER(Expr, isValueDependent) { return Node.isValueDependent(); }

/// Matches classTemplateSpecializations, templateSpecializationType and
/// functionDecl where the n'th TemplateArgument matches the given
/// InnerMatcher.
///
/// Given
/// \code
///   template<typename T, typename U> class A {};
///   A<bool, int> b;
///   A<int, bool> c;
///
///   template<typename T> void f() {}
///   void func() { f<int>(); };
/// \endcode
/// classTemplateSpecializationDecl(hasTemplateArgument(
///     1, refersToType(asString("int"))))
///   matches the specialization \c A<bool, int>
///
/// functionDecl(hasTemplateArgument(0, refersToType(asString("int"))))
///   matches the specialization \c f<int>
AST_POLYMORPHIC_MATCHER_P2(
    hasTemplateArgument,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType,
                                    FunctionDecl),
    unsigned, N, internal::Matcher<TemplateArgument>, InnerMatcher) {
  ArrayRef<TemplateArgument> List =
      internal::getTemplateSpecializationArgs(Node);
  if (List.size() <= N)
    return false;
  return InnerMatcher.matches(List[N], Finder, Builder);
}

/// Matches if the number of template arguments equals \p N.
///
/// Given
/// \code
///   template<typename T> struct C {};
///   C<int> c;
/// \endcode
/// classTemplateSpecializationDecl(templateArgumentCountIs(1))
///   matches C<int>.
AST_POLYMORPHIC_MATCHER_P(
    templateArgumentCountIs,
    AST_POLYMORPHIC_SUPPORTED_TYPES(ClassTemplateSpecializationDecl,
                                    TemplateSpecializationType),
    unsigned, N) {
  return internal::getTemplateSpecializationArgs(Node).size() == N;
}

/// Matches a TemplateArgument that refers to a certain type.
///
/// Given
/// \code
///   struct X {};
///   template<typename T> struct A {};
///   A<X> a;
/// \endcode
/// classTemplateSpecializationDecl(hasAnyTemplateArgument(refersToType(
///     recordType(hasDeclaration(recordDecl(hasName("X")))))))
///   matches the specialization \c A<X>
AST_MATCHER_P(TemplateArgument, refersToType,
              internal::Matcher<QualType>, InnerMatcher) {
  if (Node.getKind() != TemplateArgument::Type)
    return false;
  return InnerMatcher.matches(Node.getAsType(), Finder, Builder);
}

/// Matches a TemplateArgument that refers to a certain template.
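///
/// A hedged sketch (editor's addition) showing how this matcher combines with
/// the argument-position matchers documented above:
/// \code
///   classTemplateSpecializationDecl(
///       templateArgumentCountIs(1),
///       hasTemplateArgument(0, refersToTemplate(templateName())))
/// \endcode
/// would select a specialization whose single template argument is itself a
/// template, such as \c X<Y> in the example below.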
/// /// Given /// \code /// template<template <typename> class S> class X {}; /// template<typename T> class Y {}; /// X<Y> xi; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToTemplate(templateName()))) /// matches the specialization \c X<Y> AST_MATCHER_P(TemplateArgument, refersToTemplate, internal::Matcher<TemplateName>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Template) return false; return InnerMatcher.matches(Node.getAsTemplate(), Finder, Builder); } /// Matches a canonical TemplateArgument that refers to a certain /// declaration. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// classTemplateSpecializationDecl(hasAnyTemplateArgument( /// refersToDeclaration(fieldDecl(hasName("next"))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, refersToDeclaration, internal::Matcher<Decl>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Declaration) return InnerMatcher.matches(*Node.getAsDecl(), Finder, Builder); return false; } /// Matches a sugar TemplateArgument that refers to a certain expression. /// /// Given /// \code /// struct B { int next; }; /// template<int(B::*next_ptr)> struct A {}; /// A<&B::next> a; /// \endcode /// templateSpecializationType(hasAnyTemplateArgument( /// isExpr(hasDescendant(declRefExpr(to(fieldDecl(hasName("next")))))))) /// matches the specialization \c A<&B::next> with \c fieldDecl(...) matching /// \c B::next AST_MATCHER_P(TemplateArgument, isExpr, internal::Matcher<Expr>, InnerMatcher) { if (Node.getKind() == TemplateArgument::Expression) return InnerMatcher.matches(*Node.getAsExpr(), Finder, Builder); return false; } /// Matches a TemplateArgument that is an integral value. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(isIntegral())) /// matches the implicit instantiation of C in C<42> /// with isIntegral() matching 42. AST_MATCHER(TemplateArgument, isIntegral) { return Node.getKind() == TemplateArgument::Integral; } /// Matches a TemplateArgument that refers to an integral type. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(refersToIntegralType(asString("int")))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, refersToIntegralType, internal::Matcher<QualType>, InnerMatcher) { if (Node.getKind() != TemplateArgument::Integral) return false; return InnerMatcher.matches(Node.getIntegralType(), Finder, Builder); } /// Matches a TemplateArgument of integral type with a given value. /// /// Note that 'Value' is a string as the template argument's value is /// an arbitrary precision integer. 'Value' must be euqal to the canonical /// representation of that integral value in base 10. /// /// Given /// \code /// template<int T> struct C {}; /// C<42> c; /// \endcode /// classTemplateSpecializationDecl( /// hasAnyTemplateArgument(equalsIntegralValue("42"))) /// matches the implicit instantiation of C in C<42>. AST_MATCHER_P(TemplateArgument, equalsIntegralValue, std::string, Value) { if (Node.getKind() != TemplateArgument::Integral) return false; return Node.getAsIntegral().toString(10) == Value; } /// Matches an Objective-C autorelease pool statement. 
/// /// Given /// \code /// @autoreleasepool { /// int x = 0; /// } /// \endcode /// autoreleasePoolStmt(stmt()) matches the declaration of "x" /// inside the autorelease pool. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAutoreleasePoolStmt> autoreleasePoolStmt; /// Matches any value declaration. /// /// Example matches A, B, C and F /// \code /// enum X { A, B, C }; /// void F(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ValueDecl> valueDecl; /// Matches C++ constructor declarations. /// /// Example matches Foo::Foo() and Foo::Foo(int) /// \code /// class Foo { /// public: /// Foo(); /// Foo(int); /// int DoSomething(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConstructorDecl> cxxConstructorDecl; /// Matches explicit C++ destructor declarations. /// /// Example matches Foo::~Foo() /// \code /// class Foo { /// public: /// virtual ~Foo(); /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDestructorDecl> cxxDestructorDecl; /// Matches enum declarations. /// /// Example matches X /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumDecl> enumDecl; /// Matches enum constants. /// /// Example matches A, B, C /// \code /// enum X { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, EnumConstantDecl> enumConstantDecl; /// Matches tag declarations. /// /// Example matches X, Z, U, S, E /// \code /// class X; /// template<class T> class Z {}; /// struct S {}; /// union U {}; /// enum E { /// A, B, C /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, TagDecl> tagDecl; /// Matches method declarations. /// /// Example matches y /// \code /// class X { void y(); }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXMethodDecl> cxxMethodDecl; /// Matches conversion operator declarations. /// /// Example matches the operator. /// \code /// class X { operator int() const; }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXConversionDecl> cxxConversionDecl; /// Matches user-defined and implicitly generated deduction guide. /// /// Example matches the deduction guide. /// \code /// template<typename T> /// class X { X(int) }; /// X(int) -> X<int>; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, CXXDeductionGuideDecl> cxxDeductionGuideDecl; /// Matches variable declarations. /// /// Note: this does not match declarations of member variables, which are /// "field" declarations in Clang parlance. /// /// Example matches a /// \code /// int a; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, VarDecl> varDecl; /// Matches field declarations. /// /// Given /// \code /// class X { int m; }; /// \endcode /// fieldDecl() /// matches 'm'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FieldDecl> fieldDecl; /// Matches indirect field declarations. /// /// Given /// \code /// struct X { struct { int a; }; }; /// \endcode /// indirectFieldDecl() /// matches 'a'. extern const internal::VariadicDynCastAllOfMatcher<Decl, IndirectFieldDecl> indirectFieldDecl; /// Matches function declarations. /// /// Example matches f /// \code /// void f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionDecl> functionDecl; /// Matches C++ function template declarations. 
/// /// Example matches f /// \code /// template<class T> void f(T t) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, FunctionTemplateDecl> functionTemplateDecl; /// Matches friend declarations. /// /// Given /// \code /// class X { friend void foo(); }; /// \endcode /// friendDecl() /// matches 'friend void foo()'. extern const internal::VariadicDynCastAllOfMatcher<Decl, FriendDecl> friendDecl; /// Matches statements. /// /// Given /// \code /// { ++a; } /// \endcode /// stmt() /// matches both the compound statement '{ ++a; }' and '++a'. extern const internal::VariadicAllOfMatcher<Stmt> stmt; /// Matches declaration statements. /// /// Given /// \code /// int a; /// \endcode /// declStmt() /// matches 'int a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclStmt> declStmt; /// Matches member expressions. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// int a; static int b; /// }; /// \endcode /// memberExpr() /// matches this->x, x, y.x, a, this->b extern const internal::VariadicDynCastAllOfMatcher<Stmt, MemberExpr> memberExpr; /// Matches unresolved member expressions. /// /// Given /// \code /// struct X { /// template <class T> void f(); /// void g(); /// }; /// template <class T> void h() { X x; x.f<T>(); x.g(); } /// \endcode /// unresolvedMemberExpr() /// matches x.f<T> extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedMemberExpr> unresolvedMemberExpr; /// Matches member expressions where the actual member referenced could not be /// resolved because the base expression or the member name was dependent. /// /// Given /// \code /// template <class T> void f() { T t; t.g(); } /// \endcode /// cxxDependentScopeMemberExpr() /// matches t.g extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDependentScopeMemberExpr> cxxDependentScopeMemberExpr; /// Matches call expressions. /// /// Example matches x.y() and y() /// \code /// X x; /// x.y(); /// y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CallExpr> callExpr; /// Matches call expressions which were resolved using ADL. /// /// Example matches y(x) but not y(42) or NS::y(x). /// \code /// namespace NS { /// struct X {}; /// void y(X); /// } /// /// void y(...); /// /// void test() { /// NS::X x; /// y(x); // Matches /// NS::y(x); // Doesn't match /// y(42); // Doesn't match /// using NS::y; /// y(x); // Found by both unqualified lookup and ADL, doesn't match // } /// \endcode AST_MATCHER(CallExpr, usesADL) { return Node.usesADL(); } /// Matches lambda expressions. /// /// Example matches [&](){return 5;} /// \code /// [&](){return 5;} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, LambdaExpr> lambdaExpr; /// Matches member call expressions. /// /// Example matches x.y() /// \code /// X x; /// x.y(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXMemberCallExpr> cxxMemberCallExpr; /// Matches ObjectiveC Message invocation expressions. /// /// The innermost message send invokes the "alloc" class method on the /// NSString class, while the outermost message send invokes the /// "initWithString" instance method on the object returned from /// NSString's "alloc". This matcher should match both message sends. /// \code /// [[NSString alloc] initWithString:@"Hello"] /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCMessageExpr> objcMessageExpr; /// Matches Objective-C interface declarations. 
/// /// Example matches Foo /// \code /// @interface Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCInterfaceDecl> objcInterfaceDecl; /// Matches Objective-C implementation declarations. /// /// Example matches Foo /// \code /// @implementation Foo /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCImplementationDecl> objcImplementationDecl; /// Matches Objective-C protocol declarations. /// /// Example matches FooDelegate /// \code /// @protocol FooDelegate /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCProtocolDecl> objcProtocolDecl; /// Matches Objective-C category declarations. /// /// Example matches Foo (Additions) /// \code /// @interface Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryDecl> objcCategoryDecl; /// Matches Objective-C category definitions. /// /// Example matches Foo (Additions) /// \code /// @implementation Foo (Additions) /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCCategoryImplDecl> objcCategoryImplDecl; /// Matches Objective-C method declarations. /// /// Example matches both declaration and definition of -[Foo method] /// \code /// @interface Foo /// - (void)method; /// @end /// /// @implementation Foo /// - (void)method {} /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCMethodDecl> objcMethodDecl; /// Matches block declarations. /// /// Example matches the declaration of the nameless block printing an input /// integer. /// /// \code /// myFunc(^(int p) { /// printf("%d", p); /// }) /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, BlockDecl> blockDecl; /// Matches Objective-C instance variable declarations. /// /// Example matches _enabled /// \code /// @implementation Foo { /// BOOL _enabled; /// } /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCIvarDecl> objcIvarDecl; /// Matches Objective-C property declarations. /// /// Example matches enabled /// \code /// @interface Foo /// @property BOOL enabled; /// @end /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, ObjCPropertyDecl> objcPropertyDecl; /// Matches Objective-C \@throw statements. /// /// Example matches \@throw /// \code /// @throw obj; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtThrowStmt> objcThrowStmt; /// Matches Objective-C @try statements. /// /// Example matches @try /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtTryStmt> objcTryStmt; /// Matches Objective-C @catch statements. /// /// Example matches @catch /// \code /// @try {} /// @catch (...) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtCatchStmt> objcCatchStmt; /// Matches Objective-C @finally statements. /// /// Example matches @finally /// \code /// @try {} /// @finally {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCAtFinallyStmt> objcFinallyStmt; /// Matches expressions that introduce cleanups to be run at the end /// of the sub-expression's evaluation. /// /// Example matches std::string() /// \code /// const std::string str = std::string(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExprWithCleanups> exprWithCleanups; /// Matches init list expressions. 
/// /// Given /// \code /// int a[] = { 1, 2 }; /// struct B { int x, y; }; /// B b = { 5, 6 }; /// \endcode /// initListExpr() /// matches "{ 1, 2 }" and "{ 5, 6 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, InitListExpr> initListExpr; /// Matches the syntactic form of init list expressions /// (if expression have it). AST_MATCHER_P(InitListExpr, hasSyntacticForm, internal::Matcher<Expr>, InnerMatcher) { const Expr *SyntForm = Node.getSyntacticForm(); return (SyntForm != nullptr && InnerMatcher.matches(*SyntForm, Finder, Builder)); } /// Matches C++ initializer list expressions. /// /// Given /// \code /// std::vector<int> a({ 1, 2, 3 }); /// std::vector<int> b = { 4, 5 }; /// int c[] = { 6, 7 }; /// std::pair<int, int> d = { 8, 9 }; /// \endcode /// cxxStdInitializerListExpr() /// matches "{ 1, 2, 3 }" and "{ 4, 5 }" extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStdInitializerListExpr> cxxStdInitializerListExpr; /// Matches implicit initializers of init list expressions. /// /// Given /// \code /// point ptarray[10] = { [2].y = 1.0, [2].x = 2.0, [0].x = 1.0 }; /// \endcode /// implicitValueInitExpr() /// matches "[0].y" (implicitly) extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitValueInitExpr> implicitValueInitExpr; /// Matches paren list expressions. /// ParenListExprs don't have a predefined type and are used for late parsing. /// In the final AST, they can be met in template declarations. /// /// Given /// \code /// template<typename T> class X { /// void f() { /// X x(*this); /// int a = 0, b = 1; int i = (a, b); /// } /// }; /// \endcode /// parenListExpr() matches "*this" but NOT matches (a, b) because (a, b) /// has a predefined type and is a ParenExpr, not a ParenListExpr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenListExpr> parenListExpr; /// Matches substitutions of non-type template parameters. /// /// Given /// \code /// template <int N> /// struct A { static const int n = N; }; /// struct B : public A<42> {}; /// \endcode /// substNonTypeTemplateParmExpr() /// matches "N" in the right-hand side of "static const int n = N;" extern const internal::VariadicDynCastAllOfMatcher<Stmt, SubstNonTypeTemplateParmExpr> substNonTypeTemplateParmExpr; /// Matches using declarations. /// /// Given /// \code /// namespace X { int x; } /// using X::x; /// \endcode /// usingDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDecl> usingDecl; /// Matches using namespace declarations. /// /// Given /// \code /// namespace X { int x; } /// using namespace X; /// \endcode /// usingDirectiveDecl() /// matches \code using namespace X \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UsingDirectiveDecl> usingDirectiveDecl; /// Matches reference to a name that can be looked up during parsing /// but could not be resolved to a specific declaration. /// /// Given /// \code /// template<typename T> /// T foo() { T a; return a; } /// template<typename T> /// void bar() { /// foo<T>(); /// } /// \endcode /// unresolvedLookupExpr() /// matches \code foo<T>() \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnresolvedLookupExpr> unresolvedLookupExpr; /// Matches unresolved using value declarations. 
/// /// Given /// \code /// template<typename X> /// class C : private X { /// using X::x; /// }; /// \endcode /// unresolvedUsingValueDecl() /// matches \code using X::x \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingValueDecl> unresolvedUsingValueDecl; /// Matches unresolved using value declarations that involve the /// typename. /// /// Given /// \code /// template <typename T> /// struct Base { typedef T Foo; }; /// /// template<typename T> /// struct S : private Base<T> { /// using typename Base<T>::Foo; /// }; /// \endcode /// unresolvedUsingTypenameDecl() /// matches \code using Base<T>::Foo \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, UnresolvedUsingTypenameDecl> unresolvedUsingTypenameDecl; /// Matches a constant expression wrapper. /// /// Example matches the constant in the case statement: /// (matcher = constantExpr()) /// \code /// switch (a) { /// case 37: break; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConstantExpr> constantExpr; /// Matches parentheses used in expressions. /// /// Example matches (foo() + 1) /// \code /// int foo() { return 1; } /// int a = (foo() + 1); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ParenExpr> parenExpr; /// Matches constructor call expressions (including implicit ones). /// /// Example matches string(ptr, n) and ptr within arguments of f /// (matcher = cxxConstructExpr()) /// \code /// void f(const string &a, const string &b); /// char *ptr; /// int n; /// f(string(ptr, n), ptr); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstructExpr> cxxConstructExpr; /// Matches unresolved constructor call expressions. /// /// Example matches T(t) in return statement of f /// (matcher = cxxUnresolvedConstructExpr()) /// \code /// template <typename T> /// void f(const T& t) { return T(t); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXUnresolvedConstructExpr> cxxUnresolvedConstructExpr; /// Matches implicit and explicit this expressions. /// /// Example matches the implicit this expression in "return i". /// (matcher = cxxThisExpr()) /// \code /// struct foo { /// int i; /// int f() { return i; } /// }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThisExpr> cxxThisExpr; /// Matches nodes where temporaries are created. /// /// Example matches FunctionTakesString(GetStringByValue()) /// (matcher = cxxBindTemporaryExpr()) /// \code /// FunctionTakesString(GetStringByValue()); /// FunctionTakesStringByPointer(GetStringPointer()); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBindTemporaryExpr> cxxBindTemporaryExpr; /// Matches nodes where temporaries are materialized. /// /// Example: Given /// \code /// struct T {void func();}; /// T f(); /// void g(T); /// \endcode /// materializeTemporaryExpr() matches 'f()' in these statements /// \code /// T u(f()); /// g(f()); /// f().func(); /// \endcode /// but does not match /// \code /// f(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, MaterializeTemporaryExpr> materializeTemporaryExpr; /// Matches new expressions. /// /// Given /// \code /// new X; /// \endcode /// cxxNewExpr() /// matches 'new X'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNewExpr> cxxNewExpr; /// Matches delete expressions. /// /// Given /// \code /// delete X; /// \endcode /// cxxDeleteExpr() /// matches 'delete X'. 
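///
/// A hedged sketch (editor's addition; the variable name \c X is illustrative
/// only): the matcher composes with traversal matchers to constrain what is
/// being deleted, e.g.
/// \code
///   cxxDeleteExpr(hasDescendant(declRefExpr(to(varDecl(hasName("X"))))))
/// \endcode
/// additionally requires that the deleted expression refers to a variable
/// named \c X.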
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDeleteExpr> cxxDeleteExpr; /// Matches noexcept expressions. /// /// Given /// \code /// bool a() noexcept; /// bool b() noexcept(true); /// bool c() noexcept(false); /// bool d() noexcept(noexcept(a())); /// bool e = noexcept(b()) || noexcept(c()); /// \endcode /// cxxNoexceptExpr() /// matches `noexcept(a())`, `noexcept(b())` and `noexcept(c())`. /// doesn't match the noexcept specifier in the declarations a, b, c or d. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNoexceptExpr> cxxNoexceptExpr; /// Matches array subscript expressions. /// /// Given /// \code /// int i = a[1]; /// \endcode /// arraySubscriptExpr() /// matches "a[1]" extern const internal::VariadicDynCastAllOfMatcher<Stmt, ArraySubscriptExpr> arraySubscriptExpr; /// Matches the value of a default argument at the call site. /// /// Example matches the CXXDefaultArgExpr placeholder inserted for the /// default value of the second parameter in the call expression f(42) /// (matcher = cxxDefaultArgExpr()) /// \code /// void f(int x, int y = 0); /// f(42); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDefaultArgExpr> cxxDefaultArgExpr; /// Matches overloaded operator calls. /// /// Note that if an operator isn't overloaded, it won't match. Instead, use /// binaryOperator matcher. /// Currently it does not match operators such as new delete. /// FIXME: figure out why these do not match? /// /// Example matches both operator<<((o << b), c) and operator<<(o, b) /// (matcher = cxxOperatorCallExpr()) /// \code /// ostream &operator<< (ostream &out, int i) { }; /// ostream &o; int b = 1, c = 1; /// o << b << c; /// \endcode /// See also the binaryOperation() matcher for more-general matching of binary /// uses of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXOperatorCallExpr> cxxOperatorCallExpr; /// Matches rewritten binary operators /// /// Example matches use of "<": /// \code /// #include <compare> /// struct HasSpaceshipMem { /// int a; /// constexpr auto operator<=>(const HasSpaceshipMem&) const = default; /// }; /// void compare() { /// HasSpaceshipMem hs1, hs2; /// if (hs1 < hs2) /// return; /// } /// \endcode /// See also the binaryOperation() matcher for more-general matching /// of this AST node. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXRewrittenBinaryOperator> cxxRewrittenBinaryOperator; /// Matches expressions. /// /// Example matches x() /// \code /// void f() { x(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, Expr> expr; /// Matches expressions that refer to declarations. /// /// Example matches x in if (x) /// \code /// bool x; /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DeclRefExpr> declRefExpr; /// Matches a reference to an ObjCIvar. /// /// Example: matches "a" in "init" method: /// \code /// @implementation A { /// NSString *a; /// } /// - (void) init { /// a = @"hello"; /// } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ObjCIvarRefExpr> objcIvarRefExpr; /// Matches a reference to a block. /// /// Example: matches "^{}": /// \code /// void f() { ^{}(); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BlockExpr> blockExpr; /// Matches if statements. /// /// Example matches 'if (x) {}' /// \code /// if (x) {} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, IfStmt> ifStmt; /// Matches for statements. 
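///
/// An editor's sketch (illustrative only): besides matching on its own, as in
/// the example below, \c forStmt composes with the loop sub-part matchers
/// documented further down, e.g.
/// \code
///   forStmt(hasLoopInit(declStmt()),
///           hasIncrement(unaryOperator(hasOperatorName("++"))))
/// \endcode
/// matches only loops that declare something in their init statement and use
/// '++' as the increment.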
///
/// Example matches 'for (;;) {}'
/// \code
///   for (;;) {}
///   int i[] = {1, 2, 3}; for (auto a : i);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ForStmt> forStmt;

/// Matches the increment statement of a for loop.
///
/// Example:
///     forStmt(hasIncrement(unaryOperator(hasOperatorName("++"))))
/// matches '++x' in
/// \code
///     for (x; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasIncrement, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Stmt *const Increment = Node.getInc();
  return (Increment != nullptr &&
          InnerMatcher.matches(*Increment, Finder, Builder));
}

/// Matches the initialization statement of a for loop.
///
/// Example:
///     forStmt(hasLoopInit(declStmt()))
/// matches 'int x = 0' in
/// \code
///     for (int x = 0; x < N; ++x) { }
/// \endcode
AST_MATCHER_P(ForStmt, hasLoopInit, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Stmt *const Init = Node.getInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}

/// Matches range-based for statements.
///
/// cxxForRangeStmt() matches 'for (auto a : i)'
/// \code
///   int i[] = {1, 2, 3}; for (auto a : i);
///   for(int j = 0; j < 5; ++j);
/// \endcode
extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXForRangeStmt>
    cxxForRangeStmt;

/// Matches the loop variable of a range-based for statement.
///
/// Example:
///     cxxForRangeStmt(hasLoopVariable(anything()))
/// matches 'int x' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasLoopVariable, internal::Matcher<VarDecl>,
              InnerMatcher) {
  const VarDecl *const Var = Node.getLoopVariable();
  return (Var != nullptr && InnerMatcher.matches(*Var, Finder, Builder));
}

/// Matches the range-initializer expression of a range-based for statement.
///
/// Example:
///     cxxForRangeStmt(hasRangeInit(anything()))
/// matches 'a' in
/// \code
///     for (int x : a) { }
/// \endcode
AST_MATCHER_P(CXXForRangeStmt, hasRangeInit, internal::Matcher<Expr>,
              InnerMatcher) {
  const Expr *const Init = Node.getRangeInit();
  return (Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder));
}

/// Matches while statements.
///
/// Given
/// \code
///   while (true) {}
/// \endcode
/// whileStmt()
///   matches 'while (true) {}'.
extern const internal::VariadicDynCastAllOfMatcher<Stmt, WhileStmt> whileStmt;

/// Matches do statements.
///
/// Given
/// \code
///   do {} while (true);
/// \endcode
/// doStmt()
///   matches 'do {} while(true)'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, DoStmt> doStmt;

/// Matches break statements.
///
/// Given
/// \code
///   while (true) { break; }
/// \endcode
/// breakStmt()
///   matches 'break'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, BreakStmt> breakStmt;

/// Matches continue statements.
///
/// Given
/// \code
///   while (true) { continue; }
/// \endcode
/// continueStmt()
///   matches 'continue'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ContinueStmt>
    continueStmt;

/// Matches return statements.
///
/// Given
/// \code
///   return 1;
/// \endcode
/// returnStmt()
///   matches 'return 1'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, ReturnStmt>
    returnStmt;

/// Matches goto statements.
///
/// Given
/// \code
///   goto FOO;
///   FOO: bar();
/// \endcode
/// gotoStmt()
///   matches 'goto FOO'
extern const internal::VariadicDynCastAllOfMatcher<Stmt, GotoStmt> gotoStmt;

/// Matches label statements.
/// /// Given /// \code /// goto FOO; /// FOO: bar(); /// \endcode /// labelStmt() /// matches 'FOO:' extern const internal::VariadicDynCastAllOfMatcher<Stmt, LabelStmt> labelStmt; /// Matches address of label statements (GNU extension). /// /// Given /// \code /// FOO: bar(); /// void *ptr = &&FOO; /// goto *bar; /// \endcode /// addrLabelExpr() /// matches '&&FOO' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AddrLabelExpr> addrLabelExpr; /// Matches switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchStmt() /// matches 'switch(a)'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchStmt> switchStmt; /// Matches case and default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// switchCase() /// matches 'case 42:' and 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, SwitchCase> switchCase; /// Matches case statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// caseStmt() /// matches 'case 42:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CaseStmt> caseStmt; /// Matches default statements inside switch statements. /// /// Given /// \code /// switch(a) { case 42: break; default: break; } /// \endcode /// defaultStmt() /// matches 'default:'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, DefaultStmt> defaultStmt; /// Matches compound statements. /// /// Example matches '{}' and '{{}}' in 'for (;;) {{}}' /// \code /// for (;;) {{}} /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundStmt> compoundStmt; /// Matches catch statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxCatchStmt() /// matches 'catch(int i)' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXCatchStmt> cxxCatchStmt; /// Matches try statements. /// /// \code /// try {} catch(int i) {} /// \endcode /// cxxTryStmt() /// matches 'try {}' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTryStmt> cxxTryStmt; /// Matches throw expressions. /// /// \code /// try { throw 5; } catch(int i) {} /// \endcode /// cxxThrowExpr() /// matches 'throw 5' extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXThrowExpr> cxxThrowExpr; /// Matches null statements. /// /// \code /// foo();; /// \endcode /// nullStmt() /// matches the second ';' extern const internal::VariadicDynCastAllOfMatcher<Stmt, NullStmt> nullStmt; /// Matches asm statements. /// /// \code /// int i = 100; /// __asm("mov al, 2"); /// \endcode /// asmStmt() /// matches '__asm("mov al, 2")' extern const internal::VariadicDynCastAllOfMatcher<Stmt, AsmStmt> asmStmt; /// Matches bool literals. /// /// Example matches true /// \code /// true /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXBoolLiteralExpr> cxxBoolLiteral; /// Matches string literals (also matches wide string literals). /// /// Example matches "abcd", L"abcd" /// \code /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StringLiteral> stringLiteral; /// Matches character literals (also matches wchar_t). /// /// Not matching Hex-encoded chars (e.g. 0x1234, which is a IntegerLiteral), /// though. 
/// /// Example matches 'a', L'a' /// \code /// char ch = 'a'; /// wchar_t chw = L'a'; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CharacterLiteral> characterLiteral; /// Matches integer literals of all sizes / encodings, e.g. /// 1, 1L, 0x1 and 1U. /// /// Does not match character-encoded integers such as L'a'. extern const internal::VariadicDynCastAllOfMatcher<Stmt, IntegerLiteral> integerLiteral; /// Matches float literals of all sizes / encodings, e.g. /// 1.0, 1.0f, 1.0L and 1e10. /// /// Does not match implicit conversions such as /// \code /// float a = 10; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, FloatingLiteral> floatLiteral; /// Matches imaginary literals, which are based on integer and floating /// point literals e.g.: 1i, 1.0i extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImaginaryLiteral> imaginaryLiteral; /// Matches fixed point literals extern const internal::VariadicDynCastAllOfMatcher<Stmt, FixedPointLiteral> fixedPointLiteral; /// Matches user defined literal operator call. /// /// Example match: "foo"_suffix extern const internal::VariadicDynCastAllOfMatcher<Stmt, UserDefinedLiteral> userDefinedLiteral; /// Matches compound (i.e. non-scalar) literals /// /// Example match: {1}, (1, 2) /// \code /// int array[4] = {1}; /// vector int myvec = (vector int)(1, 2); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CompoundLiteralExpr> compoundLiteralExpr; /// Matches nullptr literal. extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXNullPtrLiteralExpr> cxxNullPtrLiteralExpr; /// Matches GNU __builtin_choose_expr. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ChooseExpr> chooseExpr; /// Matches GNU __null expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GNUNullExpr> gnuNullExpr; /// Matches C11 _Generic expression. extern const internal::VariadicDynCastAllOfMatcher<Stmt, GenericSelectionExpr> genericSelectionExpr; /// Matches atomic builtins. /// Example matches __atomic_load_n(ptr, 1) /// \code /// void foo() { int *ptr; __atomic_load_n(ptr, 1); } /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, AtomicExpr> atomicExpr; /// Matches statement expression (GNU extension). /// /// Example match: ({ int X = 4; X; }) /// \code /// int C = ({ int X = 4; X; }); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, StmtExpr> stmtExpr; /// Matches binary operator expressions. /// /// Example matches a || b /// \code /// !(a || b) /// \endcode /// See also the binaryOperation() matcher for more-general matching. extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryOperator> binaryOperator; /// Matches unary operator expressions. /// /// Example matches !a /// \code /// !a || b /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryOperator> unaryOperator; /// Matches conditional operator expressions. /// /// Example matches a ? b : c /// \code /// (a ? b : c) + 42 /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ConditionalOperator> conditionalOperator; /// Matches binary conditional operator expressions (GNU extension). /// /// Example matches a ?: b /// \code /// (a ?: b) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, BinaryConditionalOperator> binaryConditionalOperator; /// Matches opaque value expressions. They are used as helpers /// to reference another expressions and can be met /// in BinaryConditionalOperators, for example. 
/// /// Example matches 'a' /// \code /// (a ?: c) + 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, OpaqueValueExpr> opaqueValueExpr; /// Matches a C++ static_assert declaration. /// /// Example: /// staticAssertExpr() /// matches /// static_assert(sizeof(S) == sizeof(int)) /// in /// \code /// struct S { /// int x; /// }; /// static_assert(sizeof(S) == sizeof(int)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Decl, StaticAssertDecl> staticAssertDecl; /// Matches a reinterpret_cast expression. /// /// Either the source expression or the destination type can be matched /// using has(), but hasDestinationType() is more specific and can be /// more readable. /// /// Example matches reinterpret_cast<char*>(&p) in /// \code /// void* p = reinterpret_cast<char*>(&p); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXReinterpretCastExpr> cxxReinterpretCastExpr; /// Matches a C++ static_cast expression. /// /// \see hasDestinationType /// \see reinterpretCast /// /// Example: /// cxxStaticCastExpr() /// matches /// static_cast<long>(8) /// in /// \code /// long eight(static_cast<long>(8)); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXStaticCastExpr> cxxStaticCastExpr; /// Matches a dynamic_cast expression. /// /// Example: /// cxxDynamicCastExpr() /// matches /// dynamic_cast<D*>(&b); /// in /// \code /// struct B { virtual ~B() {} }; struct D : B {}; /// B b; /// D* p = dynamic_cast<D*>(&b); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXDynamicCastExpr> cxxDynamicCastExpr; /// Matches a const_cast expression. /// /// Example: Matches const_cast<int*>(&r) in /// \code /// int n = 42; /// const int &r(n); /// int* p = const_cast<int*>(&r); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXConstCastExpr> cxxConstCastExpr; /// Matches a C-style cast expression. /// /// Example: Matches (int) 2.2f in /// \code /// int i = (int) 2.2f; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CStyleCastExpr> cStyleCastExpr; /// Matches explicit cast expressions. /// /// Matches any cast expression written in user code, whether it be a /// C-style cast, a functional-style cast, or a keyword cast. /// /// Does not match implicit conversions. /// /// Note: the name "explicitCast" is chosen to match Clang's terminology, as /// Clang uses the term "cast" to apply to implicit conversions as well as to /// actual cast expressions. /// /// \see hasDestinationType. /// /// Example: matches all five of the casts in /// \code /// int((int)(reinterpret_cast<int>(static_cast<int>(const_cast<int>(42))))) /// \endcode /// but does not match the implicit conversion in /// \code /// long ell = 42; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, ExplicitCastExpr> explicitCastExpr; /// Matches the implicit cast nodes of Clang's AST. /// /// This matches many different places, including function call return value /// eliding, as well as any type conversions. extern const internal::VariadicDynCastAllOfMatcher<Stmt, ImplicitCastExpr> implicitCastExpr; /// Matches any cast nodes of Clang's AST. 
/// /// Example: castExpr() matches each of the following: /// \code /// (int) 3; /// const_cast<Expr *>(SubExpr); /// char c = 0; /// \endcode /// but does not match /// \code /// int i = (0); /// int k = 0; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CastExpr> castExpr; /// Matches functional cast expressions /// /// Example: Matches Foo(bar); /// \code /// Foo f = bar; /// Foo g = (Foo) bar; /// Foo h = Foo(bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXFunctionalCastExpr> cxxFunctionalCastExpr; /// Matches functional cast expressions having N != 1 arguments /// /// Example: Matches Foo(bar, bar) /// \code /// Foo h = Foo(bar, bar); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CXXTemporaryObjectExpr> cxxTemporaryObjectExpr; /// Matches predefined identifier expressions [C99 6.4.2.2]. /// /// Example: Matches __func__ /// \code /// printf("%s", __func__); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, PredefinedExpr> predefinedExpr; /// Matches C99 designated initializer expressions [C99 6.7.8]. /// /// Example: Matches { [2].y = 1.0, [0].x = 1.0 } /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, DesignatedInitExpr> designatedInitExpr; /// Matches designated initializer expressions that contain /// a specific number of designators. /// /// Example: Given /// \code /// point ptarray[10] = { [2].y = 1.0, [0].x = 1.0 }; /// point ptarray2[10] = { [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }; /// \endcode /// designatorCountIs(2) /// matches '{ [2].y = 1.0, [0].x = 1.0 }', /// but not '{ [2].y = 1.0, [2].x = 0.0, [0].x = 1.0 }'. AST_MATCHER_P(DesignatedInitExpr, designatorCountIs, unsigned, N) { return Node.size() == N; } /// Matches \c QualTypes in the clang AST. extern const internal::VariadicAllOfMatcher<QualType> qualType; /// Matches \c Types in the clang AST. extern const internal::VariadicAllOfMatcher<Type> type; /// Matches \c TypeLocs in the clang AST. extern const internal::VariadicAllOfMatcher<TypeLoc> typeLoc; /// Matches if any of the given matchers matches. /// /// Unlike \c anyOf, \c eachOf will generate a match result for each /// matching submatcher. /// /// For example, in: /// \code /// class A { int a; int b; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(eachOf(has(fieldDecl(hasName("a")).bind("v")), /// has(fieldDecl(hasName("b")).bind("v")))) /// \endcode /// will generate two results binding "v", the first of which binds /// the field declaration of \c a, the second the field declaration of /// \c b. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> eachOf; /// Matches if any of the given matchers matches. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> anyOf; /// Matches if all given matchers match. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc< 2, std::numeric_limits<unsigned>::max()> allOf; /// Matches any node regardless of the submatcher. /// /// However, \c optionally will retain any bindings generated by the submatcher. /// Useful when additional information which may or may not present about a main /// matching node is desired. 
/// /// For example, in: /// \code /// class Foo { /// int bar; /// } /// \endcode /// The matcher: /// \code /// cxxRecordDecl( /// optionally(has( /// fieldDecl(hasName("bar")).bind("var") /// ))).bind("record") /// \endcode /// will produce a result binding for both "record" and "var". /// The matcher will produce a "record" binding for even if there is no data /// member named "bar" in that class. /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> optionally; /// Matches sizeof (C99), alignof (C++11) and vec_step (OpenCL) /// /// Given /// \code /// Foo x = bar; /// int y = sizeof(x) + alignof(x); /// \endcode /// unaryExprOrTypeTraitExpr() /// matches \c sizeof(x) and \c alignof(x) extern const internal::VariadicDynCastAllOfMatcher<Stmt, UnaryExprOrTypeTraitExpr> unaryExprOrTypeTraitExpr; /// Matches any of the \p NodeMatchers with InnerMatchers nested within /// /// Given /// \code /// if (true); /// for (; true; ); /// \endcode /// with the matcher /// \code /// mapAnyOf(ifStmt, forStmt).with( /// hasCondition(cxxBoolLiteralExpr(equals(true))) /// ).bind("trueCond") /// \endcode /// matches the \c if and the \c for. It is equivalent to: /// \code /// auto trueCond = hasCondition(cxxBoolLiteralExpr(equals(true))); /// anyOf( /// ifStmt(trueCond).bind("trueCond"), /// forStmt(trueCond).bind("trueCond") /// ); /// \endcode /// /// The with() chain-call accepts zero or more matchers which are combined /// as-if with allOf() in each of the node matchers. /// Usable as: Any Matcher template <typename T, typename... U> auto mapAnyOf(internal::VariadicDynCastAllOfMatcher<T, U> const &...) { return internal::MapAnyOfHelper<U...>(); } /// Matches nodes which can be used with binary operators. /// /// The code /// \code /// var1 != var2; /// \endcode /// might be represented in the clang AST as a binaryOperator, a /// cxxOperatorCallExpr or a cxxRewrittenBinaryOperator, depending on /// /// * whether the types of var1 and var2 are fundamental (binaryOperator) or at /// least one is a class type (cxxOperatorCallExpr) /// * whether the code appears in a template declaration, if at least one of the /// vars is a dependent-type (binaryOperator) /// * whether the code relies on a rewritten binary operator, such as a /// spaceship operator or an inverted equality operator /// (cxxRewrittenBinaryOperator) /// /// This matcher elides details in places where the matchers for the nodes are /// compatible. /// /// Given /// \code /// binaryOperation( /// hasOperatorName("!="), /// hasLHS(expr().bind("lhs")), /// hasRHS(expr().bind("rhs")) /// ) /// \endcode /// matches each use of "!=" in: /// \code /// struct S{ /// bool operator!=(const S&) const; /// }; /// /// void foo() /// { /// 1 != 2; /// S() != S(); /// } /// /// template<typename T> /// void templ() /// { /// 1 != 2; /// T() != S(); /// } /// struct HasOpEq /// { /// bool operator==(const HasOpEq &) const; /// }; /// /// void inverse() /// { /// HasOpEq s1; /// HasOpEq s2; /// if (s1 != s2) /// return; /// } /// /// struct HasSpaceship /// { /// bool operator<=>(const HasOpEq &) const; /// }; /// /// void use_spaceship() /// { /// HasSpaceship s1; /// HasSpaceship s2; /// if (s1 != s2) /// return; /// } /// \endcode extern const internal::MapAnyOfMatcher<BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator> binaryOperation; /// Matches unary expressions that have a specific type of argument. 
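///
/// An editor's sketch (illustrative only): this matcher also composes with
/// the \c sizeOfExpr and \c ofKind helpers documented further down, e.g.
/// \code
///   sizeOfExpr(hasArgumentOfType(asString("float")))
/// \endcode
/// would match only \c sizeof(b) in the example that follows.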
///
/// Given
/// \code
///   int a, c; float b; int s = sizeof(a) + sizeof(b) + alignof(c);
/// \endcode
/// unaryExprOrTypeTraitExpr(hasArgumentOfType(asString("int")))
///   matches \c sizeof(a) and \c alignof(c)
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, hasArgumentOfType,
              internal::Matcher<QualType>, InnerMatcher) {
  const QualType ArgumentType = Node.getTypeOfArgument();
  return InnerMatcher.matches(ArgumentType, Finder, Builder);
}

/// Matches unary expressions of a certain kind.
///
/// Given
/// \code
///   int x;
///   int s = sizeof(x) + alignof(x);
/// \endcode
/// unaryExprOrTypeTraitExpr(ofKind(UETT_SizeOf))
///   matches \c sizeof(x)
///
/// If the matcher is used from clang-query, UnaryExprOrTypeTrait parameter
/// should be passed as a quoted string. e.g., ofKind("UETT_SizeOf").
AST_MATCHER_P(UnaryExprOrTypeTraitExpr, ofKind, UnaryExprOrTypeTrait, Kind) {
  return Node.getKind() == Kind;
}

/// Same as unaryExprOrTypeTraitExpr, but only matching
/// alignof.
inline internal::BindableMatcher<Stmt> alignOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(anyOf(ofKind(UETT_AlignOf), ofKind(UETT_PreferredAlignOf)),
            InnerMatcher)));
}

/// Same as unaryExprOrTypeTraitExpr, but only matching
/// sizeof.
inline internal::BindableMatcher<Stmt> sizeOfExpr(
    const internal::Matcher<UnaryExprOrTypeTraitExpr> &InnerMatcher) {
  return stmt(unaryExprOrTypeTraitExpr(
      allOf(ofKind(UETT_SizeOf), InnerMatcher)));
}

/// Matches NamedDecl nodes that have the specified name.
///
/// Supports specifying enclosing namespaces or classes by prefixing the name
/// with '<enclosing>::'.
/// Does not match typedefs of an underlying type with the given name.
///
/// Example matches X (Name == "X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (Name is one of "::a::b::X", "a::b::X", "b::X", "X")
/// \code
///   namespace a { namespace b { class X; } }
/// \endcode
inline internal::Matcher<NamedDecl> hasName(StringRef Name) {
  return internal::Matcher<NamedDecl>(
      new internal::HasNameMatcher({std::string(Name)}));
}

/// Matches NamedDecl nodes that have any of the specified names.
///
/// This matcher is only provided as a performance optimization of hasName.
/// \code
///     hasAnyName(a, b, c)
/// \endcode
/// is equivalent to, but faster than
/// \code
///     anyOf(hasName(a), hasName(b), hasName(c))
/// \endcode
extern const internal::VariadicFunction<internal::Matcher<NamedDecl>,
                                        StringRef, internal::hasAnyNameFunc>
    hasAnyName;

/// Matches NamedDecl nodes whose fully qualified names contain
/// a substring matched by the given RegExp.
///
/// Supports specifying enclosing namespaces or classes by
/// prefixing the name with '<enclosing>::'. Does not match typedefs
/// of an underlying type with the given name.
///
/// Example matches X (regexp == "::X")
/// \code
///   class X;
/// \endcode
///
/// Example matches X (regexp is one of "::X", "^foo::.*X", among others)
/// \code
///   namespace foo { namespace bar { class X; } }
/// \endcode
AST_MATCHER_REGEX(NamedDecl, matchesName, RegExp) {
  std::string FullNameString = "::" + Node.getQualifiedNameAsString();
  return RegExp->match(FullNameString);
}

/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
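///
/// An editor's note (illustrative only): because the matcher is polymorphic,
/// the same restriction can be applied either at the call site or at the
/// declaration, e.g.
/// \code
///   cxxOperatorCallExpr(hasOverloadedOperatorName("*"))
///   functionDecl(hasOverloadedOperatorName("*"))
/// \endcode
/// (see the fuller example below).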
///
/// Given:
/// \code
///   class A { int operator*(); };
///   const A &operator<<(const A &a, const A &b);
///   A a;
///   a << a;   // <-- This matches
/// \endcode
///
/// \c cxxOperatorCallExpr(hasOverloadedOperatorName("<<")) matches the
/// specified line and
/// \c cxxRecordDecl(hasMethod(hasOverloadedOperatorName("*")))
/// matches the declaration of \c A.
///
/// Usable as: Matcher<CXXOperatorCallExpr>, Matcher<FunctionDecl>
inline internal::PolymorphicMatcherWithParam1<
    internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
    AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>
hasOverloadedOperatorName(StringRef Name) {
  return internal::PolymorphicMatcherWithParam1<
      internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
      AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>(
      {std::string(Name)});
}

/// Matches overloaded operator names.
///
/// Matches overloaded operator names specified in strings without the
/// "operator" prefix: e.g. "<<".
///
///   hasAnyOverloadedOperatorName("+", "-")
/// Is equivalent to
///   anyOf(hasOverloadedOperatorName("+"), hasOverloadedOperatorName("-"))
extern const internal::VariadicFunction<
    internal::PolymorphicMatcherWithParam1<
        internal::HasOverloadedOperatorNameMatcher, std::vector<std::string>,
        AST_POLYMORPHIC_SUPPORTED_TYPES(CXXOperatorCallExpr, FunctionDecl)>,
    StringRef, internal::hasAnyOverloadedOperatorNameFunc>
    hasAnyOverloadedOperatorName;

/// Matches template-dependent, but known, member names.
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows matching on the known name of members.
///
/// Given
/// \code
///   template <typename T>
///   struct S {
///       void mem();
///   };
///   template <typename T>
///   void x() {
///       S<T> s;
///       s.mem();
///   }
/// \endcode
/// \c cxxDependentScopeMemberExpr(hasMemberName("mem")) matches `s.mem()`
AST_MATCHER_P(CXXDependentScopeMemberExpr, hasMemberName, std::string, N) {
  return Node.getMember().getAsString() == N;
}

/// Matches template-dependent, but known, member names against an
/// already-bound node.
///
/// In template declarations, dependent members are not resolved and so can
/// not be matched to particular named declarations.
///
/// This matcher allows matching on the name of already-bound VarDecl,
/// FieldDecl and CXXMethodDecl nodes.
/// /// Given /// \code /// template <typename T> /// struct S { /// void mem(); /// }; /// template <typename T> /// void x() { /// S<T> s; /// s.mem(); /// } /// \endcode /// The matcher /// @code /// \c cxxDependentScopeMemberExpr( /// hasObjectExpression(declRefExpr(hasType(templateSpecializationType( /// hasDeclaration(classTemplateDecl(has(cxxRecordDecl(has( /// cxxMethodDecl(hasName("mem")).bind("templMem") /// ))))) /// )))), /// memberHasSameNameAsBoundNode("templMem") /// ) /// @endcode /// first matches and binds the @c mem member of the @c S template, then /// compares its name to the usage in @c s.mem() in the @c x function template AST_MATCHER_P(CXXDependentScopeMemberExpr, memberHasSameNameAsBoundNode, std::string, BindingID) { auto MemberName = Node.getMember().getAsString(); return Builder->removeBindings( [this, MemberName](const BoundNodesMap &Nodes) { const auto &BN = Nodes.getNode(this->BindingID); if (const auto *ND = BN.get<NamedDecl>()) { if (!isa<FieldDecl, CXXMethodDecl, VarDecl>(ND)) return true; return ND->getName() != MemberName; } return true; }); } /// Matches C++ classes that are directly or indirectly derived from a class /// matching \c Base, or Objective-C classes that directly or indirectly /// subclass a class matching \c Base. /// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, Z, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("NSObject")) /// \code /// @interface NSObject @end /// @interface Bar : NSObject @end /// \endcode /// /// Usable as: Matcher<CXXRecordDecl>, Matcher<ObjCInterfaceDecl> AST_POLYMORPHIC_MATCHER_P( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/false); // The node must be an Objective-C class. const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/false); } /// Overloaded method as shortcut for \c isDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ classes that have a direct or indirect base matching \p /// BaseSpecMatcher. 
/// /// Example: /// matcher hasAnyBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; //matches IndirectlyDerived /// \endcode /// // FIXME: Refactor this and isDerivedFrom to reuse implementation. AST_MATCHER_P(CXXRecordDecl, hasAnyBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return internal::matchesAnyBase(Node, BaseSpecMatcher, Finder, Builder); } /// Matches C++ classes that have a direct base matching \p BaseSpecMatcher. /// /// Example: /// matcher hasDirectBase(hasType(cxxRecordDecl(hasName("SpecialBase")))) /// \code /// class Foo; /// class Bar : Foo {}; /// class Baz : Bar {}; /// class SpecialBase; /// class Proxy : SpecialBase {}; // matches Proxy /// class IndirectlyDerived : Proxy {}; // doesn't match /// \endcode AST_MATCHER_P(CXXRecordDecl, hasDirectBase, internal::Matcher<CXXBaseSpecifier>, BaseSpecMatcher) { return Node.hasDefinition() && llvm::any_of(Node.bases(), [&](const CXXBaseSpecifier &Base) { return BaseSpecMatcher.matches(Base, Finder, Builder); }); } /// Similar to \c isDerivedFrom(), but also matches classes that directly /// match \c Base. AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { const auto M = anyOf(Base, isDerivedFrom(Base)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Overloaded method as shortcut for /// \c isSameOrDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isSameOrDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isSameOrDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches C++ or Objective-C classes that are directly derived from a class /// matching \c Base. /// /// Note that a class is not considered to be derived from itself. /// /// Example matches Y, C (Base == hasName("X")) /// \code /// class X; /// class Y : public X {}; // directly derived /// class Z : public Y {}; // indirectly derived /// typedef X A; /// typedef A B; /// class C : public B {}; // derived from a typedef of X /// \endcode /// /// In the following example, Bar matches isDerivedFrom(hasName("X")): /// \code /// class Foo; /// typedef Foo X; /// class Bar : public Foo {}; // derived from a type that X is a typedef of /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), internal::Matcher<NamedDecl>, Base, 0) { // Check if the node is a C++ struct/union/class. if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Finder->classIsDerivedFrom(RD, Base, Builder, /*Directly=*/true); // The node must be an Objective-C class. 
const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Finder->objcClassIsDerivedFrom(InterfaceDecl, Base, Builder, /*Directly=*/true); } /// Overloaded method as shortcut for \c isDirectlyDerivedFrom(hasName(...)). AST_POLYMORPHIC_MATCHER_P_OVERLOAD( isDirectlyDerivedFrom, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, ObjCInterfaceDecl), std::string, BaseName, 1) { if (BaseName.empty()) return false; const auto M = isDirectlyDerivedFrom(hasName(BaseName)); if (const auto *RD = dyn_cast<CXXRecordDecl>(&Node)) return Matcher<CXXRecordDecl>(M).matches(*RD, Finder, Builder); const auto *InterfaceDecl = cast<ObjCInterfaceDecl>(&Node); return Matcher<ObjCInterfaceDecl>(M).matches(*InterfaceDecl, Finder, Builder); } /// Matches the first method of a class or struct that satisfies \c /// InnerMatcher. /// /// Given: /// \code /// class A { void func(); }; /// class B { void member(); }; /// \endcode /// /// \c cxxRecordDecl(hasMethod(hasName("func"))) matches the declaration of /// \c A but not \c B. AST_MATCHER_P(CXXRecordDecl, hasMethod, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result(*Builder); auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.method_begin(), Node.method_end(), Finder, &Result); if (MatchIt == Node.method_end()) return false; if (Finder->isTraversalIgnoringImplicitNodes() && (*MatchIt)->isImplicit()) return false; *Builder = std::move(Result); return true; } /// Matches the generated class of lambda expressions. /// /// Given: /// \code /// auto x = []{}; /// \endcode /// /// \c cxxRecordDecl(isLambda()) matches the implicit class declaration of /// \c decltype(x) AST_MATCHER(CXXRecordDecl, isLambda) { return Node.isLambda(); } /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y /// (matcher = cxxRecordDecl(has(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. /// /// Usable as: Any Matcher /// Note that has is direct matcher, so it also matches things like implicit /// casts and paren casts. If you are matching with expr then you should /// probably consider using ignoringParenImpCasts like: /// has(ignoringParenImpCasts(expr())). extern const internal::ArgumentAdaptingMatcherFunc<internal::HasMatcher> has; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Z /// (matcher = cxxRecordDecl(hasDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; // Matches X, because X::X is a class of name X inside X. /// class Y { class X {}; }; /// class Z { class Y { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasDescendantMatcher> hasDescendant; /// Matches AST nodes that have child AST nodes that match the /// provided matcher. /// /// Example matches X, Y, Y::X, Z::Y, Z::Y::X /// (matcher = cxxRecordDecl(forEach(cxxRecordDecl(hasName("X"))) /// \code /// class X {}; /// class Y { class X {}; }; // Matches Y, because Y::X is a class of name X /// // inside Y. /// class Z { class Y { class X {}; }; }; // Does not match Z. /// \endcode /// /// ChildT must be an AST base type. 
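///
/// For instance, as an illustrative sketch (the binding name "field" is just
/// an example, not from the original docs),
/// \code
///   cxxRecordDecl(forEach(fieldDecl().bind("field")))
/// \endcode
/// produces one match per field declared directly inside the record.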
/// /// As opposed to 'has', 'forEach' will cause a match for each result that /// matches instead of only on the first one. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc<internal::ForEachMatcher> forEach; /// Matches AST nodes that have descendant AST nodes that match the /// provided matcher. /// /// Example matches X, A, A::X, B, B::C, B::C::X /// (matcher = cxxRecordDecl(forEachDescendant(cxxRecordDecl(hasName("X"))))) /// \code /// class X {}; /// class A { class X {}; }; // Matches A, because A::X is a class of name /// // X inside A. /// class B { class C { class X {}; }; }; /// \endcode /// /// DescendantT must be an AST base type. /// /// As opposed to 'hasDescendant', 'forEachDescendant' will cause a match for /// each result that matches instead of only on the first one. /// /// Note: Recursively combined ForEachDescendant can cause many matches: /// cxxRecordDecl(forEachDescendant(cxxRecordDecl( /// forEachDescendant(cxxRecordDecl()) /// ))) /// will match 10 times (plus injected class name matches) on: /// \code /// class A { class B { class C { class D { class E {}; }; }; }; }; /// \endcode /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::ForEachDescendantMatcher> forEachDescendant; /// Matches if the node or any descendant matches. /// /// Generates results for each match. /// /// For example, in: /// \code /// class A { class B {}; class C {}; }; /// \endcode /// The matcher: /// \code /// cxxRecordDecl(hasName("::A"), /// findAll(cxxRecordDecl(isDefinition()).bind("m"))) /// \endcode /// will generate results for \c A, \c B and \c C. /// /// Usable as: Any Matcher template <typename T> internal::Matcher<T> findAll(const internal::Matcher<T> &Matcher) { return eachOf(Matcher, forEachDescendant(Matcher)); } /// Matches AST nodes that have a parent that matches the provided /// matcher. /// /// Given /// \code /// void f() { for (;;) { int x = 42; if (true) { int x = 43; } } } /// \endcode /// \c compoundStmt(hasParent(ifStmt())) matches "{ int x = 43; }". /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasParentMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasParent; /// Matches AST nodes that have an ancestor that matches the provided /// matcher. /// /// Given /// \code /// void f() { if (true) { int x = 42; } } /// void g() { for (;;) { int x = 43; } } /// \endcode /// \c expr(integerLiteral(hasAncestor(ifStmt()))) matches \c 42, but not 43. /// /// Usable as: Any Matcher extern const internal::ArgumentAdaptingMatcherFunc< internal::HasAncestorMatcher, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>, internal::TypeList<Decl, NestedNameSpecifierLoc, Stmt, TypeLoc>> hasAncestor; /// Matches if the provided matcher does not match. /// /// Example matches Y (matcher = cxxRecordDecl(unless(hasName("X")))) /// \code /// class X {}; /// class Y {}; /// \endcode /// /// Usable as: Any Matcher extern const internal::VariadicOperatorMatcherFunc<1, 1> unless; /// Matches a node if the declaration associated with that node /// matches the given matcher. 
/// /// The associated declaration is: /// - for type nodes, the declaration of the underlying type /// - for CallExpr, the declaration of the callee /// - for MemberExpr, the declaration of the referenced member /// - for CXXConstructExpr, the declaration of the constructor /// - for CXXNewExpr, the declaration of the operator new /// - for ObjCIvarExpr, the declaration of the ivar /// /// For type nodes, hasDeclaration will generally match the declaration of the /// sugared type. Given /// \code /// class X {}; /// typedef X Y; /// Y y; /// \endcode /// in varDecl(hasType(hasDeclaration(decl()))) the decl will match the /// typedefDecl. A common use case is to match the underlying, desugared type. /// This can be achieved by using the hasUnqualifiedDesugaredType matcher: /// \code /// varDecl(hasType(hasUnqualifiedDesugaredType( /// recordType(hasDeclaration(decl()))))) /// \endcode /// In this matcher, the decl will match the CXXRecordDecl of class X. /// /// Usable as: Matcher<AddrLabelExpr>, Matcher<CallExpr>, /// Matcher<CXXConstructExpr>, Matcher<CXXNewExpr>, Matcher<DeclRefExpr>, /// Matcher<EnumType>, Matcher<InjectedClassNameType>, Matcher<LabelStmt>, /// Matcher<MemberExpr>, Matcher<QualType>, Matcher<RecordType>, /// Matcher<TagType>, Matcher<TemplateSpecializationType>, /// Matcher<TemplateTypeParmType>, Matcher<TypedefType>, /// Matcher<UnresolvedUsingType> inline internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)> hasDeclaration(const internal::Matcher<Decl> &InnerMatcher) { return internal::PolymorphicMatcherWithParam1< internal::HasDeclarationMatcher, internal::Matcher<Decl>, void(internal::HasDeclarationSupportedTypes)>(InnerMatcher); } /// Matches a \c NamedDecl whose underlying declaration matches the given /// matcher. /// /// Given /// \code /// namespace N { template<class T> void f(T t); } /// template <class T> void g() { using N::f; f(T()); } /// \endcode /// \c unresolvedLookupExpr(hasAnyDeclaration( /// namedDecl(hasUnderlyingDecl(hasName("::N::f"))))) /// matches the use of \c f in \c g() . AST_MATCHER_P(NamedDecl, hasUnderlyingDecl, internal::Matcher<NamedDecl>, InnerMatcher) { const NamedDecl *UnderlyingDecl = Node.getUnderlyingDecl(); return UnderlyingDecl != nullptr && InnerMatcher.matches(*UnderlyingDecl, Finder, Builder); } /// Matches on the implicit object argument of a member call expression, after /// stripping off any parentheses or implicit casts. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y {}; /// void z(Y y, X x) { y.m(); (g()).m(); x.m(); } /// \endcode /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("Y"))))) /// matches `y.m()` and `(g()).m()`. /// cxxMemberCallExpr(on(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m()`. /// cxxMemberCallExpr(on(callExpr())) /// matches `(g()).m()`. /// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, on, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument() ->IgnoreParenImpCasts(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches on the receiver of an ObjectiveC Message expression. /// /// Example /// matcher = objCMessageExpr(hasReceiverType(asString("UIWebView *"))); /// matches the [webView ...] message invocation. /// \code /// NSString *webViewJavaScript = ... /// UIWebView *webView = ... 
/// [webView stringByEvaluatingJavaScriptFromString:webViewJavascript]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasReceiverType, internal::Matcher<QualType>, InnerMatcher) { const QualType TypeDecl = Node.getReceiverType(); return InnerMatcher.matches(TypeDecl, Finder, Builder); } /// Returns true when the Objective-C method declaration is a class method. /// /// Example /// matcher = objcMethodDecl(isClassMethod()) /// matches /// \code /// @interface I + (void)foo; @end /// \endcode /// but not /// \code /// @interface I - (void)bar; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isClassMethod) { return Node.isClassMethod(); } /// Returns true when the Objective-C method declaration is an instance method. /// /// Example /// matcher = objcMethodDecl(isInstanceMethod()) /// matches /// \code /// @interface I - (void)bar; @end /// \endcode /// but not /// \code /// @interface I + (void)foo; @end /// \endcode AST_MATCHER(ObjCMethodDecl, isInstanceMethod) { return Node.isInstanceMethod(); } /// Returns true when the Objective-C message is sent to a class. /// /// Example /// matcher = objcMessageExpr(isClassMessage()) /// matches /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode /// but not /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isClassMessage) { return Node.isClassMessage(); } /// Returns true when the Objective-C message is sent to an instance. /// /// Example /// matcher = objcMessageExpr(isInstanceMessage()) /// matches /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// but not /// \code /// [NSString stringWithFormat:@"format"]; /// \endcode AST_MATCHER(ObjCMessageExpr, isInstanceMessage) { return Node.isInstanceMessage(); } /// Matches if the Objective-C message is sent to an instance, /// and the inner matcher matches on that instance. /// /// For example the method call in /// \code /// NSString *x = @"hello"; /// [x containsString:@"h"]; /// \endcode /// is matched by /// objcMessageExpr(hasReceiver(declRefExpr(to(varDecl(hasName("x")))))) AST_MATCHER_P(ObjCMessageExpr, hasReceiver, internal::Matcher<Expr>, InnerMatcher) { const Expr *ReceiverNode = Node.getInstanceReceiver(); return (ReceiverNode != nullptr && InnerMatcher.matches(*ReceiverNode->IgnoreParenImpCasts(), Finder, Builder)); } /// Matches when BaseName == Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("loadHTMLString:baseURL:")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. /// \code /// [self.bodyView loadHTMLString:html baseURL:NULL]; /// \endcode AST_MATCHER_P(ObjCMessageExpr, hasSelector, std::string, BaseName) { Selector Sel = Node.getSelector(); return BaseName.compare(Sel.getAsString()) == 0; } /// Matches when at least one of the supplied string equals to the /// Selector.getAsString() /// /// matcher = objCMessageExpr(hasSelector("methodA:", "methodB:")); /// matches both of the expressions below: /// \code /// [myObj methodA:argA]; /// [myObj methodB:argB]; /// \endcode extern const internal::VariadicFunction<internal::Matcher<ObjCMessageExpr>, StringRef, internal::hasAnySelectorFunc> hasAnySelector; /// Matches ObjC selectors whose name contains /// a substring matched by the given RegExp. /// matcher = objCMessageExpr(matchesSelector("loadHTMLString\:baseURL?")); /// matches the outer message expr in the code below, but NOT the message /// invocation for self.bodyView. 
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_REGEX(ObjCMessageExpr, matchesSelector, RegExp) {
  std::string SelectorString = Node.getSelector().getAsString();
  return RegExp->match(SelectorString);
}

/// Matches when the selector is the empty selector.
///
/// Matches only when the selector of the objCMessageExpr is NULL. This may
/// represent an error condition in the tree!
AST_MATCHER(ObjCMessageExpr, hasNullSelector) {
  return Node.getSelector().isNull();
}

/// Matches when the selector is a unary selector.
///
/// matcher = objCMessageExpr(hasUnarySelector());
/// matches self.bodyView in the code below, but NOT the outer message
/// invocation of "loadHTMLString:baseURL:".
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasUnarySelector) {
  return Node.getSelector().isUnarySelector();
}

/// Matches when the selector is a keyword selector.
///
/// objCMessageExpr(hasKeywordSelector()) matches the generated setFrame
/// message expression in
///
/// \code
/// UIWebView *webView = ...;
/// CGRect bodyFrame = webView.frame;
/// bodyFrame.size.height = self.bodyContentHeight;
/// webView.frame = bodyFrame;
/// // ^---- matches here
/// \endcode
AST_MATCHER(ObjCMessageExpr, hasKeywordSelector) {
  return Node.getSelector().isKeywordSelector();
}

/// Matches when the selector has the specified number of arguments.
///
/// matcher = objCMessageExpr(numSelectorArgs(0));
/// matches self.bodyView in the code below
///
/// matcher = objCMessageExpr(numSelectorArgs(2));
/// matches the invocation of "loadHTMLString:baseURL:" but not that
/// of self.bodyView
/// \code
/// [self.bodyView loadHTMLString:html baseURL:NULL];
/// \endcode
AST_MATCHER_P(ObjCMessageExpr, numSelectorArgs, unsigned, N) {
  return Node.getSelector().getNumArgs() == N;
}

/// Matches if the call expression's callee expression matches.
///
/// Given
/// \code
/// class Y { void x() { this->x(); x(); Y y; y.x(); } };
/// void f() { f(); }
/// \endcode
/// callExpr(callee(expr()))
///   matches this->x(), x(), y.x(), f()
/// with callee(...)
///   matching this->x, x, y.x, f respectively
///
/// Note: Callee cannot take the more general internal::Matcher<Expr>
/// because this introduces ambiguous overloads with calls to Callee taking an
/// internal::Matcher<Decl>, as the matcher hierarchy is purely
/// implemented in terms of implicit casts.
AST_MATCHER_P(CallExpr, callee, internal::Matcher<Stmt>,
              InnerMatcher) {
  const Expr *ExprNode = Node.getCallee();
  return (ExprNode != nullptr &&
          InnerMatcher.matches(*ExprNode, Finder, Builder));
}

/// Matches if the call expression's callee's declaration matches the
/// given matcher.
///
/// Example matches y.x() (matcher = callExpr(callee(
///                                    cxxMethodDecl(hasName("x")))))
/// \code
/// class Y { public: void x(); };
/// void z() { Y y; y.x(); }
/// \endcode
AST_MATCHER_P_OVERLOAD(CallExpr, callee, internal::Matcher<Decl>, InnerMatcher,
                       1) {
  return callExpr(hasDeclaration(InnerMatcher)).matches(Node, Finder, Builder);
}

/// Matches if the expression's or declaration's type matches a type
/// matcher.
/// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and U (matcher = typedefDecl(hasType(asString("int"))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// typedef int U; /// class Y { friend class X; }; /// \endcode AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, TypedefNameDecl, ValueDecl), internal::Matcher<QualType>, InnerMatcher, 0) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return InnerMatcher.matches(QT, Finder, Builder); return false; } /// Overloaded to match the declaration of the expression's or value /// declaration's type. /// /// In case of a value declaration (for example a variable declaration), /// this resolves one layer of indirection. For example, in the value /// declaration "X x;", cxxRecordDecl(hasName("X")) matches the declaration of /// X, while varDecl(hasType(cxxRecordDecl(hasName("X")))) matches the /// declaration of x. /// /// Example matches x (matcher = expr(hasType(cxxRecordDecl(hasName("X"))))) /// and z (matcher = varDecl(hasType(cxxRecordDecl(hasName("X"))))) /// and friend class X (matcher = friendDecl(hasType("X")) /// \code /// class X {}; /// void y(X &x) { x; X z; } /// class Y { friend class X; }; /// \endcode /// /// Example matches class Derived /// (matcher = cxxRecordDecl(hasAnyBase(hasType(cxxRecordDecl(hasName("Base")))))) /// \code /// class Base {}; /// class Derived : Base {}; /// \endcode /// /// Usable as: Matcher<Expr>, Matcher<FriendDecl>, Matcher<ValueDecl>, /// Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER_P_OVERLOAD( hasType, AST_POLYMORPHIC_SUPPORTED_TYPES(Expr, FriendDecl, ValueDecl, CXXBaseSpecifier), internal::Matcher<Decl>, InnerMatcher, 1) { QualType QT = internal::getUnderlyingType(Node); if (!QT.isNull()) return qualType(hasDeclaration(InnerMatcher)).matches(QT, Finder, Builder); return false; } /// Matches if the type location of the declarator decl's type matches /// the inner matcher. /// /// Given /// \code /// int x; /// \endcode /// declaratorDecl(hasTypeLoc(loc(asString("int")))) /// matches int x AST_MATCHER_P(DeclaratorDecl, hasTypeLoc, internal::Matcher<TypeLoc>, Inner) { if (!Node.getTypeSourceInfo()) // This happens for example for implicit destructors. return false; return Inner.matches(Node.getTypeSourceInfo()->getTypeLoc(), Finder, Builder); } /// Matches if the matched type is represented by the given string. /// /// Given /// \code /// class Y { public: void x(); }; /// void z() { Y* y; y->x(); } /// \endcode /// cxxMemberCallExpr(on(hasType(asString("class Y *")))) /// matches y->x() AST_MATCHER_P(QualType, asString, std::string, Name) { return Name == Node.getAsString(); } /// Matches if the matched type is a pointer type and the pointee type /// matches the specified matcher. /// /// Example matches y->x() /// (matcher = cxxMemberCallExpr(on(hasType(pointsTo /// cxxRecordDecl(hasName("Y"))))))) /// \code /// class Y { public: void x(); }; /// void z() { Y *y; y->x(); } /// \endcode AST_MATCHER_P( QualType, pointsTo, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isAnyPointerType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Overloaded to match the pointee type's declaration. 
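///
/// For example, as an illustrative sketch (not part of the original docs),
/// \code
///   varDecl(hasType(pointsTo(cxxRecordDecl(hasName("Y")))))
/// \endcode
/// would match the declaration of \c y (declared as \c Y*) in the snippet
/// above.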
AST_MATCHER_P_OVERLOAD(QualType, pointsTo, internal::Matcher<Decl>, InnerMatcher, 1) { return pointsTo(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if the matched type matches the unqualified desugared /// type of the matched node. /// /// For example, in: /// \code /// class A {}; /// using B = A; /// \endcode /// The matcher type(hasUnqualifiedDesugaredType(recordType())) matches /// both B and A. AST_MATCHER_P(Type, hasUnqualifiedDesugaredType, internal::Matcher<Type>, InnerMatcher) { return InnerMatcher.matches(*Node.getUnqualifiedDesugaredType(), Finder, Builder); } /// Matches if the matched type is a reference type and the referenced /// type matches the specified matcher. /// /// Example matches X &x and const X &y /// (matcher = varDecl(hasType(references(cxxRecordDecl(hasName("X")))))) /// \code /// class X { /// void a(X b) { /// X &x = b; /// const X &y = b; /// } /// }; /// \endcode AST_MATCHER_P(QualType, references, internal::Matcher<QualType>, InnerMatcher) { return (!Node.isNull() && Node->isReferenceType() && InnerMatcher.matches(Node->getPointeeType(), Finder, Builder)); } /// Matches QualTypes whose canonical type matches InnerMatcher. /// /// Given: /// \code /// typedef int &int_ref; /// int a; /// int_ref b = a; /// \endcode /// /// \c varDecl(hasType(qualType(referenceType()))))) will not match the /// declaration of b but \c /// varDecl(hasType(qualType(hasCanonicalType(referenceType())))))) does. AST_MATCHER_P(QualType, hasCanonicalType, internal::Matcher<QualType>, InnerMatcher) { if (Node.isNull()) return false; return InnerMatcher.matches(Node.getCanonicalType(), Finder, Builder); } /// Overloaded to match the referenced type's declaration. AST_MATCHER_P_OVERLOAD(QualType, references, internal::Matcher<Decl>, InnerMatcher, 1) { return references(qualType(hasDeclaration(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches on the implicit object argument of a member call expression. Unlike /// `on`, matches the argument directly without stripping away anything. /// /// Given /// \code /// class Y { public: void m(); }; /// Y g(); /// class X : public Y { void g(); }; /// void z(Y y, X x) { y.m(); x.m(); x.g(); (g()).m(); } /// \endcode /// cxxMemberCallExpr(onImplicitObjectArgument(hasType( /// cxxRecordDecl(hasName("Y"))))) /// matches `y.m()`, `x.m()` and (g()).m(), but not `x.g()`. /// cxxMemberCallExpr(on(callExpr())) /// does not match `(g()).m()`, because the parens are not ignored. /// /// FIXME: Overload to allow directly matching types? AST_MATCHER_P(CXXMemberCallExpr, onImplicitObjectArgument, internal::Matcher<Expr>, InnerMatcher) { const Expr *ExprNode = Node.getImplicitObjectArgument(); return (ExprNode != nullptr && InnerMatcher.matches(*ExprNode, Finder, Builder)); } /// Matches if the type of the expression's implicit object argument either /// matches the InnerMatcher, or is a pointer to a type that matches the /// InnerMatcher. /// /// Given /// \code /// class Y { public: void m(); }; /// class X : public Y { void g(); }; /// void z() { Y y; y.m(); Y *p; p->m(); X x; x.m(); x.g(); } /// \endcode /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("Y"))))) /// matches `y.m()`, `p->m()` and `x.m()`. /// cxxMemberCallExpr(thisPointerType(hasDeclaration( /// cxxRecordDecl(hasName("X"))))) /// matches `x.g()`. 
AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<QualType>, InnerMatcher, 0) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Overloaded to match the type's declaration. AST_MATCHER_P_OVERLOAD(CXXMemberCallExpr, thisPointerType, internal::Matcher<Decl>, InnerMatcher, 1) { return onImplicitObjectArgument( anyOf(hasType(InnerMatcher), hasType(pointsTo(InnerMatcher)))) .matches(Node, Finder, Builder); } /// Matches a DeclRefExpr that refers to a declaration that matches the /// specified matcher. /// /// Example matches x in if(x) /// (matcher = declRefExpr(to(varDecl(hasName("x"))))) /// \code /// bool x; /// if (x) {} /// \endcode AST_MATCHER_P(DeclRefExpr, to, internal::Matcher<Decl>, InnerMatcher) { const Decl *DeclNode = Node.getDecl(); return (DeclNode != nullptr && InnerMatcher.matches(*DeclNode, Finder, Builder)); } /// Matches a \c DeclRefExpr that refers to a declaration through a /// specific using shadow declaration. /// /// Given /// \code /// namespace a { void f() {} } /// using a::f; /// void g() { /// f(); // Matches this .. /// a::f(); // .. but not this. /// } /// \endcode /// declRefExpr(throughUsingDecl(anything())) /// matches \c f() AST_MATCHER_P(DeclRefExpr, throughUsingDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { const NamedDecl *FoundDecl = Node.getFoundDecl(); if (const UsingShadowDecl *UsingDecl = dyn_cast<UsingShadowDecl>(FoundDecl)) return InnerMatcher.matches(*UsingDecl, Finder, Builder); return false; } /// Matches an \c OverloadExpr if any of the declarations in the set of /// overloads matches the given matcher. /// /// Given /// \code /// template <typename T> void foo(T); /// template <typename T> void bar(T); /// template <typename T> void baz(T t) { /// foo(t); /// bar(t); /// } /// \endcode /// unresolvedLookupExpr(hasAnyDeclaration( /// functionTemplateDecl(hasName("foo")))) /// matches \c foo in \c foo(t); but not \c bar in \c bar(t); AST_MATCHER_P(OverloadExpr, hasAnyDeclaration, internal::Matcher<Decl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.decls_begin(), Node.decls_end(), Finder, Builder) != Node.decls_end(); } /// Matches the Decl of a DeclStmt which has a single declaration. /// /// Given /// \code /// int a, b; /// int c; /// \endcode /// declStmt(hasSingleDecl(anything())) /// matches 'int c;' but not 'int a, b;'. AST_MATCHER_P(DeclStmt, hasSingleDecl, internal::Matcher<Decl>, InnerMatcher) { if (Node.isSingleDecl()) { const Decl *FoundDecl = Node.getSingleDecl(); return InnerMatcher.matches(*FoundDecl, Finder, Builder); } return false; } /// Matches a variable declaration that has an initializer expression /// that matches the given matcher. /// /// Example matches x (matcher = varDecl(hasInitializer(callExpr()))) /// \code /// bool y() { return true; } /// bool x = y(); /// \endcode AST_MATCHER_P( VarDecl, hasInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr *Initializer = Node.getAnyInitializer(); return (Initializer != nullptr && InnerMatcher.matches(*Initializer, Finder, Builder)); } /// \brief Matches a static variable with local scope. /// /// Example matches y (matcher = varDecl(isStaticLocal())) /// \code /// void f() { /// int x; /// static int y; /// } /// static int z; /// \endcode AST_MATCHER(VarDecl, isStaticLocal) { return Node.isStaticLocal(); } /// Matches a variable declaration that has function scope and is a /// non-static local variable. 
///
/// Example matches x (matcher = varDecl(hasLocalStorage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasLocalStorage) {
  return Node.hasLocalStorage();
}

/// Matches a variable declaration that does not have local storage.
///
/// Example matches y and z (matcher = varDecl(hasGlobalStorage()))
/// \code
/// void f() {
///   int x;
///   static int y;
/// }
/// int z;
/// \endcode
AST_MATCHER(VarDecl, hasGlobalStorage) {
  return Node.hasGlobalStorage();
}

/// Matches a variable declaration that has automatic storage duration.
///
/// Example matches x, but not y, z, or a.
/// (matcher = varDecl(hasAutomaticStorageDuration()))
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasAutomaticStorageDuration) {
  return Node.getStorageDuration() == SD_Automatic;
}

/// Matches a variable declaration that has static storage duration.
/// It includes variables declared at namespace scope and those declared
/// with "static" and "extern" storage class specifiers.
///
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// static int b;
/// extern int c;
/// \endcode
/// varDecl(hasStaticStorageDuration())
///   matches the variable declarations y, a, b and c.
AST_MATCHER(VarDecl, hasStaticStorageDuration) {
  return Node.getStorageDuration() == SD_Static;
}

/// Matches a variable declaration that has thread storage duration.
///
/// Example matches z, but not x, y, or a.
/// (matcher = varDecl(hasThreadStorageDuration()))
/// \code
/// void f() {
///   int x;
///   static int y;
///   thread_local int z;
/// }
/// int a;
/// \endcode
AST_MATCHER(VarDecl, hasThreadStorageDuration) {
  return Node.getStorageDuration() == SD_Thread;
}

/// Matches a variable declaration that is an exception variable from
/// a C++ catch block, or an Objective-C \@catch statement.
///
/// Example matches x (matcher = varDecl(isExceptionVariable()))
/// \code
/// void f(int y) {
///   try {
///   } catch (int x) {
///   }
/// }
/// \endcode
AST_MATCHER(VarDecl, isExceptionVariable) {
  return Node.isExceptionVariable();
}

/// Checks that a call expression or a constructor call expression has
/// a specific number of arguments (including absent default arguments).
///
/// Example matches f(0, 0) (matcher = callExpr(argumentCountIs(2)))
/// \code
/// void f(int x, int y);
/// f(0, 0);
/// \endcode
AST_POLYMORPHIC_MATCHER_P(argumentCountIs,
                          AST_POLYMORPHIC_SUPPORTED_TYPES(
                              CallExpr, CXXConstructExpr,
                              CXXUnresolvedConstructExpr, ObjCMessageExpr),
                          unsigned, N) {
  unsigned NumArgs = Node.getNumArgs();
  if (!Finder->isTraversalIgnoringImplicitNodes())
    return NumArgs == N;
  while (NumArgs) {
    if (!isa<CXXDefaultArgExpr>(Node.getArg(NumArgs - 1)))
      break;
    --NumArgs;
  }
  return NumArgs == N;
}

/// Matches the n'th argument of a call expression or a constructor
/// call expression.
/// /// Example matches y in x(y) /// (matcher = callExpr(hasArgument(0, declRefExpr()))) /// \code /// void x(int) { int y; x(y); } /// \endcode AST_POLYMORPHIC_MATCHER_P2(hasArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), unsigned, N, internal::Matcher<Expr>, InnerMatcher) { if (N >= Node.getNumArgs()) return false; const Expr *Arg = Node.getArg(N); if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg)) return false; return InnerMatcher.matches(*Arg->IgnoreParenImpCasts(), Finder, Builder); } /// Matches the n'th item of an initializer list expression. /// /// Example matches y. /// (matcher = initListExpr(hasInit(0, expr()))) /// \code /// int x{y}. /// \endcode AST_MATCHER_P2(InitListExpr, hasInit, unsigned, N, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { return N < Node.getNumInits() && InnerMatcher.matches(*Node.getInit(N), Finder, Builder); } /// Matches declaration statements that contain a specific number of /// declarations. /// /// Example: Given /// \code /// int a, b; /// int c; /// int d = 2, e; /// \endcode /// declCountIs(2) /// matches 'int a, b;' and 'int d = 2, e;', but not 'int c;'. AST_MATCHER_P(DeclStmt, declCountIs, unsigned, N) { return std::distance(Node.decl_begin(), Node.decl_end()) == (ptrdiff_t)N; } /// Matches the n'th declaration of a declaration statement. /// /// Note that this does not work for global declarations because the AST /// breaks up multiple-declaration DeclStmt's into multiple single-declaration /// DeclStmt's. /// Example: Given non-global declarations /// \code /// int a, b = 0; /// int c; /// int d = 2, e; /// \endcode /// declStmt(containsDeclaration( /// 0, varDecl(hasInitializer(anything())))) /// matches only 'int d = 2, e;', and /// declStmt(containsDeclaration(1, varDecl())) /// \code /// matches 'int a, b = 0' as well as 'int d = 2, e;' /// but 'int c;' is not matched. /// \endcode AST_MATCHER_P2(DeclStmt, containsDeclaration, unsigned, N, internal::Matcher<Decl>, InnerMatcher) { const unsigned NumDecls = std::distance(Node.decl_begin(), Node.decl_end()); if (N >= NumDecls) return false; DeclStmt::const_decl_iterator Iterator = Node.decl_begin(); std::advance(Iterator, N); return InnerMatcher.matches(**Iterator, Finder, Builder); } /// Matches a C++ catch statement that has a catch-all handler. /// /// Given /// \code /// try { /// // ... /// } catch (int) { /// // ... /// } catch (...) { /// // ... /// } /// \endcode /// cxxCatchStmt(isCatchAll()) matches catch(...) but not catch(int). AST_MATCHER(CXXCatchStmt, isCatchAll) { return Node.getExceptionDecl() == nullptr; } /// Matches a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl( /// hasAnyConstructorInitializer(anything()) /// ))) /// record matches Foo, hasAnyConstructorInitializer matches foo_(1) AST_MATCHER_P(CXXConstructorDecl, hasAnyConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { auto MatchIt = matchesFirstInPointerRange(InnerMatcher, Node.init_begin(), Node.init_end(), Finder, Builder); if (MatchIt == Node.init_end()) return false; return (*MatchIt)->isWritten() || !Finder->isTraversalIgnoringImplicitNodes(); } /// Matches the field declaration of a constructor initializer. 
/// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// forField(hasName("foo_")))))) /// matches Foo /// with forField matching foo_ AST_MATCHER_P(CXXCtorInitializer, forField, internal::Matcher<FieldDecl>, InnerMatcher) { const FieldDecl *NodeAsDecl = Node.getAnyMember(); return (NodeAsDecl != nullptr && InnerMatcher.matches(*NodeAsDecl, Finder, Builder)); } /// Matches the initializer expression of a constructor initializer. /// /// Given /// \code /// struct Foo { /// Foo() : foo_(1) { } /// int foo_; /// }; /// \endcode /// cxxRecordDecl(has(cxxConstructorDecl(hasAnyConstructorInitializer( /// withInitializer(integerLiteral(equals(1))))))) /// matches Foo /// with withInitializer matching (1) AST_MATCHER_P(CXXCtorInitializer, withInitializer, internal::Matcher<Expr>, InnerMatcher) { const Expr* NodeAsExpr = Node.getInit(); return (NodeAsExpr != nullptr && InnerMatcher.matches(*NodeAsExpr, Finder, Builder)); } /// Matches a constructor initializer if it is explicitly written in /// code (as opposed to implicitly added by the compiler). /// /// Given /// \code /// struct Foo { /// Foo() { } /// Foo(int) : foo_("A") { } /// string foo_; /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isWritten())) /// will match Foo(int), but not Foo() AST_MATCHER(CXXCtorInitializer, isWritten) { return Node.isWritten(); } /// Matches a constructor initializer if it is initializing a base, as /// opposed to a member. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isBaseInitializer())) /// will match E(), but not match D(int). AST_MATCHER(CXXCtorInitializer, isBaseInitializer) { return Node.isBaseInitializer(); } /// Matches a constructor initializer if it is initializing a member, as /// opposed to a base. /// /// Given /// \code /// struct B {}; /// struct D : B { /// int I; /// D(int i) : I(i) {} /// }; /// struct E : B { /// E() : B() {} /// }; /// \endcode /// cxxConstructorDecl(hasAnyConstructorInitializer(isMemberInitializer())) /// will match D(int), but not match E(). AST_MATCHER(CXXCtorInitializer, isMemberInitializer) { return Node.isMemberInitializer(); } /// Matches any argument of a call expression or a constructor call /// expression, or an ObjC-message-send expression. /// /// Given /// \code /// void x(int, int, int) { int y; x(1, y, 42); } /// \endcode /// callExpr(hasAnyArgument(declRefExpr())) /// matches x(1, y, 42) /// with hasAnyArgument(...) /// matching y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// void foo(I *i) { [i f:12]; } /// \endcode /// objcMessageExpr(hasAnyArgument(integerLiteral(equals(12)))) /// matches [i f:12] AST_POLYMORPHIC_MATCHER_P(hasAnyArgument, AST_POLYMORPHIC_SUPPORTED_TYPES( CallExpr, CXXConstructExpr, CXXUnresolvedConstructExpr, ObjCMessageExpr), internal::Matcher<Expr>, InnerMatcher) { for (const Expr *Arg : Node.arguments()) { if (Finder->isTraversalIgnoringImplicitNodes() && isa<CXXDefaultArgExpr>(Arg)) break; BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Arg, Finder, &Result)) { *Builder = std::move(Result); return true; } } return false; } /// Matches any capture of a lambda expression. 
/// /// Given /// \code /// void foo() { /// int x; /// auto f = [x](){}; /// } /// \endcode /// lambdaExpr(hasAnyCapture(anything())) /// matches [x](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<VarDecl>, InnerMatcher, 0) { for (const LambdaCapture &Capture : Node.captures()) { if (Capture.capturesVariable()) { BoundNodesTreeBuilder Result(*Builder); if (InnerMatcher.matches(*Capture.getCapturedVar(), Finder, &Result)) { *Builder = std::move(Result); return true; } } } return false; } /// Matches any capture of 'this' in a lambda expression. /// /// Given /// \code /// struct foo { /// void bar() { /// auto f = [this](){}; /// } /// } /// \endcode /// lambdaExpr(hasAnyCapture(cxxThisExpr())) /// matches [this](){}; AST_MATCHER_P_OVERLOAD(LambdaExpr, hasAnyCapture, internal::Matcher<CXXThisExpr>, InnerMatcher, 1) { return llvm::any_of(Node.captures(), [](const LambdaCapture &LC) { return LC.capturesThis(); }); } /// Matches a constructor call expression which uses list initialization. AST_MATCHER(CXXConstructExpr, isListInitialization) { return Node.isListInitialization(); } /// Matches a constructor call expression which requires /// zero initialization. /// /// Given /// \code /// void foo() { /// struct point { double x; double y; }; /// point pt[2] = { { 1.0, 2.0 } }; /// } /// \endcode /// initListExpr(has(cxxConstructExpr(requiresZeroInitialization())) /// will match the implicit array filler for pt[1]. AST_MATCHER(CXXConstructExpr, requiresZeroInitialization) { return Node.requiresZeroInitialization(); } /// Matches the n'th parameter of a function or an ObjC method /// declaration or a block. /// /// Given /// \code /// class X { void f(int x) {} }; /// \endcode /// cxxMethodDecl(hasParameter(0, hasType(varDecl()))) /// matches f(int x) {} /// with hasParameter(...) /// matching int x /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasParameter(0, hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P2(hasParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), unsigned, N, internal::Matcher<ParmVarDecl>, InnerMatcher) { return (N < Node.parameters().size() && InnerMatcher.matches(*Node.parameters()[N], Finder, Builder)); } /// Matches all arguments and their respective ParmVarDecl. /// /// Given /// \code /// void f(int i); /// int y; /// f(y); /// \endcode /// callExpr( /// forEachArgumentWithParam( /// declRefExpr(to(varDecl(hasName("y")))), /// parmVarDecl(hasType(isInteger())) /// )) /// matches f(y); /// with declRefExpr(...) /// matching int y /// and parmVarDecl(...) /// matching int i AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParam, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr), internal::Matcher<Expr>, ArgMatcher, internal::Matcher<ParmVarDecl>, ParamMatcher) { BoundNodesTreeBuilder Result; // The first argument of an overloaded member operator is the implicit object // argument of the method which should not be matched against a parameter, so // we skip over it here. BoundNodesTreeBuilder Matches; unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl())) .matches(Node, Finder, &Matches) ? 
1 : 0; int ParamIndex = 0; bool Matched = false; for (; ArgIndex < Node.getNumArgs(); ++ArgIndex) { BoundNodesTreeBuilder ArgMatches(*Builder); if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder, &ArgMatches)) { BoundNodesTreeBuilder ParamMatches(ArgMatches); if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl( hasParameter(ParamIndex, ParamMatcher)))), callExpr(callee(functionDecl( hasParameter(ParamIndex, ParamMatcher)))))) .matches(Node, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; } } ++ParamIndex; } *Builder = std::move(Result); return Matched; } /// Matches all arguments and their respective types for a \c CallExpr or /// \c CXXConstructExpr. It is very similar to \c forEachArgumentWithParam but /// it works on calls through function pointers as well. /// /// The difference is, that function pointers do not provide access to a /// \c ParmVarDecl, but only the \c QualType for each argument. /// /// Given /// \code /// void f(int i); /// int y; /// f(y); /// void (*f_ptr)(int) = f; /// f_ptr(y); /// \endcode /// callExpr( /// forEachArgumentWithParamType( /// declRefExpr(to(varDecl(hasName("y")))), /// qualType(isInteger()).bind("type) /// )) /// matches f(y) and f_ptr(y) /// with declRefExpr(...) /// matching int y /// and qualType(...) /// matching int AST_POLYMORPHIC_MATCHER_P2(forEachArgumentWithParamType, AST_POLYMORPHIC_SUPPORTED_TYPES(CallExpr, CXXConstructExpr), internal::Matcher<Expr>, ArgMatcher, internal::Matcher<QualType>, ParamMatcher) { BoundNodesTreeBuilder Result; // The first argument of an overloaded member operator is the implicit object // argument of the method which should not be matched against a parameter, so // we skip over it here. BoundNodesTreeBuilder Matches; unsigned ArgIndex = cxxOperatorCallExpr(callee(cxxMethodDecl())) .matches(Node, Finder, &Matches) ? 1 : 0; const FunctionProtoType *FProto = nullptr; if (const auto *Call = dyn_cast<CallExpr>(&Node)) { if (const auto *Value = dyn_cast_or_null<ValueDecl>(Call->getCalleeDecl())) { QualType QT = Value->getType().getCanonicalType(); // This does not necessarily lead to a `FunctionProtoType`, // e.g. K&R functions do not have a function prototype. if (QT->isFunctionPointerType()) FProto = QT->getPointeeType()->getAs<FunctionProtoType>(); if (QT->isMemberFunctionPointerType()) { const auto *MP = QT->getAs<MemberPointerType>(); assert(MP && "Must be member-pointer if its a memberfunctionpointer"); FProto = MP->getPointeeType()->getAs<FunctionProtoType>(); assert(FProto && "The call must have happened through a member function " "pointer"); } } } int ParamIndex = 0; bool Matched = false; for (; ArgIndex < Node.getNumArgs(); ++ArgIndex, ++ParamIndex) { BoundNodesTreeBuilder ArgMatches(*Builder); if (ArgMatcher.matches(*(Node.getArg(ArgIndex)->IgnoreParenCasts()), Finder, &ArgMatches)) { BoundNodesTreeBuilder ParamMatches(ArgMatches); // This test is cheaper compared to the big matcher in the next if. // Therefore, please keep this order. 
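      // FProto is only non-null when the callee was reached through a
      // function pointer or a member-function pointer; in that case the
      // parameter's QualType can be taken straight from the prototype.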
if (FProto) { QualType ParamType = FProto->getParamType(ParamIndex); if (ParamMatcher.matches(ParamType, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; continue; } } if (expr(anyOf(cxxConstructExpr(hasDeclaration(cxxConstructorDecl( hasParameter(ParamIndex, hasType(ParamMatcher))))), callExpr(callee(functionDecl( hasParameter(ParamIndex, hasType(ParamMatcher))))))) .matches(Node, Finder, &ParamMatches)) { Result.addMatch(ParamMatches); Matched = true; continue; } } } *Builder = std::move(Result); return Matched; } /// Matches the ParmVarDecl nodes that are at the N'th position in the parameter /// list. The parameter list could be that of either a block, function, or /// objc-method. /// /// /// Given /// /// \code /// void f(int a, int b, int c) { /// } /// \endcode /// /// ``parmVarDecl(isAtPosition(0))`` matches ``int a``. /// /// ``parmVarDecl(isAtPosition(1))`` matches ``int b``. AST_MATCHER_P(ParmVarDecl, isAtPosition, unsigned, N) { const clang::DeclContext *Context = Node.getParentFunctionOrMethod(); if (const auto *Decl = dyn_cast_or_null<FunctionDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; if (const auto *Decl = dyn_cast_or_null<BlockDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; if (const auto *Decl = dyn_cast_or_null<ObjCMethodDecl>(Context)) return N < Decl->param_size() && Decl->getParamDecl(N) == &Node; return false; } /// Matches any parameter of a function or an ObjC method declaration or a /// block. /// /// Does not match the 'this' parameter of a method. /// /// Given /// \code /// class X { void f(int x, int y, int z) {} }; /// \endcode /// cxxMethodDecl(hasAnyParameter(hasName("y"))) /// matches f(int x, int y, int z) {} /// with hasAnyParameter(...) /// matching int y /// /// For ObjectiveC, given /// \code /// @interface I - (void) f:(int) y; @end /// \endcode // /// the matcher objcMethodDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of method f with hasParameter /// matching y. /// /// For blocks, given /// \code /// b = ^(int y) { printf("%d", y) }; /// \endcode /// /// the matcher blockDecl(hasAnyParameter(hasName("y"))) /// matches the declaration of the block b with hasParameter /// matching y. AST_POLYMORPHIC_MATCHER_P(hasAnyParameter, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, ObjCMethodDecl, BlockDecl), internal::Matcher<ParmVarDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.param_begin(), Node.param_end(), Finder, Builder) != Node.param_end(); } /// Matches \c FunctionDecls and \c FunctionProtoTypes that have a /// specific parameter count. /// /// Given /// \code /// void f(int i) {} /// void g(int i, int j) {} /// void h(int i, int j); /// void j(int i); /// void k(int x, int y, int z, ...); /// \endcode /// functionDecl(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(2)) /// matches \c g and \c h /// functionProtoType(parameterCountIs(3)) /// matches \c k AST_POLYMORPHIC_MATCHER_P(parameterCountIs, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType), unsigned, N) { return Node.getNumParams() == N; } /// Matches \c FunctionDecls that have a noreturn attribute. 
/// /// Given /// \code /// void nope(); /// [[noreturn]] void a(); /// __attribute__((noreturn)) void b(); /// struct c { [[noreturn]] c(); }; /// \endcode /// functionDecl(isNoReturn()) /// matches all of those except /// \code /// void nope(); /// \endcode AST_MATCHER(FunctionDecl, isNoReturn) { return Node.isNoReturn(); } /// Matches the return type of a function declaration. /// /// Given: /// \code /// class X { int f() { return 1; } }; /// \endcode /// cxxMethodDecl(returns(asString("int"))) /// matches int f() { return 1; } AST_MATCHER_P(FunctionDecl, returns, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getReturnType(), Finder, Builder); } /// Matches extern "C" function or variable declarations. /// /// Given: /// \code /// extern "C" void f() {} /// extern "C" { void g() {} } /// void h() {} /// extern "C" int x = 1; /// extern "C" int y = 2; /// int z = 3; /// \endcode /// functionDecl(isExternC()) /// matches the declaration of f and g, but not the declaration of h. /// varDecl(isExternC()) /// matches the declaration of x and y, but not the declaration of z. AST_POLYMORPHIC_MATCHER(isExternC, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.isExternC(); } /// Matches variable/function declarations that have "static" storage /// class specifier ("static" keyword) written in the source. /// /// Given: /// \code /// static void f() {} /// static int i = 0; /// extern int j; /// int k; /// \endcode /// functionDecl(isStaticStorageClass()) /// matches the function declaration f. /// varDecl(isStaticStorageClass()) /// matches the variable declaration i. AST_POLYMORPHIC_MATCHER(isStaticStorageClass, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl)) { return Node.getStorageClass() == SC_Static; } /// Matches deleted function declarations. /// /// Given: /// \code /// void Func(); /// void DeletedFunc() = delete; /// \endcode /// functionDecl(isDeleted()) /// matches the declaration of DeletedFunc, but not Func. AST_MATCHER(FunctionDecl, isDeleted) { return Node.isDeleted(); } /// Matches defaulted function declarations. /// /// Given: /// \code /// class A { ~A(); }; /// class B { ~B() = default; }; /// \endcode /// functionDecl(isDefaulted()) /// matches the declaration of ~B, but not ~A. AST_MATCHER(FunctionDecl, isDefaulted) { return Node.isDefaulted(); } /// Matches weak function declarations. /// /// Given: /// \code /// void foo() __attribute__((__weakref__("__foo"))); /// void bar(); /// \endcode /// functionDecl(isWeak()) /// matches the weak declaration "foo", but not "bar". AST_MATCHER(FunctionDecl, isWeak) { return Node.isWeak(); } /// Matches functions that have a dynamic exception specification. /// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() noexcept(true); /// void i() noexcept(false); /// void j() throw(); /// void k() throw(int); /// void l() throw(...); /// \endcode /// functionDecl(hasDynamicExceptionSpec()) and /// functionProtoType(hasDynamicExceptionSpec()) /// match the declarations of j, k, and l, but not f, g, h, or i. AST_POLYMORPHIC_MATCHER(hasDynamicExceptionSpec, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { if (const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node)) return FnTy->hasDynamicExceptionSpec(); return false; } /// Matches functions that have a non-throwing exception specification. 
/// /// Given: /// \code /// void f(); /// void g() noexcept; /// void h() throw(); /// void i() throw(int); /// void j() noexcept(false); /// \endcode /// functionDecl(isNoThrow()) and functionProtoType(isNoThrow()) /// match the declarations of g, and h, but not f, i or j. AST_POLYMORPHIC_MATCHER(isNoThrow, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, FunctionProtoType)) { const FunctionProtoType *FnTy = internal::getFunctionProtoType(Node); // If the function does not have a prototype, then it is assumed to be a // throwing function (as it would if the function did not have any exception // specification). if (!FnTy) return false; // Assume the best for any unresolved exception specification. if (isUnresolvedExceptionSpec(FnTy->getExceptionSpecType())) return true; return FnTy->isNothrow(); } /// Matches constexpr variable and function declarations, /// and if constexpr. /// /// Given: /// \code /// constexpr int foo = 42; /// constexpr int bar(); /// void baz() { if constexpr(1 > 0) {} } /// \endcode /// varDecl(isConstexpr()) /// matches the declaration of foo. /// functionDecl(isConstexpr()) /// matches the declaration of bar. /// ifStmt(isConstexpr()) /// matches the if statement in baz. AST_POLYMORPHIC_MATCHER(isConstexpr, AST_POLYMORPHIC_SUPPORTED_TYPES(VarDecl, FunctionDecl, IfStmt)) { return Node.isConstexpr(); } /// Matches selection statements with initializer. /// /// Given: /// \code /// void foo() { /// if (int i = foobar(); i > 0) {} /// switch (int i = foobar(); i) {} /// for (auto& a = get_range(); auto& x : a) {} /// } /// void bar() { /// if (foobar() > 0) {} /// switch (foobar()) {} /// for (auto& x : get_range()) {} /// } /// \endcode /// ifStmt(hasInitStatement(anything())) /// matches the if statement in foo but not in bar. /// switchStmt(hasInitStatement(anything())) /// matches the switch statement in foo but not in bar. /// cxxForRangeStmt(hasInitStatement(anything())) /// matches the range for statement in foo but not in bar. AST_POLYMORPHIC_MATCHER_P(hasInitStatement, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, SwitchStmt, CXXForRangeStmt), internal::Matcher<Stmt>, InnerMatcher) { const Stmt *Init = Node.getInit(); return Init != nullptr && InnerMatcher.matches(*Init, Finder, Builder); } /// Matches the condition expression of an if statement, for loop, /// switch statement or conditional operator. /// /// Example matches true (matcher = hasCondition(cxxBoolLiteral(equals(true)))) /// \code /// if (true) {} /// \endcode AST_POLYMORPHIC_MATCHER_P( hasCondition, AST_POLYMORPHIC_SUPPORTED_TYPES(IfStmt, ForStmt, WhileStmt, DoStmt, SwitchStmt, AbstractConditionalOperator), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Condition = Node.getCond(); return (Condition != nullptr && InnerMatcher.matches(*Condition, Finder, Builder)); } /// Matches the then-statement of an if statement. /// /// Examples matches the if statement /// (matcher = ifStmt(hasThen(cxxBoolLiteral(equals(true))))) /// \code /// if (false) true; else false; /// \endcode AST_MATCHER_P(IfStmt, hasThen, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Then = Node.getThen(); return (Then != nullptr && InnerMatcher.matches(*Then, Finder, Builder)); } /// Matches the else-statement of an if statement. 
/// /// Examples matches the if statement /// (matcher = ifStmt(hasElse(cxxBoolLiteral(equals(true))))) /// \code /// if (false) false; else true; /// \endcode AST_MATCHER_P(IfStmt, hasElse, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Else = Node.getElse(); return (Else != nullptr && InnerMatcher.matches(*Else, Finder, Builder)); } /// Matches if a node equals a previously bound node. /// /// Matches a node if it equals the node previously bound to \p ID. /// /// Given /// \code /// class X { int a; int b; }; /// \endcode /// cxxRecordDecl( /// has(fieldDecl(hasName("a"), hasType(type().bind("t")))), /// has(fieldDecl(hasName("b"), hasType(type(equalsBoundNode("t")))))) /// matches the class \c X, as \c a and \c b have the same type. /// /// Note that when multiple matches are involved via \c forEach* matchers, /// \c equalsBoundNodes acts as a filter. /// For example: /// compoundStmt( /// forEachDescendant(varDecl().bind("d")), /// forEachDescendant(declRefExpr(to(decl(equalsBoundNode("d")))))) /// will trigger a match for each combination of variable declaration /// and reference to that variable declaration within a compound statement. AST_POLYMORPHIC_MATCHER_P(equalsBoundNode, AST_POLYMORPHIC_SUPPORTED_TYPES(Stmt, Decl, Type, QualType), std::string, ID) { // FIXME: Figure out whether it makes sense to allow this // on any other node types. // For *Loc it probably does not make sense, as those seem // unique. For NestedNameSpecifier it might make sense, as // those also have pointer identity, but I'm not sure whether // they're ever reused. internal::NotEqualsBoundNodePredicate Predicate; Predicate.ID = ID; Predicate.Node = DynTypedNode::create(Node); return Builder->removeBindings(Predicate); } /// Matches the condition variable statement in an if statement. /// /// Given /// \code /// if (A* a = GetAPointer()) {} /// \endcode /// hasConditionVariableStatement(...) /// matches 'A* a = GetAPointer()'. AST_MATCHER_P(IfStmt, hasConditionVariableStatement, internal::Matcher<DeclStmt>, InnerMatcher) { const DeclStmt* const DeclarationStatement = Node.getConditionVariableDeclStmt(); return DeclarationStatement != nullptr && InnerMatcher.matches(*DeclarationStatement, Finder, Builder); } /// Matches the index expression of an array subscript expression. /// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpr(hasIndex(integerLiteral())) /// matches \c i[1] with the \c integerLiteral() matching \c 1 AST_MATCHER_P(ArraySubscriptExpr, hasIndex, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getIdx()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches the base expression of an array subscript expression. /// /// Given /// \code /// int i[5]; /// void f() { i[1] = 42; } /// \endcode /// arraySubscriptExpr(hasBase(implicitCastExpr( /// hasSourceExpression(declRefExpr())))) /// matches \c i[1] with the \c declRefExpr() matching \c i AST_MATCHER_P(ArraySubscriptExpr, hasBase, internal::Matcher<Expr>, InnerMatcher) { if (const Expr* Expression = Node.getBase()) return InnerMatcher.matches(*Expression, Finder, Builder); return false; } /// Matches a 'for', 'while', 'do while' statement or a function /// definition that has a given body. Note that in case of functions /// this matcher only matches the definition itself and not the other /// declarations of the same function.
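/// /// As a sketch of how this composes with other matchers in this file, loops whose body is an empty compound statement could be found with something like /// \code /// whileStmt(hasBody(compoundStmt(statementCountIs(0)))) /// \endcode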
/// /// Given /// \code /// for (;;) {} /// \endcode /// hasBody(compoundStmt()) /// matches 'for (;;) {}' /// with compoundStmt() /// matching '{}' /// /// Given /// \code /// void f(); /// void f() {} /// \endcode /// hasBody(functionDecl()) /// matches 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void f();' AST_POLYMORPHIC_MATCHER_P(hasBody, AST_POLYMORPHIC_SUPPORTED_TYPES(DoStmt, ForStmt, WhileStmt, CXXForRangeStmt, FunctionDecl), internal::Matcher<Stmt>, InnerMatcher) { if (Finder->isTraversalIgnoringImplicitNodes() && isDefaultedHelper(&Node)) return false; const Stmt *const Statement = internal::GetBodyMatcher<NodeType>::get(Node); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches a function declaration that has a given body present in the AST. /// Note that this matcher matches all the declarations of a function whose /// body is present in the AST. /// /// Given /// \code /// void f(); /// void f() {} /// void g(); /// \endcode /// hasAnyBody(functionDecl()) /// matches both 'void f();' /// and 'void f() {}' /// with compoundStmt() /// matching '{}' /// but does not match 'void g();' AST_MATCHER_P(FunctionDecl, hasAnyBody, internal::Matcher<Stmt>, InnerMatcher) { const Stmt *const Statement = Node.getBody(); return (Statement != nullptr && InnerMatcher.matches(*Statement, Finder, Builder)); } /// Matches compound statements where at least one substatement matches /// a given matcher. Also matches StmtExprs that have CompoundStmt as children. /// /// Given /// \code /// { {}; 1+2; } /// \endcode /// hasAnySubstatement(compoundStmt()) /// matches '{ {}; 1+2; }' /// with compoundStmt() /// matching '{}' AST_POLYMORPHIC_MATCHER_P(hasAnySubstatement, AST_POLYMORPHIC_SUPPORTED_TYPES(CompoundStmt, StmtExpr), internal::Matcher<Stmt>, InnerMatcher) { const CompoundStmt *CS = CompoundStmtMatcher<NodeType>::get(Node); return CS && matchesFirstInPointerRange(InnerMatcher, CS->body_begin(), CS->body_end(), Finder, Builder) != CS->body_end(); } /// Checks that a compound statement contains a specific number of /// child statements. /// /// Example: Given /// \code /// { for (;;) {} } /// \endcode /// compoundStmt(statementCountIs(0))) /// matches '{}' /// but does not match the outer compound statement. AST_MATCHER_P(CompoundStmt, statementCountIs, unsigned, N) { return Node.size() == N; } /// Matches literals that are equal to the given value of type ValueT. /// /// Given /// \code /// f('\0', false, 3.14, 42); /// \endcode /// characterLiteral(equals(0)) /// matches '\0' /// cxxBoolLiteral(equals(false)) and cxxBoolLiteral(equals(0)) /// match false /// floatLiteral(equals(3.14)) and floatLiteral(equals(314e-2)) /// match 3.14 /// integerLiteral(equals(42)) /// matches 42 /// /// Note that you cannot directly match a negative numeric literal because the /// minus sign is not part of the literal: It is a unary operator whose operand /// is the positive numeric literal. 
Instead, you must use a unaryOperator() /// matcher to match the minus sign: /// /// unaryOperator(hasOperatorName("-"), /// hasUnaryOperand(integerLiteral(equals(13)))) /// /// Usable as: Matcher<CharacterLiteral>, Matcher<CXXBoolLiteralExpr>, /// Matcher<FloatingLiteral>, Matcher<IntegerLiteral> template <typename ValueT> internal::PolymorphicMatcherWithParam1<internal::ValueEqualsMatcher, ValueT> equals(const ValueT &Value) { return internal::PolymorphicMatcherWithParam1< internal::ValueEqualsMatcher, ValueT>(Value); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), bool, Value, 0) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, IntegerLiteral), unsigned, Value, 1) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } AST_POLYMORPHIC_MATCHER_P_OVERLOAD(equals, AST_POLYMORPHIC_SUPPORTED_TYPES(CharacterLiteral, CXXBoolLiteralExpr, FloatingLiteral, IntegerLiteral), double, Value, 2) { return internal::ValueEqualsMatcher<NodeType, ParamT>(Value) .matchesNode(Node); } /// Matches the operator Name of operator expressions (binary or /// unary). /// /// Example matches a || b (matcher = binaryOperator(hasOperatorName("||"))) /// \code /// !(a || b) /// \endcode AST_POLYMORPHIC_MATCHER_P( hasOperatorName, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, UnaryOperator), std::string, Name) { if (Optional<StringRef> OpName = internal::getOpName(Node)) return *OpName == Name; return false; } /// Matches operator expressions (binary or unary) that have any of the /// specified names. /// /// hasAnyOperatorName("+", "-") /// Is equivalent to /// anyOf(hasOperatorName("+"), hasOperatorName("-")) extern const internal::VariadicFunction< internal::PolymorphicMatcherWithParam1< internal::HasAnyOperatorNameMatcher, std::vector<std::string>, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, UnaryOperator)>, StringRef, internal::hasAnyOperatorNameFunc> hasAnyOperatorName; /// Matches all kinds of assignment operators. /// /// Example 1: matches a += b (matcher = binaryOperator(isAssignmentOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 = s2 /// (matcher = cxxOperatorCallExpr(isAssignmentOperator())) /// \code /// struct S { S& operator=(const S&); }; /// void x() { S s1, s2; s1 = s2; } /// \endcode AST_POLYMORPHIC_MATCHER( isAssignmentOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator)) { return Node.isAssignmentOp(); } /// Matches comparison operators. /// /// Example 1: matches a == b (matcher = binaryOperator(isComparisonOperator())) /// \code /// if (a == b) /// a += b; /// \endcode /// /// Example 2: matches s1 < s2 /// (matcher = cxxOperatorCallExpr(isComparisonOperator())) /// \code /// struct S { bool operator<(const S& other); }; /// void x(S s1, S s2) { bool b1 = s1 < s2; } /// \endcode AST_POLYMORPHIC_MATCHER( isComparisonOperator, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator)) { return Node.isComparisonOp(); } /// Matches the left hand side of binary operator expressions. 
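/// /// For example, assignments to one particular variable can be found with the following sketch (the name "x" is only a placeholder): /// \code /// binaryOperator(hasOperatorName("="), /// hasLHS(declRefExpr(to(varDecl(hasName("x")))))) /// \endcode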
/// /// Example matches a (matcher = binaryOperator(hasLHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasLHS, AST_POLYMORPHIC_SUPPORTED_TYPES( BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *LeftHandSide = internal::getLHS(Node); return (LeftHandSide != nullptr && InnerMatcher.matches(*LeftHandSide, Finder, Builder)); } /// Matches the right hand side of binary operator expressions. /// /// Example matches b (matcher = binaryOperator(hasRHS())) /// \code /// a || b /// \endcode AST_POLYMORPHIC_MATCHER_P(hasRHS, AST_POLYMORPHIC_SUPPORTED_TYPES( BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator, ArraySubscriptExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *RightHandSide = internal::getRHS(Node); return (RightHandSide != nullptr && InnerMatcher.matches(*RightHandSide, Finder, Builder)); } /// Matches if either the left hand side or the right hand side of a /// binary operator matches. AST_POLYMORPHIC_MATCHER_P( hasEitherOperand, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator), internal::Matcher<Expr>, InnerMatcher) { return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()( anyOf(hasLHS(InnerMatcher), hasRHS(InnerMatcher))) .matches(Node, Finder, Builder); } /// Matches if both matchers match with opposite sides of the binary operator. /// /// Example matcher = binaryOperator(hasOperands(integerLiteral(equals(1)), /// integerLiteral(equals(2)))) /// \code /// 1 + 2 // Match /// 2 + 1 // Match /// 1 + 1 // No match /// 2 + 2 // No match /// \endcode AST_POLYMORPHIC_MATCHER_P2( hasOperands, AST_POLYMORPHIC_SUPPORTED_TYPES(BinaryOperator, CXXOperatorCallExpr, CXXRewrittenBinaryOperator), internal::Matcher<Expr>, Matcher1, internal::Matcher<Expr>, Matcher2) { return internal::VariadicDynCastAllOfMatcher<Stmt, NodeType>()( anyOf(allOf(hasLHS(Matcher1), hasRHS(Matcher2)), allOf(hasLHS(Matcher2), hasRHS(Matcher1)))) .matches(Node, Finder, Builder); } /// Matches if the operand of a unary operator matches. /// /// Example matches true (matcher = hasUnaryOperand( /// cxxBoolLiteral(equals(true)))) /// \code /// !true /// \endcode AST_POLYMORPHIC_MATCHER_P(hasUnaryOperand, AST_POLYMORPHIC_SUPPORTED_TYPES(UnaryOperator, CXXOperatorCallExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const Operand = internal::getSubExpr(Node); return (Operand != nullptr && InnerMatcher.matches(*Operand, Finder, Builder)); } /// Matches if the cast's source expression /// or opaque value's source expression matches the given matcher. /// /// Example 1: matches "a string" /// (matcher = castExpr(hasSourceExpression(cxxConstructExpr()))) /// \code /// class URL { URL(string); }; /// URL url = "a string"; /// \endcode /// /// Example 2: matches 'b' (matcher = /// opaqueValueExpr(hasSourceExpression(implicitCastExpr(declRefExpr())))) /// \code /// int a = b ?: 1; /// \endcode AST_POLYMORPHIC_MATCHER_P(hasSourceExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(CastExpr, OpaqueValueExpr), internal::Matcher<Expr>, InnerMatcher) { const Expr *const SubExpression = internal::GetSourceExpressionMatcher<NodeType>::get(Node); return (SubExpression != nullptr && InnerMatcher.matches(*SubExpression, Finder, Builder)); } /// Matches casts that have a given cast kind.
/// /// Example: matches the implicit cast around \c 0 /// (matcher = castExpr(hasCastKind(CK_NullToPointer))) /// \code /// int *p = 0; /// \endcode /// /// If the matcher is used from clang-query, CastKind parameter /// should be passed as a quoted string. e.g., hasCastKind("CK_NullToPointer"). AST_MATCHER_P(CastExpr, hasCastKind, CastKind, Kind) { return Node.getCastKind() == Kind; } /// Matches casts whose destination type matches a given matcher. /// /// (Note: Clang's AST refers to other conversions as "casts" too, and calls /// actual casts "explicit" casts.) AST_MATCHER_P(ExplicitCastExpr, hasDestinationType, internal::Matcher<QualType>, InnerMatcher) { const QualType NodeType = Node.getTypeAsWritten(); return InnerMatcher.matches(NodeType, Finder, Builder); } /// Matches implicit casts whose destination type matches a given /// matcher. /// /// FIXME: Unit test this matcher AST_MATCHER_P(ImplicitCastExpr, hasImplicitDestinationType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getType(), Finder, Builder); } /// Matches TagDecl objects that are spelled with "struct." /// /// Example matches S, but not C, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isStruct) { return Node.isStruct(); } /// Matches TagDecl objects that are spelled with "union." /// /// Example matches U, but not C, S or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isUnion) { return Node.isUnion(); } /// Matches TagDecl objects that are spelled with "class." /// /// Example matches C, but not S, U or E. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isClass) { return Node.isClass(); } /// Matches TagDecl objects that are spelled with "enum." /// /// Example matches E, but not C, S or U. /// \code /// struct S {}; /// class C {}; /// union U {}; /// enum E {}; /// \endcode AST_MATCHER(TagDecl, isEnum) { return Node.isEnum(); } /// Matches the true branch expression of a conditional operator. /// /// Example 1 (conditional ternary operator): matches a /// \code /// condition ? a : b /// \endcode /// /// Example 2 (conditional binary operator): matches opaqueValueExpr(condition) /// \code /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasTrueExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getTrueExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches the false branch expression of a conditional operator /// (binary or ternary). /// /// Example matches b /// \code /// condition ? a : b /// condition ?: b /// \endcode AST_MATCHER_P(AbstractConditionalOperator, hasFalseExpression, internal::Matcher<Expr>, InnerMatcher) { const Expr *Expression = Node.getFalseExpr(); return (Expression != nullptr && InnerMatcher.matches(*Expression, Finder, Builder)); } /// Matches if a declaration has a body attached. /// /// Example matches A, va, fa /// \code /// class A {}; /// class B; // Doesn't match, as it has no body. /// int va; /// extern int vb; // Doesn't match, as it doesn't define the variable. /// void fa() {} /// void fb(); // Doesn't match, as it has no body. /// @interface X /// - (void)ma; // Doesn't match, interface is declaration.
/// @end /// @implementation X /// - (void)ma {} /// @end /// \endcode /// /// Usable as: Matcher<TagDecl>, Matcher<VarDecl>, Matcher<FunctionDecl>, /// Matcher<ObjCMethodDecl> AST_POLYMORPHIC_MATCHER(isDefinition, AST_POLYMORPHIC_SUPPORTED_TYPES(TagDecl, VarDecl, ObjCMethodDecl, FunctionDecl)) { return Node.isThisDeclarationADefinition(); } /// Matches if a function declaration is variadic. /// /// Example matches f, but not g or h. The function i will not match, even when /// compiled in C mode. /// \code /// void f(...); /// void g(int); /// template <typename... Ts> void h(Ts...); /// void i(); /// \endcode AST_MATCHER(FunctionDecl, isVariadic) { return Node.isVariadic(); } /// Matches the class declaration that the given method declaration /// belongs to. /// /// FIXME: Generalize this for other kinds of declarations. /// FIXME: What other kind of declarations would we need to generalize /// this to? /// /// Example matches A() in the last line /// (matcher = cxxConstructExpr(hasDeclaration(cxxMethodDecl( /// ofClass(hasName("A")))))) /// \code /// class A { /// public: /// A(); /// }; /// A a = A(); /// \endcode AST_MATCHER_P(CXXMethodDecl, ofClass, internal::Matcher<CXXRecordDecl>, InnerMatcher) { ASTChildrenNotSpelledInSourceScope RAII(Finder, false); const CXXRecordDecl *Parent = Node.getParent(); return (Parent != nullptr && InnerMatcher.matches(*Parent, Finder, Builder)); } /// Matches each method overridden by the given method. This matcher may /// produce multiple matches. /// /// Given /// \code /// class A { virtual void f(); }; /// class B : public A { void f(); }; /// class C : public B { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches once, with "b" binding "A::f" and "d" binding "C::f" (Note /// that B::f is not overridden by C::f). /// /// The check can produce multiple matches in case of multiple inheritance, e.g. /// \code /// class A1 { virtual void f(); }; /// class A2 { virtual void f(); }; /// class C : public A1, public A2 { void f(); }; /// \endcode /// cxxMethodDecl(ofClass(hasName("C")), /// forEachOverridden(cxxMethodDecl().bind("b"))).bind("d") /// matches twice, once with "b" binding "A1::f" and "d" binding "C::f", and /// once with "b" binding "A2::f" and "d" binding "C::f". AST_MATCHER_P(CXXMethodDecl, forEachOverridden, internal::Matcher<CXXMethodDecl>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *Overridden : Node.overridden_methods()) { BoundNodesTreeBuilder OverriddenBuilder(*Builder); const bool OverriddenMatched = InnerMatcher.matches(*Overridden, Finder, &OverriddenBuilder); if (OverriddenMatched) { Matched = true; Result.addMatch(OverriddenBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches declarations of virtual methods and C++ base specifers that specify /// virtual inheritance. /// /// Example: /// \code /// class A { /// public: /// virtual void x(); // matches x /// }; /// \endcode /// /// Example: /// \code /// class Base {}; /// class DirectlyDerived : virtual Base {}; // matches Base /// class IndirectlyDerived : DirectlyDerived, Base {}; // matches Base /// \endcode /// /// Usable as: Matcher<CXXMethodDecl>, Matcher<CXXBaseSpecifier> AST_POLYMORPHIC_MATCHER(isVirtual, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXMethodDecl, CXXBaseSpecifier)) { return Node.isVirtual(); } /// Matches if the given method declaration has an explicit "virtual". 
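/// /// Combined with \c isOverride below, one plausible sketch finds methods that spell out 'virtual' even though they already override a base class method: /// \code /// cxxMethodDecl(isVirtualAsWritten(), isOverride()) /// \endcode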
/// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// void x(); /// }; /// \endcode /// matches A::x but not B::x AST_MATCHER(CXXMethodDecl, isVirtualAsWritten) { return Node.isVirtualAsWritten(); } /// Matches if the given method or class declaration is final. /// /// Given: /// \code /// class A final {}; /// /// struct B { /// virtual void f(); /// }; /// /// struct C : B { /// void f() final; /// }; /// \endcode /// matches A and C::f, but not B, C, or B::f AST_POLYMORPHIC_MATCHER(isFinal, AST_POLYMORPHIC_SUPPORTED_TYPES(CXXRecordDecl, CXXMethodDecl)) { return Node.template hasAttr<FinalAttr>(); } /// Matches if the given method declaration is pure. /// /// Given /// \code /// class A { /// public: /// virtual void x() = 0; /// }; /// \endcode /// matches A::x AST_MATCHER(CXXMethodDecl, isPure) { return Node.isPure(); } /// Matches if the given method declaration is const. /// /// Given /// \code /// struct A { /// void foo() const; /// void bar(); /// }; /// \endcode /// /// cxxMethodDecl(isConst()) matches A::foo() but not A::bar() AST_MATCHER(CXXMethodDecl, isConst) { return Node.isConst(); } /// Matches if the given method declaration declares a copy assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isCopyAssignmentOperator()) matches the first method but not /// the second one. AST_MATCHER(CXXMethodDecl, isCopyAssignmentOperator) { return Node.isCopyAssignmentOperator(); } /// Matches if the given method declaration declares a move assignment /// operator. /// /// Given /// \code /// struct A { /// A &operator=(const A &); /// A &operator=(A &&); /// }; /// \endcode /// /// cxxMethodDecl(isMoveAssignmentOperator()) matches the second method but not /// the first one. AST_MATCHER(CXXMethodDecl, isMoveAssignmentOperator) { return Node.isMoveAssignmentOperator(); } /// Matches if the given method declaration overrides another method. /// /// Given /// \code /// class A { /// public: /// virtual void x(); /// }; /// class B : public A { /// public: /// virtual void x(); /// }; /// \endcode /// matches B::x AST_MATCHER(CXXMethodDecl, isOverride) { return Node.size_overridden_methods() > 0 || Node.hasAttr<OverrideAttr>(); } /// Matches method declarations that are user-provided. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &) = default; // #2 /// S(S &&) = delete; // #3 /// }; /// \endcode /// cxxConstructorDecl(isUserProvided()) will match #1, but not #2 or #3. AST_MATCHER(CXXMethodDecl, isUserProvided) { return Node.isUserProvided(); } /// Matches member expressions that are called with '->' as opposed /// to '.'. /// /// Member calls on the implicit this pointer match as called with '->'. /// /// Given /// \code /// class Y { /// void x() { this->x(); x(); Y y; y.x(); a; this->b; Y::b; } /// template <class T> void f() { this->f<T>(); f<T>(); } /// int a; /// static int b; /// }; /// template <class T> /// class Z { /// void x() { this->m; } /// }; /// \endcode /// memberExpr(isArrow()) /// matches this->x, x, y.x, a, this->b /// cxxDependentScopeMemberExpr(isArrow()) /// matches this->m /// unresolvedMemberExpr(isArrow()) /// matches this->f<T>, f<T> AST_POLYMORPHIC_MATCHER( isArrow, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr)) { return Node.isArrow(); } /// Matches QualType nodes that are of integer type. 
/// /// Given /// \code /// void a(int); /// void b(long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isInteger()))) /// matches "a(int)", "b(long)", but not "c(double)". AST_MATCHER(QualType, isInteger) { return Node->isIntegerType(); } /// Matches QualType nodes that are of unsigned integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isUnsignedInteger()))) /// matches "b(unsigned long)", but not "a(int)" and "c(double)". AST_MATCHER(QualType, isUnsignedInteger) { return Node->isUnsignedIntegerType(); } /// Matches QualType nodes that are of signed integer type. /// /// Given /// \code /// void a(int); /// void b(unsigned long); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isSignedInteger()))) /// matches "a(int)", but not "b(unsigned long)" and "c(double)". AST_MATCHER(QualType, isSignedInteger) { return Node->isSignedIntegerType(); } /// Matches QualType nodes that are of character type. /// /// Given /// \code /// void a(char); /// void b(wchar_t); /// void c(double); /// \endcode /// functionDecl(hasAnyParameter(hasType(isAnyCharacter()))) /// matches "a(char)", "b(wchar_t)", but not "c(double)". AST_MATCHER(QualType, isAnyCharacter) { return Node->isAnyCharacterType(); } /// Matches QualType nodes that are of any pointer type; this includes /// the Objective-C object pointer type, which is different despite being /// syntactically similar. /// /// Given /// \code /// int *i = nullptr; /// /// @interface Foo /// @end /// Foo *f; /// /// int j; /// \endcode /// varDecl(hasType(isAnyPointer())) /// matches "int *i" and "Foo *f", but not "int j". AST_MATCHER(QualType, isAnyPointer) { return Node->isAnyPointerType(); } /// Matches QualType nodes that are const-qualified, i.e., that /// include "top-level" const. /// /// Given /// \code /// void a(int); /// void b(int const); /// void c(const int); /// void d(const int*); /// void e(int const) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isConstQualified()))) /// matches "void b(int const)", "void c(const int)" and /// "void e(int const) {}". It does not match d as there /// is no top-level const on the parameter type "const int *". AST_MATCHER(QualType, isConstQualified) { return Node.isConstQualified(); } /// Matches QualType nodes that are volatile-qualified, i.e., that /// include "top-level" volatile. /// /// Given /// \code /// void a(int); /// void b(int volatile); /// void c(volatile int); /// void d(volatile int*); /// void e(int volatile) {}; /// \endcode /// functionDecl(hasAnyParameter(hasType(isVolatileQualified()))) /// matches "void b(int volatile)", "void c(volatile int)" and /// "void e(int volatile) {}". It does not match d as there /// is no top-level volatile on the parameter type "volatile int *". AST_MATCHER(QualType, isVolatileQualified) { return Node.isVolatileQualified(); } /// Matches QualType nodes that have local CV-qualifiers attached to /// the node, not hidden within a typedef. /// /// Given /// \code /// typedef const int const_int; /// const_int i; /// int *const j; /// int *volatile k; /// int m; /// \endcode /// \c varDecl(hasType(hasLocalQualifiers())) matches only \c j and \c k. /// \c i is const-qualified but the qualifier is not local. AST_MATCHER(QualType, hasLocalQualifiers) { return Node.hasLocalQualifiers(); } /// Matches a member expression where the member is matched by a /// given matcher. 
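/// /// For example, restricting the inner matcher to \c fieldDecl gives a sketch that only matches references to non-static data members: /// \code /// memberExpr(member(fieldDecl())) /// \endcode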
/// /// Given /// \code /// struct { int first, second; } first, second; /// int i(second.first); /// int j(first.second); /// \endcode /// memberExpr(member(hasName("first"))) /// matches second.first /// but not first.second (because the member name there is "second"). AST_MATCHER_P(MemberExpr, member, internal::Matcher<ValueDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getMemberDecl(), Finder, Builder); } /// Matches a member expression where the object expression is matched by a /// given matcher. Implicit object expressions are included; that is, it matches /// use of implicit `this`. /// /// Given /// \code /// struct X { /// int m; /// int f(X x) { x.m; return m; } /// }; /// \endcode /// memberExpr(hasObjectExpression(hasType(cxxRecordDecl(hasName("X"))))) /// matches `x.m`, but not `m`; however, /// memberExpr(hasObjectExpression(hasType(pointsTo( // cxxRecordDecl(hasName("X")))))) /// matches `m` (aka. `this->m`), but not `x.m`. AST_POLYMORPHIC_MATCHER_P( hasObjectExpression, AST_POLYMORPHIC_SUPPORTED_TYPES(MemberExpr, UnresolvedMemberExpr, CXXDependentScopeMemberExpr), internal::Matcher<Expr>, InnerMatcher) { if (const auto *E = dyn_cast<UnresolvedMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; if (const auto *E = dyn_cast<CXXDependentScopeMemberExpr>(&Node)) if (E->isImplicitAccess()) return false; return InnerMatcher.matches(*Node.getBase(), Finder, Builder); } /// Matches any using shadow declaration. /// /// Given /// \code /// namespace X { void b(); } /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasName("b")))) /// matches \code using X::b \endcode AST_MATCHER_P(UsingDecl, hasAnyUsingShadowDecl, internal::Matcher<UsingShadowDecl>, InnerMatcher) { return matchesFirstInPointerRange(InnerMatcher, Node.shadow_begin(), Node.shadow_end(), Finder, Builder) != Node.shadow_end(); } /// Matches a using shadow declaration where the target declaration is /// matched by the given matcher. /// /// Given /// \code /// namespace X { int a; void b(); } /// using X::a; /// using X::b; /// \endcode /// usingDecl(hasAnyUsingShadowDecl(hasTargetDecl(functionDecl()))) /// matches \code using X::b \endcode /// but not \code using X::a \endcode AST_MATCHER_P(UsingShadowDecl, hasTargetDecl, internal::Matcher<NamedDecl>, InnerMatcher) { return InnerMatcher.matches(*Node.getTargetDecl(), Finder, Builder); } /// Matches template instantiations of function, class, or static /// member variable template instantiations. /// /// Given /// \code /// template <typename T> class X {}; class A {}; X<A> x; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; template class X<A>; /// \endcode /// or /// \code /// template <typename T> class X {}; class A {}; extern template class X<A>; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// matches the template instantiation of X<A>. /// /// But given /// \code /// template <typename T> class X {}; class A {}; /// template <> class X<A> {}; X<A> x; /// \endcode /// cxxRecordDecl(hasName("::X"), isTemplateInstantiation()) /// does not match, as X<A> is an explicit template specialization. 
/// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isTemplateInstantiation, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ImplicitInstantiation || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDefinition || Node.getTemplateSpecializationKind() == TSK_ExplicitInstantiationDeclaration); } /// Matches declarations that are template instantiations or are inside /// template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { T i; } /// A(0); /// A(0U); /// \endcode /// functionDecl(isInstantiated()) /// matches 'A(int) {...};' and 'A(unsigned) {...}'. AST_MATCHER_FUNCTION(internal::Matcher<Decl>, isInstantiated) { auto IsInstantiation = decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))); return decl(anyOf(IsInstantiation, hasAncestor(IsInstantiation))); } /// Matches statements inside of a template instantiation. /// /// Given /// \code /// int j; /// template<typename T> void A(T t) { T i; j += 42;} /// A(0); /// A(0U); /// \endcode /// declStmt(isInTemplateInstantiation()) /// matches 'int i;' and 'unsigned i'. /// unless(stmt(isInTemplateInstantiation())) /// will NOT match j += 42; as it's shared between the template definition and /// instantiation. AST_MATCHER_FUNCTION(internal::Matcher<Stmt>, isInTemplateInstantiation) { return stmt( hasAncestor(decl(anyOf(cxxRecordDecl(isTemplateInstantiation()), functionDecl(isTemplateInstantiation()))))); } /// Matches explicit template specializations of function, class, or /// static member variable template instantiations. /// /// Given /// \code /// template<typename T> void A(T t) { } /// template<> void A(int N) { } /// \endcode /// functionDecl(isExplicitTemplateSpecialization()) /// matches the specialization A<int>(). /// /// Usable as: Matcher<FunctionDecl>, Matcher<VarDecl>, Matcher<CXXRecordDecl> AST_POLYMORPHIC_MATCHER(isExplicitTemplateSpecialization, AST_POLYMORPHIC_SUPPORTED_TYPES(FunctionDecl, VarDecl, CXXRecordDecl)) { return (Node.getTemplateSpecializationKind() == TSK_ExplicitSpecialization); } /// Matches \c TypeLocs for which the given inner /// QualType-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD(internal::BindableMatcher<TypeLoc>, loc, internal::Matcher<QualType>, InnerMatcher, 0) { return internal::BindableMatcher<TypeLoc>( new internal::TypeLocTypeMatcher(InnerMatcher)); } /// Matches type \c bool. /// /// Given /// \code /// struct S { bool func(); }; /// \endcode /// functionDecl(returns(booleanType())) /// matches "bool func();" AST_MATCHER(Type, booleanType) { return Node.isBooleanType(); } /// Matches type \c void. /// /// Given /// \code /// struct S { void func(); }; /// \endcode /// functionDecl(returns(voidType())) /// matches "void func();" AST_MATCHER(Type, voidType) { return Node.isVoidType(); } template <typename NodeType> using AstTypeMatcher = internal::VariadicDynCastAllOfMatcher<Type, NodeType>; /// Matches builtin Types. /// /// Given /// \code /// struct A {}; /// A a; /// int b; /// float c; /// bool d; /// \endcode /// builtinType() /// matches "int b", "float c" and "bool d" extern const AstTypeMatcher<BuiltinType> builtinType; /// Matches all kinds of arrays. 
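/// /// Like the other type matchers here, it can be used through \c hasType; a rough sketch for variables whose type is spelled directly as an array type: /// \code /// varDecl(hasType(arrayType())) /// \endcode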
/// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[4]; /// void f() { int c[a[0]]; } /// \endcode /// arrayType() /// matches "int a[]", "int b[4]" and "int c[a[0]]"; extern const AstTypeMatcher<ArrayType> arrayType; /// Matches C99 complex types. /// /// Given /// \code /// _Complex float f; /// \endcode /// complexType() /// matches "_Complex float f" extern const AstTypeMatcher<ComplexType> complexType; /// Matches any real floating-point type (float, double, long double). /// /// Given /// \code /// int i; /// float f; /// \endcode /// realFloatingPointType() /// matches "float f" but not "int i" AST_MATCHER(Type, realFloatingPointType) { return Node.isRealFloatingType(); } /// Matches arrays and C99 complex types that have a specific element /// type. /// /// Given /// \code /// struct A {}; /// A a[7]; /// int b[7]; /// \endcode /// arrayType(hasElementType(builtinType())) /// matches "int b[7]" /// /// Usable as: Matcher<ArrayType>, Matcher<ComplexType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasElementType, getElement, AST_POLYMORPHIC_SUPPORTED_TYPES(ArrayType, ComplexType)); /// Matches C arrays with a specified constant size. /// /// Given /// \code /// void() { /// int a[2]; /// int b[] = { 2, 3 }; /// int c[b[0]]; /// } /// \endcode /// constantArrayType() /// matches "int a[2]" extern const AstTypeMatcher<ConstantArrayType> constantArrayType; /// Matches nodes that have the specified size. /// /// Given /// \code /// int a[42]; /// int b[2 * 21]; /// int c[41], d[43]; /// char *s = "abcd"; /// wchar_t *ws = L"abcd"; /// char *w = "a"; /// \endcode /// constantArrayType(hasSize(42)) /// matches "int a[42]" and "int b[2 * 21]" /// stringLiteral(hasSize(4)) /// matches "abcd", L"abcd" AST_POLYMORPHIC_MATCHER_P(hasSize, AST_POLYMORPHIC_SUPPORTED_TYPES(ConstantArrayType, StringLiteral), unsigned, N) { return internal::HasSizeMatcher<NodeType>::hasSize(Node, N); } /// Matches C++ arrays whose size is a value-dependent expression. /// /// Given /// \code /// template<typename T, int Size> /// class array { /// T data[Size]; /// }; /// \endcode /// dependentSizedArrayType /// matches "T data[Size]" extern const AstTypeMatcher<DependentSizedArrayType> dependentSizedArrayType; /// Matches C arrays with unspecified size. /// /// Given /// \code /// int a[] = { 2, 3 }; /// int b[42]; /// void f(int c[]) { int d[a[0]]; }; /// \endcode /// incompleteArrayType() /// matches "int a[]" and "int c[]" extern const AstTypeMatcher<IncompleteArrayType> incompleteArrayType; /// Matches C arrays with a specified size that is not an /// integer-constant-expression. /// /// Given /// \code /// void f() { /// int a[] = { 2, 3 } /// int b[42]; /// int c[a[0]]; /// } /// \endcode /// variableArrayType() /// matches "int c[a[0]]" extern const AstTypeMatcher<VariableArrayType> variableArrayType; /// Matches \c VariableArrayType nodes that have a specific size /// expression. /// /// Given /// \code /// void f(int b) { /// int a[b]; /// } /// \endcode /// variableArrayType(hasSizeExpr(ignoringImpCasts(declRefExpr(to( /// varDecl(hasName("b"))))))) /// matches "int a[b]" AST_MATCHER_P(VariableArrayType, hasSizeExpr, internal::Matcher<Expr>, InnerMatcher) { return InnerMatcher.matches(*Node.getSizeExpr(), Finder, Builder); } /// Matches atomic types. /// /// Given /// \code /// _Atomic(int) i; /// \endcode /// atomicType() /// matches "_Atomic(int) i" extern const AstTypeMatcher<AtomicType> atomicType; /// Matches atomic types with a specific value type. 
/// /// Given /// \code /// _Atomic(int) i; /// _Atomic(float) f; /// \endcode /// atomicType(hasValueType(isInteger())) /// matches "_Atomic(int) i" /// /// Usable as: Matcher<AtomicType> AST_TYPELOC_TRAVERSE_MATCHER_DECL(hasValueType, getValue, AST_POLYMORPHIC_SUPPORTED_TYPES(AtomicType)); /// Matches types nodes representing C++11 auto types. /// /// Given: /// \code /// auto n = 4; /// int v[] = { 2, 3 } /// for (auto i : v) { } /// \endcode /// autoType() /// matches "auto n" and "auto i" extern const AstTypeMatcher<AutoType> autoType; /// Matches types nodes representing C++11 decltype(<expr>) types. /// /// Given: /// \code /// short i = 1; /// int j = 42; /// decltype(i + j) result = i + j; /// \endcode /// decltypeType() /// matches "decltype(i + j)" extern const AstTypeMatcher<DecltypeType> decltypeType; /// Matches \c AutoType nodes where the deduced type is a specific type. /// /// Note: There is no \c TypeLoc for the deduced type and thus no /// \c getDeducedLoc() matcher. /// /// Given /// \code /// auto a = 1; /// auto b = 2.0; /// \endcode /// autoType(hasDeducedType(isInteger())) /// matches "auto a" /// /// Usable as: Matcher<AutoType> AST_TYPE_TRAVERSE_MATCHER(hasDeducedType, getDeducedType, AST_POLYMORPHIC_SUPPORTED_TYPES(AutoType)); /// Matches \c DecltypeType nodes to find out the underlying type. /// /// Given /// \code /// decltype(1) a = 1; /// decltype(2.0) b = 2.0; /// \endcode /// decltypeType(hasUnderlyingType(isInteger())) /// matches the type of "a" /// /// Usable as: Matcher<DecltypeType> AST_TYPE_TRAVERSE_MATCHER(hasUnderlyingType, getUnderlyingType, AST_POLYMORPHIC_SUPPORTED_TYPES(DecltypeType)); /// Matches \c FunctionType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionType() /// matches "int (*f)(int)" and the type of "g". extern const AstTypeMatcher<FunctionType> functionType; /// Matches \c FunctionProtoType nodes. /// /// Given /// \code /// int (*f)(int); /// void g(); /// \endcode /// functionProtoType() /// matches "int (*f)(int)" and the type of "g" in C++ mode. /// In C mode, "g" is not matched because it does not contain a prototype. extern const AstTypeMatcher<FunctionProtoType> functionProtoType; /// Matches \c ParenType nodes. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int *array_of_ptrs[4]; /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType()))) matches \c ptr_to_array but not /// \c array_of_ptrs. extern const AstTypeMatcher<ParenType> parenType; /// Matches \c ParenType nodes where the inner type is a specific type. /// /// Given /// \code /// int (*ptr_to_array)[4]; /// int (*ptr_to_func)(int); /// \endcode /// /// \c varDecl(hasType(pointsTo(parenType(innerType(functionType()))))) matches /// \c ptr_to_func but not \c ptr_to_array. /// /// Usable as: Matcher<ParenType> AST_TYPE_TRAVERSE_MATCHER(innerType, getInnerType, AST_POLYMORPHIC_SUPPORTED_TYPES(ParenType)); /// Matches block pointer types, i.e. types syntactically represented as /// "void (^)(int)". /// /// The \c pointee is always required to be a \c FunctionType. extern const AstTypeMatcher<BlockPointerType> blockPointerType; /// Matches member pointer types. /// Given /// \code /// struct A { int i; } /// A::* ptr = A::i; /// \endcode /// memberPointerType() /// matches "A::* ptr" extern const AstTypeMatcher<MemberPointerType> memberPointerType; /// Matches pointer types, but does not match Objective-C object pointer /// types. 
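/// /// Combined with \c pointee below, one possible sketch for variables whose type is spelled directly as a pointer to const: /// \code /// varDecl(hasType(pointerType(pointee(isConstQualified())))) /// \endcode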
/// /// Given /// \code /// int *a; /// int &b = *a; /// int c = 5; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "int *a", but does not match "Foo *f". extern const AstTypeMatcher<PointerType> pointerType; /// Matches an Objective-C object pointer type, which is different from /// a pointer type, despite being syntactically similar. /// /// Given /// \code /// int *a; /// /// @interface Foo /// @end /// Foo *f; /// \endcode /// pointerType() /// matches "Foo *f", but does not match "int *a". extern const AstTypeMatcher<ObjCObjectPointerType> objcObjectPointerType; /// Matches both lvalue and rvalue reference types. /// /// Given /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c referenceType() matches the types of \c b, \c c, \c d, \c e, and \c f. extern const AstTypeMatcher<ReferenceType> referenceType; /// Matches lvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c lValueReferenceType() matches the types of \c b, \c d, and \c e. \c e is /// matched since the type is deduced as int& by reference collapsing rules. extern const AstTypeMatcher<LValueReferenceType> lValueReferenceType; /// Matches rvalue reference types. /// /// Given: /// \code /// int *a; /// int &b = *a; /// int &&c = 1; /// auto &d = b; /// auto &&e = c; /// auto &&f = 2; /// int g = 5; /// \endcode /// /// \c rValueReferenceType() matches the types of \c c and \c f. \c e is not /// matched as it is deduced to int& by reference collapsing rules. extern const AstTypeMatcher<RValueReferenceType> rValueReferenceType; /// Narrows PointerType (and similar) matchers to those where the /// \c pointee matches a given matcher. /// /// Given /// \code /// int *a; /// int const *b; /// float const *f; /// \endcode /// pointerType(pointee(isConstQualified(), isInteger())) /// matches "int const *b" /// /// Usable as: Matcher<BlockPointerType>, Matcher<MemberPointerType>, /// Matcher<PointerType>, Matcher<ReferenceType> AST_TYPELOC_TRAVERSE_MATCHER_DECL( pointee, getPointee, AST_POLYMORPHIC_SUPPORTED_TYPES(BlockPointerType, MemberPointerType, PointerType, ReferenceType)); /// Matches typedef types. /// /// Given /// \code /// typedef int X; /// \endcode /// typedefType() /// matches "typedef int X" extern const AstTypeMatcher<TypedefType> typedefType; /// Matches enum types. /// /// Given /// \code /// enum C { Green }; /// enum class S { Red }; /// /// C c; /// S s; /// \endcode // /// \c enumType() matches the type of the variable declarations of both \c c and /// \c s. extern const AstTypeMatcher<EnumType> enumType; /// Matches template specialization types. /// /// Given /// \code /// template <typename T> /// class C { }; /// /// template class C<int>; // A /// C<char> var; // B /// \endcode /// /// \c templateSpecializationType() matches the type of the explicit /// instantiation in \c A and the type of the variable declaration in \c B. extern const AstTypeMatcher<TemplateSpecializationType> templateSpecializationType; /// Matches C++17 deduced template specialization types, e.g. deduced class /// template types. /// /// Given /// \code /// template <typename T> /// class C { public: C(T); }; /// /// C c(123); /// \endcode /// \c deducedTemplateSpecializationType() matches the type in the declaration /// of the variable \c c. 
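/// /// A rough sketch for variables declared via class template argument deduction; whether the written type is preserved as sugar on the variable's type may vary: /// \code /// varDecl(hasType(deducedTemplateSpecializationType())) /// \endcode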
extern const AstTypeMatcher<DeducedTemplateSpecializationType> deducedTemplateSpecializationType; /// Matches types nodes representing unary type transformations. /// /// Given: /// \code /// typedef __underlying_type(T) type; /// \endcode /// unaryTransformType() /// matches "__underlying_type(T)" extern const AstTypeMatcher<UnaryTransformType> unaryTransformType; /// Matches record types (e.g. structs, classes). /// /// Given /// \code /// class C {}; /// struct S {}; /// /// C c; /// S s; /// \endcode /// /// \c recordType() matches the type of the variable declarations of both \c c /// and \c s. extern const AstTypeMatcher<RecordType> recordType; /// Matches tag types (record and enum types). /// /// Given /// \code /// enum E {}; /// class C {}; /// /// E e; /// C c; /// \endcode /// /// \c tagType() matches the type of the variable declarations of both \c e /// and \c c. extern const AstTypeMatcher<TagType> tagType; /// Matches types specified with an elaborated type keyword or with a /// qualified name. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// class C {}; /// /// class C c; /// N::M::D d; /// \endcode /// /// \c elaboratedType() matches the type of the variable declarations of both /// \c c and \c d. extern const AstTypeMatcher<ElaboratedType> elaboratedType; /// Matches ElaboratedTypes whose qualifier, a NestedNameSpecifier, /// matches \c InnerMatcher if the qualifier exists. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(hasQualifier(hasPrefix(specifiesNamespace(hasName("N")))) /// matches the type of the variable declaration of \c d. AST_MATCHER_P(ElaboratedType, hasQualifier, internal::Matcher<NestedNameSpecifier>, InnerMatcher) { if (const NestedNameSpecifier *Qualifier = Node.getQualifier()) return InnerMatcher.matches(*Qualifier, Finder, Builder); return false; } /// Matches ElaboratedTypes whose named type matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// N::M::D d; /// \endcode /// /// \c elaboratedType(namesType(recordType( /// hasDeclaration(namedDecl(hasName("D")))))) matches the type of the variable /// declaration of \c d. AST_MATCHER_P(ElaboratedType, namesType, internal::Matcher<QualType>, InnerMatcher) { return InnerMatcher.matches(Node.getNamedType(), Finder, Builder); } /// Matches types that represent the result of substituting a type for a /// template type parameter. /// /// Given /// \code /// template <typename T> /// void F(T t) { /// int i = 1 + t; /// } /// \endcode /// /// \c substTemplateTypeParmType() matches the type of 't' but not '1' extern const AstTypeMatcher<SubstTemplateTypeParmType> substTemplateTypeParmType; /// Matches template type parameter substitutions that have a replacement /// type that matches the provided matcher. /// /// Given /// \code /// template <typename T> /// double F(T t); /// int i; /// double j = F(i); /// \endcode /// /// \c substTemplateTypeParmType(hasReplacementType(type())) matches int AST_TYPE_TRAVERSE_MATCHER( hasReplacementType, getReplacementType, AST_POLYMORPHIC_SUPPORTED_TYPES(SubstTemplateTypeParmType)); /// Matches template type parameter types. /// /// Example matches T, but not int. /// (matcher = templateTypeParmType()) /// \code /// template <typename T> void f(int i); /// \endcode extern const AstTypeMatcher<TemplateTypeParmType> templateTypeParmType; /// Matches injected class name types. 
/// /// Example matches S s, but not S<T> s. /// (matcher = parmVarDecl(hasType(injectedClassNameType()))) /// \code /// template <typename T> struct S { /// void f(S s); /// void g(S<T> s); /// }; /// \endcode extern const AstTypeMatcher<InjectedClassNameType> injectedClassNameType; /// Matches decayed type. /// Example matches i[] in declaration of f. /// (matcher = valueDecl(hasType(decayedType(hasDecayedType(pointerType()))))) /// Example matches i[1]. /// (matcher = expr(hasType(decayedType(hasDecayedType(pointerType()))))) /// \code /// void f(int i[]) { /// i[1] = 0; /// } /// \endcode extern const AstTypeMatcher<DecayedType> decayedType; /// Matches the decayed type, whose decayed type matches \c InnerMatcher AST_MATCHER_P(DecayedType, hasDecayedType, internal::Matcher<QualType>, InnerType) { return InnerType.matches(Node.getDecayedType(), Finder, Builder); } /// Matches declarations whose declaration context, interpreted as a /// Decl, matches \c InnerMatcher. /// /// Given /// \code /// namespace N { /// namespace M { /// class D {}; /// } /// } /// \endcode /// /// \c cxxRecordDecl(hasDeclContext(namedDecl(hasName("M")))) matches the /// declaration of \c class \c D. AST_MATCHER_P(Decl, hasDeclContext, internal::Matcher<Decl>, InnerMatcher) { const DeclContext *DC = Node.getDeclContext(); if (!DC) return false; return InnerMatcher.matches(*Decl::castFromDeclContext(DC), Finder, Builder); } /// Matches nested name specifiers. /// /// Given /// \code /// namespace ns { /// struct A { static void f(); }; /// void A::f() {} /// void g() { A::f(); } /// } /// ns::A a; /// \endcode /// nestedNameSpecifier() /// matches "ns::" and both "A::" extern const internal::VariadicAllOfMatcher<NestedNameSpecifier> nestedNameSpecifier; /// Same as \c nestedNameSpecifier but matches \c NestedNameSpecifierLoc. extern const internal::VariadicAllOfMatcher<NestedNameSpecifierLoc> nestedNameSpecifierLoc; /// Matches \c NestedNameSpecifierLocs for which the given inner /// NestedNameSpecifier-matcher matches. AST_MATCHER_FUNCTION_P_OVERLOAD( internal::BindableMatcher<NestedNameSpecifierLoc>, loc, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 1) { return internal::BindableMatcher<NestedNameSpecifierLoc>( new internal::LocMatcher<NestedNameSpecifierLoc, NestedNameSpecifier>( InnerMatcher)); } /// Matches nested name specifiers that specify a type matching the /// given \c QualType matcher without qualifiers. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(specifiesType( /// hasDeclaration(cxxRecordDecl(hasName("A"))) /// )) /// matches "A::" AST_MATCHER_P(NestedNameSpecifier, specifiesType, internal::Matcher<QualType>, InnerMatcher) { if (!Node.getAsType()) return false; return InnerMatcher.matches(QualType(Node.getAsType(), 0), Finder, Builder); } /// Matches nested name specifier locs that specify a type matching the /// given \c TypeLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(specifiesTypeLoc(loc(type( /// hasDeclaration(cxxRecordDecl(hasName("A"))))))) /// matches "A::" AST_MATCHER_P(NestedNameSpecifierLoc, specifiesTypeLoc, internal::Matcher<TypeLoc>, InnerMatcher) { return Node && Node.getNestedNameSpecifier()->getAsType() && InnerMatcher.matches(Node.getTypeLoc(), Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifier.
/// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifier(hasPrefix(specifiesType(asString("struct A")))) and /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifier, hasPrefix, internal::Matcher<NestedNameSpecifier>, InnerMatcher, 0) { const NestedNameSpecifier *NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(*NextNode, Finder, Builder); } /// Matches on the prefix of a \c NestedNameSpecifierLoc. /// /// Given /// \code /// struct A { struct B { struct C {}; }; }; /// A::B::C c; /// \endcode /// nestedNameSpecifierLoc(hasPrefix(loc(specifiesType(asString("struct A"))))) /// matches "A::" AST_MATCHER_P_OVERLOAD(NestedNameSpecifierLoc, hasPrefix, internal::Matcher<NestedNameSpecifierLoc>, InnerMatcher, 1) { NestedNameSpecifierLoc NextNode = Node.getPrefix(); if (!NextNode) return false; return InnerMatcher.matches(NextNode, Finder, Builder); } /// Matches nested name specifiers that specify a namespace matching the /// given namespace matcher. /// /// Given /// \code /// namespace ns { struct A {}; } /// ns::A a; /// \endcode /// nestedNameSpecifier(specifiesNamespace(hasName("ns"))) /// matches "ns::" AST_MATCHER_P(NestedNameSpecifier, specifiesNamespace, internal::Matcher<NamespaceDecl>, InnerMatcher) { if (!Node.getAsNamespace()) return false; return InnerMatcher.matches(*Node.getAsNamespace(), Finder, Builder); } /// Overloads for the \c equalsNode matcher. /// FIXME: Implement for other node types. /// @{ /// Matches if a node equals another node. /// /// \c Decl has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Decl, equalsNode, const Decl*, Other, 0) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Stmt has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Stmt, equalsNode, const Stmt*, Other, 1) { return &Node == Other; } /// Matches if a node equals another node. /// /// \c Type has pointer identity in the AST. AST_MATCHER_P_OVERLOAD(Type, equalsNode, const Type*, Other, 2) { return &Node == Other; } /// @} /// Matches each case or default statement belonging to the given switch /// statement. This matcher may produce multiple matches. /// /// Given /// \code /// switch (1) { case 1: case 2: default: switch (2) { case 3: case 4: ; } } /// \endcode /// switchStmt(forEachSwitchCase(caseStmt().bind("c"))).bind("s") /// matches four times, with "c" binding each of "case 1:", "case 2:", /// "case 3:" and "case 4:", and "s" respectively binding "switch (1)", /// "switch (1)", "switch (2)" and "switch (2)". AST_MATCHER_P(SwitchStmt, forEachSwitchCase, internal::Matcher<SwitchCase>, InnerMatcher) { BoundNodesTreeBuilder Result; // FIXME: getSwitchCaseList() does not necessarily guarantee a stable // iteration order. We should use the more general iterating matchers once // they are capable of expressing this matcher (for example, it should ignore // case statements belonging to nested switch statements). bool Matched = false; for (const SwitchCase *SC = Node.getSwitchCaseList(); SC; SC = SC->getNextSwitchCase()) { BoundNodesTreeBuilder CaseBuilder(*Builder); bool CaseMatched = InnerMatcher.matches(*SC, Finder, &CaseBuilder); if (CaseMatched) { Matched = true; Result.addMatch(CaseBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches each constructor initializer in a constructor definition. 
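/// /// One possible refinement visits only initializers explicitly written in the source, binding each one (the binding name "init" is just an illustration): /// \code /// cxxConstructorDecl(forEachConstructorInitializer( /// cxxCtorInitializer(isWritten()).bind("init"))) /// \endcode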
/// /// Given /// \code /// class A { A() : i(42), j(42) {} int i; int j; }; /// \endcode /// cxxConstructorDecl(forEachConstructorInitializer( /// forField(decl().bind("x")) /// )) /// will trigger two matches, binding for 'i' and 'j' respectively. AST_MATCHER_P(CXXConstructorDecl, forEachConstructorInitializer, internal::Matcher<CXXCtorInitializer>, InnerMatcher) { BoundNodesTreeBuilder Result; bool Matched = false; for (const auto *I : Node.inits()) { if (Finder->isTraversalIgnoringImplicitNodes() && !I->isWritten()) continue; BoundNodesTreeBuilder InitBuilder(*Builder); if (InnerMatcher.matches(*I, Finder, &InitBuilder)) { Matched = true; Result.addMatch(InitBuilder); } } *Builder = std::move(Result); return Matched; } /// Matches constructor declarations that are copy constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isCopyConstructor()) will match #2, but not #1 or #3. AST_MATCHER(CXXConstructorDecl, isCopyConstructor) { return Node.isCopyConstructor(); } /// Matches constructor declarations that are move constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isMoveConstructor()) will match #3, but not #1 or #2. AST_MATCHER(CXXConstructorDecl, isMoveConstructor) { return Node.isMoveConstructor(); } /// Matches constructor declarations that are default constructors. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(const S &); // #2 /// S(S &&); // #3 /// }; /// \endcode /// cxxConstructorDecl(isDefaultConstructor()) will match #1, but not #2 or #3. AST_MATCHER(CXXConstructorDecl, isDefaultConstructor) { return Node.isDefaultConstructor(); } /// Matches constructors that delegate to another constructor. /// /// Given /// \code /// struct S { /// S(); // #1 /// S(int) {} // #2 /// S(S &&) : S() {} // #3 /// }; /// S::S() : S(0) {} // #4 /// \endcode /// cxxConstructorDecl(isDelegatingConstructor()) will match #3 and #4, but not /// #1 or #2. AST_MATCHER(CXXConstructorDecl, isDelegatingConstructor) { return Node.isDelegatingConstructor(); } /// Matches constructor, conversion function, and deduction guide declarations /// that have an explicit specifier if this explicit specifier is resolved to /// true. /// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(isExplicit()) will match #2 and #8, but not #1, #7 or #9. /// cxxConversionDecl(isExplicit()) will match #4, but not #3. /// cxxDeductionGuideDecl(isExplicit()) will match #6, but not #5. AST_POLYMORPHIC_MATCHER(isExplicit, AST_POLYMORPHIC_SUPPORTED_TYPES( CXXConstructorDecl, CXXConversionDecl, CXXDeductionGuideDecl)) { return Node.isExplicit(); } /// Matches the expression in an explicit specifier if present in the given /// declaration. 
/// /// Given /// \code /// template<bool b> /// struct S { /// S(int); // #1 /// explicit S(double); // #2 /// operator int(); // #3 /// explicit operator bool(); // #4 /// explicit(false) S(bool) // # 7 /// explicit(true) S(char) // # 8 /// explicit(b) S(S) // # 9 /// }; /// S(int) -> S<true> // #5 /// explicit S(double) -> S<false> // #6 /// \endcode /// cxxConstructorDecl(hasExplicitSpecifier(constantExpr())) will match #7, #8 and #9, but not #1 or #2. /// cxxConversionDecl(hasExplicitSpecifier(constantExpr())) will not match #3 or #4. /// cxxDeductionGuideDecl(hasExplicitSpecifier(constantExpr())) will not match #5 or #6. AST_MATCHER_P(FunctionDecl, hasExplicitSpecifier, internal::Matcher<Expr>, InnerMatcher) { ExplicitSpecifier ES = ExplicitSpecifier::getFromDecl(&Node); if (!ES.getExpr()) return false; ASTChildrenNotSpelledInSourceScope RAII(Finder, false); return InnerMatcher.matches(*ES.getExpr(), Finder, Builder); } /// Matches function and namespace declarations that are marked with /// the inline keyword. /// /// Given /// \code /// inline void f(); /// void g(); /// namespace n { /// inline namespace m {} /// } /// \endcode /// functionDecl(isInline()) will match ::f(). /// namespaceDecl(isInline()) will match n::m. AST_POLYMORPHIC_MATCHER(isInline, AST_POLYMORPHIC_SUPPORTED_TYPES(NamespaceDecl, FunctionDecl)) { // This is required because the spelling of the function used to determine // whether inline is specified or not differs between the polymorphic types. if (const auto *FD = dyn_cast<FunctionDecl>(&Node)) return FD->isInlineSpecified(); else if (const auto *NSD = dyn_cast<NamespaceDecl>(&Node)) return NSD->isInline(); llvm_unreachable("Not a valid polymorphic type"); } /// Matches anonymous namespace declarations. /// /// Given /// \code /// namespace n { /// namespace {} // #1 /// } /// \endcode /// namespaceDecl(isAnonymous()) will match #1 but not ::n. AST_MATCHER(NamespaceDecl, isAnonymous) { return Node.isAnonymousNamespace(); } /// Matches declarations in the namespace `std`, but not in nested namespaces. /// /// Given /// \code /// class vector {}; /// namespace foo { /// class vector {}; /// namespace std { /// class vector {}; /// } /// } /// namespace std { /// inline namespace __1 { /// class vector {}; // #1 /// namespace experimental { /// class vector {}; /// } /// } /// } /// \endcode /// cxxRecordDecl(hasName("vector"), isInStdNamespace()) will match only #1. AST_MATCHER(Decl, isInStdNamespace) { return Node.isInStdNamespace(); } /// If the given case statement does not use the GNU case range /// extension, matches the constant given in the statement. /// /// Given /// \code /// switch (1) { case 1: case 1+1: case 3 ... 4: ; } /// \endcode /// caseStmt(hasCaseConstant(integerLiteral())) /// matches "case 1:" AST_MATCHER_P(CaseStmt, hasCaseConstant, internal::Matcher<Expr>, InnerMatcher) { if (Node.getRHS()) return false; return InnerMatcher.matches(*Node.getLHS(), Finder, Builder); } /// Matches declaration that has a given attribute. /// /// Given /// \code /// __attribute__((device)) void f() { ... } /// \endcode /// decl(hasAttr(clang::attr::CUDADevice)) matches the function declaration of /// f. If the matcher is used from clang-query, attr::Kind parameter should be /// passed as a quoted string. e.g., hasAttr("attr::CUDADevice"). 
AST_MATCHER_P(Decl, hasAttr, attr::Kind, AttrKind) { for (const auto *Attr : Node.attrs()) { if (Attr->getKind() == AttrKind) return true; } return false; } /// Matches the return value expression of a return statement /// /// Given /// \code /// return a + b; /// \endcode /// hasReturnValue(binaryOperator()) /// matches 'return a + b' /// with binaryOperator() /// matching 'a + b' AST_MATCHER_P(ReturnStmt, hasReturnValue, internal::Matcher<Expr>, InnerMatcher) { if (const auto *RetValue = Node.getRetValue()) return InnerMatcher.matches(*RetValue, Finder, Builder); return false; } /// Matches CUDA kernel call expression. /// /// Example matches, /// \code /// kernel<<<i,j>>>(); /// \endcode extern const internal::VariadicDynCastAllOfMatcher<Stmt, CUDAKernelCallExpr> cudaKernelCallExpr; /// Matches expressions that resolve to a null pointer constant, such as /// GNU's __null, C++11's nullptr, or C's NULL macro. /// /// Given: /// \code /// void *v1 = NULL; /// void *v2 = nullptr; /// void *v3 = __null; // GNU extension /// char *cp = (char *)0; /// int *ip = 0; /// int i = 0; /// \endcode /// expr(nullPointerConstant()) /// matches the initializer for v1, v2, v3, cp, and ip. Does not match the /// initializer for i. AST_MATCHER(Expr, nullPointerConstant) { return Node.isNullPointerConstant(Finder->getASTContext(), Expr::NPC_ValueDependentIsNull); } /// Matches declaration of the function the statement belongs to /// /// Given: /// \code /// F& operator=(const F& o) { /// std::copy_if(o.begin(), o.end(), begin(), [](V v) { return v > 0; }); /// return *this; /// } /// \endcode /// returnStmt(forFunction(hasName("operator="))) /// matches 'return *this' /// but does not match 'return v > 0' AST_MATCHER_P(Stmt, forFunction, internal::Matcher<FunctionDecl>, InnerMatcher) { const auto &Parents = Finder->getASTContext().getParents(Node); llvm::SmallVector<DynTypedNode, 8> Stack(Parents.begin(), Parents.end()); while(!Stack.empty()) { const auto &CurNode = Stack.back(); Stack.pop_back(); if(const auto *FuncDeclNode = CurNode.get<FunctionDecl>()) { if(InnerMatcher.matches(*FuncDeclNode, Finder, Builder)) { return true; } } else if(const auto *LambdaExprNode = CurNode.get<LambdaExpr>()) { if(InnerMatcher.matches(*LambdaExprNode->getCallOperator(), Finder, Builder)) { return true; } } else { for(const auto &Parent: Finder->getASTContext().getParents(CurNode)) Stack.push_back(Parent); } } return false; } /// Matches a declaration that has external formal linkage. /// /// Example matches only z (matcher = varDecl(hasExternalFormalLinkage())) /// \code /// void f() { /// int x; /// static int y; /// } /// int z; /// \endcode /// /// Example matches f() because it has external formal linkage despite being /// unique to the translation unit as though it has internal likage /// (matcher = functionDecl(hasExternalFormalLinkage())) /// /// \code /// namespace { /// void f() {} /// } /// \endcode AST_MATCHER(NamedDecl, hasExternalFormalLinkage) { return Node.hasExternalFormalLinkage(); } /// Matches a declaration that has default arguments. /// /// Example matches y (matcher = parmVarDecl(hasDefaultArgument())) /// \code /// void x(int val) {} /// void y(int val = 0) {} /// \endcode /// /// Deprecated. Use hasInitializer() instead to be able to /// match on the contents of the default argument. 
For example: /// /// \code /// void x(int val = 7) {} /// void y(int val = 42) {} /// \endcode /// parmVarDecl(hasInitializer(integerLiteral(equals(42)))) /// matches the parameter of y /// /// A matcher such as /// parmVarDecl(hasInitializer(anything())) /// is equivalent to parmVarDecl(hasDefaultArgument()). AST_MATCHER(ParmVarDecl, hasDefaultArgument) { return Node.hasDefaultArg(); } /// Matches array new expressions. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(isArray()) /// matches the expression 'new MyClass[10]'. AST_MATCHER(CXXNewExpr, isArray) { return Node.isArray(); } /// Matches placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage, 16) MyClass(); /// \endcode /// cxxNewExpr(hasPlacementArg(1, integerLiteral(equals(16)))) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P2(CXXNewExpr, hasPlacementArg, unsigned, Index, internal::Matcher<Expr>, InnerMatcher) { return Node.getNumPlacementArgs() > Index && InnerMatcher.matches(*Node.getPlacementArg(Index), Finder, Builder); } /// Matches any placement new expression arguments. /// /// Given: /// \code /// MyClass *p1 = new (Storage) MyClass(); /// \endcode /// cxxNewExpr(hasAnyPlacementArg(anything())) /// matches the expression 'new (Storage, 16) MyClass()'. AST_MATCHER_P(CXXNewExpr, hasAnyPlacementArg, internal::Matcher<Expr>, InnerMatcher) { return llvm::any_of(Node.placement_arguments(), [&](const Expr *Arg) { return InnerMatcher.matches(*Arg, Finder, Builder); }); } /// Matches array new expressions with a given array size. /// /// Given: /// \code /// MyClass *p1 = new MyClass[10]; /// \endcode /// cxxNewExpr(hasArraySize(integerLiteral(equals(10)))) /// matches the expression 'new MyClass[10]'. AST_MATCHER_P(CXXNewExpr, hasArraySize, internal::Matcher<Expr>, InnerMatcher) { return Node.isArray() && *Node.getArraySize() && InnerMatcher.matches(**Node.getArraySize(), Finder, Builder); } /// Matches a class declaration that is defined. /// /// Example matches x (matcher = cxxRecordDecl(hasDefinition())) /// \code /// class x {}; /// class y; /// \endcode AST_MATCHER(CXXRecordDecl, hasDefinition) { return Node.hasDefinition(); } /// Matches C++11 scoped enum declaration. /// /// Example matches Y (matcher = enumDecl(isScoped())) /// \code /// enum X {}; /// enum class Y {}; /// \endcode AST_MATCHER(EnumDecl, isScoped) { return Node.isScoped(); } /// Matches a function declared with a trailing return type. /// /// Example matches Y (matcher = functionDecl(hasTrailingReturn())) /// \code /// int X() {} /// auto Y() -> int {} /// \endcode AST_MATCHER(FunctionDecl, hasTrailingReturn) { if (const auto *F = Node.getType()->getAs<FunctionProtoType>()) return F->hasTrailingReturn(); return false; } /// Matches expressions that match InnerMatcher that are possibly wrapped in an /// elidable constructor and other corresponding bookkeeping nodes. /// /// In C++17, elidable copy constructors are no longer being generated in the /// AST as it is not permitted by the standard. They are, however, part of the /// AST in C++14 and earlier. So, a matcher must abstract over these differences /// to work in all language modes. This matcher skips elidable constructor-call /// AST nodes, `ExprWithCleanups` nodes wrapping elidable constructor-calls and /// various implicit nodes inside the constructor calls, all of which will not /// appear in the C++17 AST. 
/// /// Given /// /// \code /// struct H {}; /// H G(); /// void f() { /// H D = G(); /// } /// \endcode /// /// ``varDecl(hasInitializer(ignoringElidableConstructorCall(callExpr())))`` /// matches ``H D = G()`` in C++11 through C++17 (and beyond). AST_MATCHER_P(Expr, ignoringElidableConstructorCall, ast_matchers::internal::Matcher<Expr>, InnerMatcher) { // E tracks the node that we are examining. const Expr *E = &Node; // If present, remove an outer `ExprWithCleanups` corresponding to the // underlying `CXXConstructExpr`. This check won't cover all cases of added // `ExprWithCleanups` corresponding to `CXXConstructExpr` nodes (because the // EWC is placed on the outermost node of the expression, which this may not // be), but, it still improves the coverage of this matcher. if (const auto *CleanupsExpr = dyn_cast<ExprWithCleanups>(&Node)) E = CleanupsExpr->getSubExpr(); if (const auto *CtorExpr = dyn_cast<CXXConstructExpr>(E)) { if (CtorExpr->isElidable()) { if (const auto *MaterializeTemp = dyn_cast<MaterializeTemporaryExpr>(CtorExpr->getArg(0))) { return InnerMatcher.matches(*MaterializeTemp->getSubExpr(), Finder, Builder); } } } return InnerMatcher.matches(Node, Finder, Builder); } //----------------------------------------------------------------------------// // OpenMP handling. //----------------------------------------------------------------------------// /// Matches any ``#pragma omp`` executable directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective()`` matches ``omp parallel``, /// ``omp parallel default(none)`` and ``omp taskyield``. extern const internal::VariadicDynCastAllOfMatcher<Stmt, OMPExecutableDirective> ompExecutableDirective; /// Matches standalone OpenMP directives, /// i.e., directives that can't have a structured block. /// /// Given /// /// \code /// #pragma omp parallel /// {} /// #pragma omp taskyield /// \endcode /// /// ``ompExecutableDirective(isStandaloneDirective()))`` matches /// ``omp taskyield``. AST_MATCHER(OMPExecutableDirective, isStandaloneDirective) { return Node.isStandaloneDirective(); } /// Matches the structured-block of the OpenMP executable directive /// /// Prerequisite: the executable directive must not be standalone directive. /// If it is, it will never match. /// /// Given /// /// \code /// #pragma omp parallel /// ; /// #pragma omp parallel /// {} /// \endcode /// /// ``ompExecutableDirective(hasStructuredBlock(nullStmt()))`` will match ``;`` AST_MATCHER_P(OMPExecutableDirective, hasStructuredBlock, internal::Matcher<Stmt>, InnerMatcher) { if (Node.isStandaloneDirective()) return false; // Standalone directives have no structured blocks. return InnerMatcher.matches(*Node.getStructuredBlock(), Finder, Builder); } /// Matches any clause in an OpenMP directive. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// \endcode /// /// ``ompExecutableDirective(hasAnyClause(anything()))`` matches /// ``omp parallel default(none)``. AST_MATCHER_P(OMPExecutableDirective, hasAnyClause, internal::Matcher<OMPClause>, InnerMatcher) { ArrayRef<OMPClause *> Clauses = Node.clauses(); return matchesFirstInPointerRange(InnerMatcher, Clauses.begin(), Clauses.end(), Finder, Builder) != Clauses.end(); } /// Matches OpenMP ``default`` clause. 
/// /// Given /// /// \code /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// #pragma omp parallel /// \endcode /// /// ``ompDefaultClause()`` matches ``default(none)``, ``default(shared)``, and /// ``default(firstprivate)`` extern const internal::VariadicDynCastAllOfMatcher<OMPClause, OMPDefaultClause> ompDefaultClause; /// Matches if the OpenMP ``default`` clause has ``none`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isNoneKind())`` matches only ``default(none)``. AST_MATCHER(OMPDefaultClause, isNoneKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_none; } /// Matches if the OpenMP ``default`` clause has ``shared`` kind specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isSharedKind())`` matches only ``default(shared)``. AST_MATCHER(OMPDefaultClause, isSharedKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_shared; } /// Matches if the OpenMP ``default`` clause has ``firstprivate`` kind /// specified. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel default(none) /// #pragma omp parallel default(shared) /// #pragma omp parallel default(firstprivate) /// \endcode /// /// ``ompDefaultClause(isFirstPrivateKind())`` matches only /// ``default(firstprivate)``. AST_MATCHER(OMPDefaultClause, isFirstPrivateKind) { return Node.getDefaultKind() == llvm::omp::OMP_DEFAULT_firstprivate; } /// Matches if the OpenMP directive is allowed to contain the specified OpenMP /// clause kind. /// /// Given /// /// \code /// #pragma omp parallel /// #pragma omp parallel for /// #pragma omp for /// \endcode /// /// `ompExecutableDirective(isAllowedToContainClause(OMPC_default))`` matches /// ``omp parallel`` and ``omp parallel for``. /// /// If the matcher is use from clang-query, ``OpenMPClauseKind`` parameter /// should be passed as a quoted string. e.g., /// ``isAllowedToContainClauseKind("OMPC_default").`` AST_MATCHER_P(OMPExecutableDirective, isAllowedToContainClauseKind, OpenMPClauseKind, CKind) { return llvm::omp::isAllowedClauseForDirective( Node.getDirectiveKind(), CKind, Finder->getASTContext().getLangOpts().OpenMP); } //----------------------------------------------------------------------------// // End OpenMP handling. //----------------------------------------------------------------------------// } // namespace ast_matchers } // namespace clang #endif // LLVM_CLANG_ASTMATCHERS_ASTMATCHERS_H
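// ---------------------------------------------------------------------------
// Illustrative usage sketch (added; not part of ASTMatchers.h). It shows how
// one of the matchers documented above, forEachSwitchCase(), is typically
// wired into a clang::ast_matchers::MatchFinder callback. This is only a
// sketch assuming a standard clang tooling setup; the exact
// newFrontendActionFactory()/runToolOnCode() signatures vary slightly across
// LLVM releases, and the example program name and code string are made up.
#include "clang/ASTMatchers/ASTMatchFinder.h"
#include "clang/ASTMatchers/ASTMatchers.h"
#include "clang/Tooling/Tooling.h"
#include "llvm/Support/raw_ostream.h"

using namespace clang;
using namespace clang::ast_matchers;

namespace {
class CasePrinter : public MatchFinder::MatchCallback {
public:
  void run(const MatchFinder::MatchResult &Result) override {
    // forEachSwitchCase() produces one callback invocation per bound "c".
    if (const auto *SC = Result.Nodes.getNodeAs<SwitchCase>("c")) {
      SC->getBeginLoc().print(llvm::outs(), *Result.SourceManager);
      llvm::outs() << "\n";
    }
  }
};
} // namespace

int main() {
  CasePrinter Printer;
  MatchFinder Finder;
  // Mirrors the documentation above: bind each case/default label of a switch.
  Finder.addMatcher(switchStmt(forEachSwitchCase(caseStmt().bind("c"))),
                    &Printer);
  tooling::runToolOnCode(
      tooling::newFrontendActionFactory(&Finder)->create(),
      "void f(int x) { switch (x) { case 1: case 2: default: ; } }");
  return 0;
}
// ---------------------------------------------------------------------------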
TuLi_Test.h
#pragma once

namespace tinyDNN {
	void tuli_Conv_1() {
		LoadTuLi::load_Tuli();
		LoadTuLi::load_Tuli_T();

		std::shared_ptr<Inter_LayerQL<double>> in_01 = std::make_shared<Inter_LayerQL<double>>(128, 128);

		std::shared_ptr<LayerQL<double>> pool_01 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 64, 64); // pooling layer
		std::shared_ptr<Inter_LayerQL<double>> o_01 = in_01 + pool_01;

		// convolution layer constructor parameters
		std::shared_ptr<LayerQL<double>> conv_01 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 32, 64, 64, 7, 1, 3); // convolution layer
		std::shared_ptr<Inter_LayerQL<double>> o_02 = o_01 + conv_01;

		std::shared_ptr<LayerQL<double>> rule_01 = std::make_shared<Relu_LayerQL<double>>(Relu_Conv_Layer); // ReLU layer
		std::shared_ptr<Inter_LayerQL<double>> o_03 = o_02 + rule_01;

		std::shared_ptr<LayerQL<double>> pool_02 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 32, 32); // pooling layer
		std::shared_ptr<Inter_LayerQL<double>> o_04 = o_03 + pool_02;

		// convolution layer constructor parameters
		std::shared_ptr<LayerQL<double>> conv_02 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 32, 32, 32, 5, 32, 2); // convolution layer
		std::shared_ptr<Inter_LayerQL<double>> o_05 = o_04 + conv_02;

		std::shared_ptr<LayerQL<double>> rule_02 = std::make_shared<Relu_LayerQL<double>>(Relu_Conv_Layer); // ReLU layer
		std::shared_ptr<Inter_LayerQL<double>> o_06 = o_05 + rule_02;

		std::shared_ptr<LayerQL<double>> pool_03 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 16, 16); // pooling layer
		std::shared_ptr<Inter_LayerQL<double>> o_07 = o_06 + pool_03;

		// convolution layer constructor parameters
		std::shared_ptr<LayerQL<double>> conv_03 = std::make_shared<Conv_LayerQL<double>>(Conv_Layer, 16, 16, 16, 3, 32, 1); // convolution layer
		std::shared_ptr<Inter_LayerQL<double>> o_08 = o_07 + conv_03;

		std::shared_ptr<LayerQL<double>> rule_03 = std::make_shared<Relu_LayerQL<double>>(Relu_Conv_Layer); // ReLU layer
		std::shared_ptr<Inter_LayerQL<double>> o_09 = o_08 + rule_03;

		std::shared_ptr<LayerQL<double>> pool_04 = std::make_shared<PooLayerQL<double>>(Pool_Layer, 8, 8); // pooling layer
		std::shared_ptr<Inter_LayerQL<double>> o_10 = o_09 + pool_04;

		std::shared_ptr<LayerQL<double>> dim_reduce_01 = std::make_shared<Dim_ReduceQL<double>>(Dim_Reduce_Layer, 16, 8, 8); // dimension-reduction layer
		std::shared_ptr<Inter_LayerQL<double>> o_11 = o_10 + dim_reduce_01;

		std::shared_ptr<LayerQL<double>> fullconnect_01 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 16 * 8 * 8, 30); // fully connected layer
		std::shared_ptr<Inter_LayerQL<double>> o_12 = o_11 + fullconnect_01;

		std::shared_ptr<LayerQL<double>> rule_04 = std::make_shared<Relu_LayerQL<double>>(Relu_Layer); // ReLU layer
		std::shared_ptr<Inter_LayerQL<double>> o_13 = o_12 + rule_04;

		std::shared_ptr<LayerQL<double>> fullconnect_02 = std::make_shared<Fullconnect_LayerQL<double>>(Fullconnect_Layer, 30, 3); // fully connected layer
		std::shared_ptr<Inter_LayerQL<double>> o_14 = o_13 + fullconnect_02;

		std::shared_ptr<LayerQL<double>> rule_05 = std::make_shared<Relu_LayerQL<double>>(Relu_Layer); // ReLU layer
		std::shared_ptr<Inter_LayerQL<double>> o_15 = o_14 + rule_05;

		std::shared_ptr<LayerQL<double>> lossLayer_01 = std::make_shared<SoftMax_LayerQL<double>>(SoftMax_Layer); // loss (SoftMax) layer
		std::shared_ptr<Inter_LayerQL<double>> o_16 = o_15 + lossLayer_01;

		rule_01->pRelu_k = 0.12;
		rule_02->pRelu_k = 0.12;
		rule_03->pRelu_k = 0.12;
		rule_04->pRelu_k = 0.12;
		rule_05->pRelu_k = 0.12;

		conv_01->upConv = 0.005;
		conv_02->upConv = 0.005;
		conv_03->upConv = 0.005;
		fullconnect_01->upFull = 0.005;
		fullconnect_02->upFull = 0.005;

		for (int i = 0; i < 50; i++) {
			std::cout << i << std::endl;
			for (int j = 1; j < 24; j++) {
				in_01->forward_Matrix_Vector.clear();
				in_01->forward_Matrix_Vector.push_back(LoadTuLi::tuli_Train[j - 1]);
				switch (j) {
				case 1: case 4: case 7: case 10: case 13: case 16: case 19: case 22:
					o_16->backward_Matrix->setMatrixQL().resize(1, 3);
					o_16->backward_Matrix->setMatrixQL().setZero();
					o_16->backward_Matrix->setMatrixQL()(0, 0) = 1;
					break;
				case 2: case 5: case 8: case 11: case 14: case 17: case 20: case 23:
					o_16->backward_Matrix->setMatrixQL().resize(1, 3);
					o_16->backward_Matrix->setMatrixQL().setZero();
					o_16->backward_Matrix->setMatrixQL()(0, 1) = 1;
					break;
				case 3: case 6: case 9: case 12: case 15: case 18: case 21:
					o_16->backward_Matrix->setMatrixQL().resize(1, 3);
					o_16->backward_Matrix->setMatrixQL().setZero();
					o_16->backward_Matrix->setMatrixQL()(0, 2) = 1;
					break;
				default:
					break;
				}

				for (auto k = NetQL<double>::layerQLVector.begin(); k != NetQL<double>::layerQLVector.end(); k++) {
					(*k)->calForward();
				}

				// backward pass from the last layer to the first + weight update
				//#pragma omp parallel
				for (auto k = NetQL<double>::layerQLVector.rbegin(); k != NetQL<double>::layerQLVector.rend(); k++) {
					(*k)->calBackward();
					(*k)->upMatrix();
				}

				if (i > 48) {
					std::cout << "i::" << o_16->forward_Matrix->getMatrixQL() << std::endl;
				}
			}
		}

		for (int j = 1; j < 10; j++) {
			in_01->forward_Matrix_Vector.clear();
			in_01->forward_Matrix_Vector.push_back(LoadTuLi::tuli_Test[j - 1]);
			for (auto k = NetQL<double>::layerQLVector.begin(); k != NetQL<double>::layerQLVector.end(); k++) {
				(*k)->calForward();
			}
			std::cout << "j::" << o_16->forward_Matrix->getMatrixQL() << std::endl;
		}
	}
}
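// --------------------------------------------------------------------------
// Illustrative note (added; not part of the original TuLi_Test.h): the switch
// over j in the training loop above only encodes the rule "training sample j
// belongs to class (j - 1) % 3" as a one-hot 1x3 target row. Assuming
// setMatrixQL() keeps exposing the same dense-matrix interface used above
// (resize / setZero / operator()), an equivalent compact helper could be:
//
//     auto setOneHotTarget = [&](int j) {
//         auto &target = o_16->backward_Matrix->setMatrixQL();
//         target.resize(1, 3);
//         target.setZero();
//         target(0, (j - 1) % 3) = 1;  // class index cycles 0, 1, 2 with j
//     };
//     // inside the loop: setOneHotTarget(j);
// --------------------------------------------------------------------------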
3d7pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 7 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 8; tile_size[3] = 64; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; const double alpha = 0.0876; const double beta = 0.0765; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 2) && (Nx >= 3) && (Ny >= 3) && (Nz >= 3)) { for (t1=-1;t1<=floord(Nt-2,4);t1++) { lbp=max(ceild(t1,2),ceild(8*t1-Nt+3,8)); ubp=min(floord(Nt+Nz-4,8),floord(4*t1+Nz+1,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(max(0,ceild(t1-1,2)),ceild(8*t2-Nz-4,8));t3<=min(min(min(floord(Nt+Ny-4,8),floord(4*t1+Ny+5,8)),floord(8*t2+Ny+4,8)),floord(8*t1-8*t2+Nz+Ny+3,8));t3++) { for (t4=max(max(max(0,ceild(t1-15,16)),ceild(8*t2-Nz-60,64)),ceild(8*t3-Ny-60,64));t4<=min(min(min(min(floord(Nt+Nx-4,64),floord(4*t1+Nx+5,64)),floord(8*t2+Nx+4,64)),floord(8*t3+Nx+4,64)),floord(8*t1-8*t2+Nz+Nx+3,64));t4++) { for (t5=max(max(max(max(max(0,4*t1),8*t1-8*t2+1),8*t2-Nz+2),8*t3-Ny+2),64*t4-Nx+2);t5<=min(min(min(min(min(Nt-2,4*t1+7),8*t2+6),8*t3+6),64*t4+62),8*t1-8*t2+Nz+5);t5++) { for (t6=max(max(8*t2,t5+1),-8*t1+8*t2+2*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+2*t5),t5+Nz-2);t6++) { for (t7=max(8*t3,t5+1);t7<=min(8*t3+7,t5+Ny-2);t7++) { lbv=max(64*t4,t5+1); ubv=min(64*t4+63,t5+Nx-2); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)] = ((alpha * A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8)]) + (beta * (((((A[ t5 % 2][ (-t5+t6) - 1][ (-t5+t7)][ (-t5+t8)] + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) - 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) - 1]) + A[ t5 % 2][ (-t5+t6) + 1][ (-t5+t7)][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7) + 1][ (-t5+t8)]) + A[ t5 % 2][ (-t5+t6)][ (-t5+t7)][ (-t5+t8) + 1])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays (Causing performance degradation /* for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); */ return 0; }
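/*
 * Reference (untiled) form of the same order-1 3D 7-point stencil, added for
 * illustration only: this is what the diamond-tiled CLooG loop nest above
 * computes, written as plain time/space loops. "A" is the same
 * double-buffered array allocated in main(), and the bounds follow the
 * t5/t6/t7/t8 ranges of the tiled code (time steps 0 .. Nt-2, interior
 * points 1 .. N-2 in each spatial dimension). Compiles as C or C++.
 */
static void stencil_3d7pt_reference(double ****A, int Nt, int Nz, int Ny, int Nx,
                                    double alpha, double beta)
{
  for (int t = 0; t < Nt - 1; t++) {
    for (int i = 1; i < Nz - 1; i++) {
      for (int j = 1; j < Ny - 1; j++) {
        for (int k = 1; k < Nx - 1; k++) {
          /* new value = alpha * center + beta * (sum of the 6 face neighbors) */
          A[(t + 1) % 2][i][j][k] =
              alpha * A[t % 2][i][j][k] +
              beta * (A[t % 2][i - 1][j][k] + A[t % 2][i][j - 1][k] +
                      A[t % 2][i][j][k - 1] + A[t % 2][i + 1][j][k] +
                      A[t % 2][i][j + 1][k] + A[t % 2][i][j][k + 1]);
        }
      }
    }
  }
}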
GB_binop__ne_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__ne_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__ne_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__ne_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__ne_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__ne_fp32) // A*D function (colscale): GB (_AxD__ne_fp32) // D*A function (rowscale): GB (_DxB__ne_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__ne_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__ne_fp32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__ne_fp32) // C=scalar+B GB (_bind1st__ne_fp32) // C=scalar+B' GB (_bind1st_tran__ne_fp32) // C=A+scalar GB (_bind2nd__ne_fp32) // C=A'+scalar GB (_bind2nd_tran__ne_fp32) // C type: bool // A type: float // B,b type: float // BinaryOp: cij = (aij != bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x != y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_NE || GxB_NO_FP32 || GxB_NO_NE_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__ne_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__ne_fp32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__ne_fp32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__ne_fp32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__ne_fp32) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *restrict Cx = (bool *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__ne_fp32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__ne_fp32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__ne_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__ne_fp32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__ne_fp32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__ne_fp32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; float bij = GBX (Bx, p, false) ; Cx [p] = (x != bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__ne_fp32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = GBX (Ax, p, false) ; Cx [p] = (aij != y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x != aij) ; \ } GrB_Info GB (_bind1st_tran__ne_fp32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but 
A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij != y) ; \ } GrB_Info GB (_bind2nd_tran__ne_fp32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
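/*
 * Illustrative user-level call (added; not part of this generated file): a
 * sketch of the kind of GraphBLAS call that can dispatch into one of the
 * _AemultB_* kernels above, assuming the standard SuiteSparse:GraphBLAS user
 * API (GrB_NE_FP32, GrB_Matrix_eWiseMult_BinaryOp). It computes C = (A != B)
 * on the intersection of the patterns of A and B, with C of type GrB_BOOL as
 * in the kernels above. Compiles as C or C++.
 */
#include "GraphBLAS.h"
#include <stdbool.h>
#include <stdio.h>

static void ne_fp32_example (void)
{
    GrB_Matrix A = NULL, B = NULL, C = NULL ;
    GrB_init (GrB_NONBLOCKING) ;
    GrB_Matrix_new (&A, GrB_FP32, 2, 2) ;
    GrB_Matrix_new (&B, GrB_FP32, 2, 2) ;
    GrB_Matrix_new (&C, GrB_BOOL, 2, 2) ;
    GrB_Matrix_setElement_FP32 (A, 1.5, 0, 0) ;
    GrB_Matrix_setElement_FP32 (B, 2.5, 0, 0) ;
    // C = A .!= B (no mask, no accumulator, default descriptor)
    GrB_Matrix_eWiseMult_BinaryOp (C, NULL, NULL, GrB_NE_FP32, A, B, NULL) ;
    bool cij = false ;
    GrB_Matrix_extractElement_BOOL (&cij, C, 0, 0) ;
    printf ("C(0,0) = %d\n", (int) cij) ;      // expected: 1, since 1.5 != 2.5
    GrB_Matrix_free (&A) ; GrB_Matrix_free (&B) ; GrB_Matrix_free (&C) ;
    GrB_finalize ( ) ;
}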
GB_unop__identity_int64_int64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_int64_int64) // op(A') function: GB (_unop_tran__identity_int64_int64) // C type: int64_t // A type: int64_t // cast: int64_t cij = aij // unaryop: cij = aij #define GB_ATYPE \ int64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ int64_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ int64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ int64_t z = aij ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 1 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_int64_int64) ( int64_t *Cx, // Cx and Ax may be aliased const int64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { int64_t aij = Ax [p] ; int64_t z = aij ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; int64_t aij = Ax [p] ; int64_t z = aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_int64_int64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
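/*
 * Illustrative user-level call (added; not part of this generated file): a
 * short sketch, assuming the standard SuiteSparse:GraphBLAS user API
 * (GrB_Matrix_apply, GrB_IDENTITY_INT64), of the apply operation that
 * dispatches into _unop_apply__identity_int64_int64 above. Because the
 * operator is the identity with no typecast (GB_OP_IS_IDENTITY_WITH_NO_TYPECAST
 * is 1), the non-bitmap branch of that kernel reduces to a parallel GB_memcpy
 * of the values.
 */
#include "GraphBLAS.h"

static void identity_int64_example (GrB_Matrix A)   // A assumed of type GrB_INT64
{
    GrB_Index nrows, ncols ;
    GrB_Matrix_nrows (&nrows, A) ;
    GrB_Matrix_ncols (&ncols, A) ;
    GrB_Matrix C = NULL ;
    GrB_Matrix_new (&C, GrB_INT64, nrows, ncols) ;
    // C = identity (A): effectively a typed copy of the values of A
    GrB_Matrix_apply (C, NULL, NULL, GrB_IDENTITY_INT64, A, NULL) ;
    GrB_Matrix_free (&C) ;
}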
nodal_residualbased_elimination_builder_and_solver_for_FSI.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi, Alessandro Franci // // #if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI) #define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif /* External includes */ // #define USE_GOOGLE_HASH #ifdef USE_GOOGLE_HASH #include "sparsehash/dense_hash_set" //included in external libraries #else #include <unordered_set> #endif /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "pfem_fluid_dynamics_application_variables.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class NodalResidualBasedEliminationBuilderAndSolverForFSI * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. * Calculation of the reactions involves a cost very similiar to the calculation of the total residual * @author Riccardo Rossi */ template <class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class NodalResidualBasedEliminationBuilderAndSolverForFSI : public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverForFSI); typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef Node<3> NodeType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; typedef Vector VectorType; typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType; ///@} ///@name Life Cycle ///@{ /** Constructor. 
*/ NodalResidualBasedEliminationBuilderAndSolverForFSI( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver) { // KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolverForFSI") << "Using the standard builder and solver " << std::endl; } /** Destructor. */ ~NodalResidualBasedEliminationBuilderAndSolverForFSI() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void SetMaterialPropertiesToFluid( ModelPart::NodeIterator itNode, double &density, double &deviatoricCoeff, double &volumetricCoeff, double timeInterval, double nodalVolume) { density = itNode->FastGetSolutionStepValue(DENSITY); deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); if (volumetricCoeff > 0) { volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); double bulkReduction = density * nodalVolume / (timeInterval * volumetricCoeff); volumetricCoeff *= bulkReduction; } } void SetMaterialPropertiesToSolid( ModelPart::NodeIterator itNode, double &density, double &deviatoricCoeff, double &volumetricCoeff, double timeInterval, double nodalVolume) { density = itNode->FastGetSolutionStepValue(SOLID_DENSITY); double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS); double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO); //deviatoricCoeff=deltaT*secondLame deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5; //volumetricCoeff=bulk*deltaT=deltaT*(firstLame+2*secondLame/3) volumetricCoeff = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio)) + 2.0 * deviatoricCoeff / 3.0; } void BuildSolidNodally( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b, double hybridCoeff) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" 
<< std::endl; //contributions to the system LocalSystemMatrixType solidLHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType solidRHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType solidEquationId; ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; const double FourThirds = 4.0 / 3.0; const double nTwoThirds = -2.0 / 3.0; //double theta = 0.5; double theta = 1.0; array_1d<double, 3> Acc(3, 0.0); double dNdXi = 0; double dNdYi = 0; double dNdZi = 0; double dNdXj = 0; double dNdYj = 0; double dNdZj = 0; unsigned int firstRow = 0; unsigned int firstCol = 0; double density = 0; double deviatoricCoeff = 0; double volumetricCoeff = 0; double dynamics = 1.0; //dynamics=0.0; // static problem without intertial effects /* #pragma omp parallel */ // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); double numNodesForExternalForce = 0; double nodalExternalForce = 0; bool belytsckoCase = false; bool cooksMembraneCase = false; if (cooksMembraneCase == true) { for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double posX = itNode->X0(); if (posX > 47.999 && posX < 48.001) { numNodesForExternalForce += 1.0; } } if (numNodesForExternalForce > 0) { nodalExternalForce = 1.0 / numNodesForExternalForce; } } if (belytsckoCase == true) { for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double posX = itNode->X0(); if (posX > 24.999 && posX < 25.001) { numNodesForExternalForce += 1.0; } } if (numNodesForExternalForce > 0) { nodalExternalForce = 40.0 / numNodesForExternalForce; } } for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if (itNode->Is(SOLID)) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); // const unsigned int neighSize = neighb_nodes.size()+1; const unsigned int neighSize = solidNodalSFDneighboursId.size(); const double nodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); if (neighSize > 1 && nodalVolume > 0) { const unsigned int localSize = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS).size(); if (solidLHS_Contribution.size1() != localSize) solidLHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!! if (solidRHS_Contribution.size() != localSize) solidRHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!! 
if (solidEquationId.size() != localSize) solidEquationId.resize(localSize, false); noalias(solidLHS_Contribution) = ZeroMatrix(localSize, localSize); noalias(solidRHS_Contribution) = ZeroVector(localSize); this->SetMaterialPropertiesToSolid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume); firstRow = 0; firstCol = 0; if (dimension == 2) { //////////////////////////// LHS TERMS ////////////////////////////// solidLHS_Contribution(0, 0) += nodalVolume * density * 2.0 * dynamics / timeInterval; solidLHS_Contribution(1, 1) += nodalVolume * density * 2.0 * dynamics / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); solidRHS_Contribution[0] += -nodalVolume * density * Acc[0] * dynamics; solidRHS_Contribution[1] += -nodalVolume * density * Acc[1] * dynamics; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); // double posX= itNode->X(); // double posY= itNode->Y(); // double coeffX =(12.0-24.0*posY)*pow(posX,4); // coeffX += (-24.0+48.0*posY)*pow(posX,3); // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); // RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX; // RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY; solidRHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0]; solidRHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; ///////////////LOAD CONDITIONS FOR BELYTSCHKO CASE // if(itNode->X0()>24.999){ // solidRHS_Contribution[1]+=40.0/2.0; // mesh 4 (1 element per edge) //solidRHS_Contribution[1]+=40.0/3.0; // mesh 2 (2 element per edge) //solidRHS_Contribution[1]+=40.0/5.0; // mesh 1 (4 element per edge) //solidRHS_Contribution[1]+=40.0/9.0; // mesh 0.5 (8 element per edge) // solidRHS_Contribution[1]+=40.0/17.0; // mesh 0.25 (16 element per edge) // solidRHS_Contribution[1]+=40.0/33.0; // mesh 0.125 (32 element per edge) //solidRHS_Contribution[1]+=40.0/65.0; // mesh 0.0625 (64 element per edge) //} if (belytsckoCase == true) { if (itNode->X0() > 24.999 && itNode->X0() < 25.001) { solidRHS_Contribution[1] += nodalExternalForce; } } if (cooksMembraneCase == true) { if (itNode->X0() > 47.999 && itNode->X0() < 48.001) { solidRHS_Contribution[1] += nodalExternalForce; } } //-------- INTERNAL FORCES TERM -------// array_1d<double, 3> Sigma(3, 0.0); Sigma = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); solidEquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 1]; 
solidRHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[2]) * hybridCoeff; solidRHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]) * hybridCoeff; for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 1]; solidLHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta * hybridCoeff; solidLHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta * hybridCoeff; solidLHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta * hybridCoeff; solidLHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta * hybridCoeff; firstRow += 2; } firstRow = 0; firstCol += 2; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = solidNodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { solidEquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); break; } } } else if (i < neighb_nodes.size()) { solidEquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); } } /* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */ } else if (dimension == 3) { //////////////////////////// LHS TERMS ////////////////////////////// solidLHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval; solidLHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval; solidLHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); solidRHS_Contribution[0] += -nodalVolume * density * Acc[0]; solidRHS_Contribution[1] += -nodalVolume * density * Acc[1]; solidRHS_Contribution[2] += -nodalVolume * density * Acc[2]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); solidRHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0]; solidRHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; solidRHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2]; ///////////////LOAD CONDITIONS FOR BELITSCHKO CASE // if(itNode->X0()>24.999){ // // solidRHS_Contribution[1]+=40.0/2.0; // mesh 4 (1 element per edge) // // 
solidRHS_Contribution[1]+=40.0/3.0; // mesh 2 (2 element per edge) // // solidRHS_Contribution[1]+=40.0/5.0; // mesh 1 (4 element per edge) // solidRHS_Contribution[1]+=40.0/27.0; // mesh 0.5 (8 element per edge, 2 per width) // // solidRHS_Contribution[1]+=40.0/17.0; // mesh 0.25 (16 element per edge) // // solidRHS_Contribution[1]+=40.0/33.0; // mesh 0.125 (32 element per edge) // // solidRHS_Contribution[1]+=40.0/65.0; // mesh 0.0625 (64 element per edge) // } //-------- INTERNAL FORCES TERM -------// array_1d<double, 6> Sigma(6, 0.0); Sigma = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); solidEquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); solidEquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 1]; dNdZi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 2]; solidRHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]); solidRHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]); solidRHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]); for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 1]; dNdZj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 2]; solidLHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 1, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) 
* deviatoricCoeff) * theta; firstRow += 3; } firstRow = 0; firstCol += 3; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = solidNodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { solidEquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); solidEquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); break; } } } else if (i < neighb_nodes.size()) { solidEquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); solidEquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } } } #ifdef _OPENMP Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId, mlock_array); #else Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId); #endif } } } // } KRATOS_CATCH("") } void BuildFluidNodally( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; /* std::cout<<"Building LHS and RHS of Momentum Equation Nodally"<<std::endl; */ //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType EquationId; ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; const double FourThirds = 4.0 / 3.0; const double nTwoThirds = -2.0 / 3.0; double theta = 0.5; array_1d<double, 3> Acc(3, 0.0); // array_1d<double,6> Sigma(6,0.0); double pressure = 0; double dNdXi = 0; double dNdYi = 0; double dNdZi = 0; double dNdXj = 0; double dNdYj = 0; double dNdZj = 0; unsigned int firstRow = 0; unsigned int firstCol = 0; double density = 0; double deviatoricCoeff = 0; double volumetricCoeff = 0; /* #pragma omp parallel */ // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); // const unsigned int neighSize = neighb_nodes.size()+1; const unsigned int neighSize = nodalSFDneighboursId.size(); const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); if (neighSize > 1 && nodalVolume > 0) { const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size(); if (LHS_Contribution.size1() != localSize) 
LHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!! if (RHS_Contribution.size() != localSize) RHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!! if (EquationId.size() != localSize) EquationId.resize(localSize, false); noalias(LHS_Contribution) = ZeroMatrix(localSize, localSize); noalias(RHS_Contribution) = ZeroVector(localSize); this->SetMaterialPropertiesToFluid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // // std::cout<<"density,deviatoricCoeff,volumetricCoeff "<<density<<" "<<deviatoricCoeff<<" "<<volumetricCoeff<<std::endl; // std::cout<<"INTERFACE nodalVolume "<<nodalVolume<<std::endl; // }else{ // std::cout<<"nodalVolume "<<nodalVolume<<std::endl; // } firstRow = 0; firstCol = 0; if (dimension == 2) { //////////////////////////// LHS TERMS ////////////////////////////// LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval; LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); RHS_Contribution[0] += -nodalVolume * density * Acc[0]; RHS_Contribution[1] += -nodalVolume * density * Acc[1]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); // double posX= itNode->X(); // double posY= itNode->Y(); // double coeffX =(12.0-24.0*posY)*pow(posX,4); // coeffX += (-24.0+48.0*posY)*pow(posX,3); // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); // RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX; // RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY; RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0]; RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; //-------- INTERNAL FORCES TERM -------// array_1d<double, 3> Sigma(3, 0.0); Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta); Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure; Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure; } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = 
itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1]; RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[2]); RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]); for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1]; LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta; firstRow += 2; } firstRow = 0; firstCol += 2; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); break; } } } else if (i < neighb_nodes.size()) { EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); } } /* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */ } else if (dimension == 3) { //////////////////////////// LHS TERMS ////////////////////////////// LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval; LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval; LHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); RHS_Contribution[0] += -nodalVolume * density * Acc[0]; RHS_Contribution[1] += -nodalVolume * density * Acc[1]; RHS_Contribution[2] += -nodalVolume * density * Acc[2]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0]; RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; RHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2]; //-------- INTERNAL FORCES TERM -------// array_1d<double, 6> Sigma(6, 0.0); Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // 
Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta); Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure; Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure; Sigma[2] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2] + pressure; } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1]; dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2]; RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]); RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]); RHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]); for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1]; dNdZj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 2]; LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta; LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta; LHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) * deviatoricCoeff) * theta; firstRow += 3; } firstRow = 0; firstCol += 3; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode]; // 
std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); break; } } } else if (i < neighb_nodes.size()) { EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } } } #ifdef _OPENMP Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } } // } KRATOS_CATCH("") } /** * @brief This is a call to the linear system solver * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void SystemSolve( TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else TSparseSpace::SetToZero(Dx); // Prints informations about the current time KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** *@brief This is a call to the linear system solver (taking into account some physical particularities of the problem) * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector * @param rModelPart The model part of the problem to solve */ void SystemSolveWithPhysics( TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b, ModelPart &rModelPart) { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //provide physical data as needed if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded()) BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart); //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else { TSparseSpace::SetToZero(Dx); KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl; } // Prints informations about the current time KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** * @brief Function to perform the building and solving phase at the same time. 
* @details It is ideally the fastest and safer function to use when it is possible to solve * just after building * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { KRATOS_TRY Timer::Start("Build"); // boost::timer m_build_time; double hybridCoeff = 1.0; // 0.5: half nodal - half elemental; 1.0 all nodal; 0.0 all elemental BuildSolidNodally(pScheme, rModelPart, A, b, hybridCoeff); if (hybridCoeff < 0.99999999) { BuildElementally(pScheme, rModelPart, A, b); } BuildFluidNodally(pScheme, rModelPart, A, b); // std::cout << "MOMENTUM EQ: build_time : " << m_build_time.elapsed() << std::endl; Timer::Stop("Build"); // ApplyPointLoads(pScheme,rModelPart,b); // Does nothing...dirichlet conditions are naturally dealt with in defining the residual ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b); KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; // const double start_solve = OpenMPUtils::GetCurrentTime(); // Timer::Start("Solve"); /* boost::timer m_solve_time; */ SystemSolveWithPhysics(A, Dx, b, rModelPart); /* std::cout << "MOMENTUM EQ: solve_time : " << m_solve_time.elapsed() << std::endl; */ // Timer::Stop("Solve"); // const double stop_solve = OpenMPUtils::GetCurrentTime(); // KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl; KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; KRATOS_CATCH("") } void BuildElementally( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &rA, TSystemVectorType &rb) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; //getting the elements from the model const int nelements = static_cast<int>(rModelPart.Elements().size()); //getting the array of the conditions const int nconditions = static_cast<int>(rModelPart.Conditions().size()); const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin(); ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; // const double start_build = OpenMPUtils::GetCurrentTime(); // assemble all elements #pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId) { #pragma omp for schedule(guided, 512) nowait for (int k = 0; k < nelements; k++) { ModelPart::ElementsContainerType::iterator it = el_begin + k; //detect if the element is active or not. 
If the user did not make any choice the element
                //is active by default
                bool element_is_active = true;
                if ((it)->IsDefined(ACTIVE))
                    element_is_active = (it)->Is(ACTIVE);

                if (element_is_active)
                {
                    //calculate elemental contribution
                    pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);

                    //assemble the elemental contribution
#ifdef USE_LOCKS_IN_ASSEMBLY
                    AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
                    AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId);
#endif
                }
            }

#pragma omp for schedule(guided, 512)
            for (int k = 0; k < nconditions; k++)
            {
                ModelPart::ConditionsContainerType::iterator it = cond_begin + k;

                //detect if the condition is active or not. If the user did not make any choice the condition
                //is active by default
                bool condition_is_active = true;
                if ((it)->IsDefined(ACTIVE))
                    condition_is_active = (it)->Is(ACTIVE);

                if (condition_is_active)
                {
                    //calculate elemental contribution
                    pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);

#ifdef USE_LOCKS_IN_ASSEMBLY
                    AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
                    AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId);
#endif
                }
            }
        }

        // const double stop_build = OpenMPUtils::GetCurrentTime();
        // KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System build time: " << stop_build - start_build << std::endl;

        KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished building" << std::endl;

        KRATOS_CATCH("")
    }

    void AssembleElementally(
        TSystemMatrixType &rA,
        TSystemVectorType &rb,
        const LocalSystemMatrixType &rLHSContribution,
        const LocalSystemVectorType &rRHSContribution,
        const Element::EquationIdVectorType &rEquationId
#ifdef USE_LOCKS_IN_ASSEMBLY
        ,
        std::vector<omp_lock_t> &rLockArray
#endif
    )
    {
        unsigned int local_size = rLHSContribution.size1();

        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            unsigned int i_global = rEquationId[i_local];

            if (i_global < BaseType::mEquationSystemSize)
            {
#ifdef USE_LOCKS_IN_ASSEMBLY
                omp_set_lock(&rLockArray[i_global]);
                rb[i_global] += rRHSContribution(i_local);
#else
                double &r_a = rb[i_global];
                const double &v_a = rRHSContribution(i_local);
#pragma omp atomic
                r_a += v_a;
#endif
                AssembleRowContributionFreeDofs(rA, rLHSContribution, i_global, i_local, rEquationId);

#ifdef USE_LOCKS_IN_ASSEMBLY
                omp_unset_lock(&rLockArray[i_global]);
#endif
            }
            //note that computation of reactions is not performed here!
        }
    }

    /**
     * @brief Builds the list of the DofSets involved in the problem by "asking" to each element
     * and condition its Dofs.
     * @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
     * way the matrix and RHS are built
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     */
    void SetUpDofSet(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart) override
    {
        KRATOS_TRY;

        KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;

        //Gets the array of elements from the modeler
        ElementsArrayType &pElements = rModelPart.Elements();
        const int nelements = static_cast<int>(pElements.size());

        Element::DofsVectorType ElementalDofList;

        ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();

        unsigned int nthreads = ParallelUtilities::GetNumThreads();

        // typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
        // typedef std::unordered_set < NodeType::DofType::Pointer,
        //     DofPointerHasher,
        //     DofPointerComparor,
        //     allocator_type > set_type;

#ifdef USE_GOOGLE_HASH
        typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
        typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif

        std::vector<set_type> dofs_aux_list(nthreads);
        // std::vector<allocator_type> allocators(nthreads);

        for (int i = 0; i < static_cast<int>(nthreads); i++)
        {
#ifdef USE_GOOGLE_HASH
            dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
            // dofs_aux_list[i] = set_type( allocators[i]);
            dofs_aux_list[i].reserve(nelements);
#endif
        }

        // #pragma omp parallel for firstprivate(nelements, ElementalDofList)
        for (int i = 0; i < static_cast<int>(nelements); ++i)
        {
            auto it_elem = pElements.begin() + i;
            const IndexType this_thread_id = OpenMPUtils::ThisThread();

            // Gets list of Dof involved on every element
            pScheme->GetDofList(*it_elem, ElementalDofList, CurrentProcessInfo);
            dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
        }

        ConditionsArrayType &pConditions = rModelPart.Conditions();
        const int nconditions = static_cast<int>(pConditions.size());
#pragma omp parallel for firstprivate(nconditions, ElementalDofList)
        for (int i = 0; i < nconditions; ++i)
        {
            auto it_cond = pConditions.begin() + i;
            const IndexType this_thread_id = OpenMPUtils::ThisThread();

            // Gets list of Dof involved on every element
            pScheme->GetDofList(*it_cond, ElementalDofList, CurrentProcessInfo);
            dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
        }

        //here we do a reduction in a tree so as to have everything on thread 0
        unsigned int old_max = nthreads;
        unsigned int new_max = ceil(0.5 * static_cast<double>(old_max));
        while (new_max >= 1 && new_max != old_max)
        {
            // //just for debugging
            // std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
            // for (int i = 0; i < new_max; i++)
            // {
            //     if (i + new_max < old_max)
            //     {
            //         std::cout << i << " - " << i + new_max << std::endl;
            //     }
            // }
            // std::cout << "********************" << std::endl;

#pragma omp parallel for
            for (int i = 0; i < static_cast<int>(new_max); i++)
            {
                if (i + new_max < old_max)
                {
                    dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
                    dofs_aux_list[i + new_max].clear();
                }
            }

            old_max = new_max;
            new_max = ceil(0.5 * static_cast<double>(old_max));
        }

        DofsArrayType Doftemp;
        BaseType::mDofSet = DofsArrayType();

        Doftemp.reserve(dofs_aux_list[0].size());
        for (auto it = dofs_aux_list[0].begin();
it != dofs_aux_list[0].end(); it++) { Doftemp.push_back(*it); } Doftemp.Sort(); BaseType::mDofSet = Doftemp; // Throws an execption if there are no Degrees of freedom involved in the analysis KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl; BaseType::mDofSetIsInitialized = true; KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl; #ifdef _OPENMP if (mlock_array.size() != 0) { for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) omp_destroy_lock(&mlock_array[i]); } mlock_array.resize(BaseType::mDofSet.size()); for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) omp_init_lock(&mlock_array[i]); #endif // If reactions are to be calculated, we check if all the dofs have reactions defined // This is tobe done only in debug mode #ifdef KRATOS_DEBUG if (BaseType::GetCalculateReactionsFlag()) { for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl << "Node : " << dof_iterator->Id() << std::endl << "Dof : " << (*dof_iterator) << std::endl << "Not possible to calculate reactions." << std::endl; } } #endif KRATOS_CATCH(""); } /** * @brief Organises the dofset in order to speed up the building phase * @param rModelPart The model part of the problem to solve */ void SetUpSystem( ModelPart &rModelPart) override { // Set equation id for degrees of freedom // the free degrees of freedom are positioned at the beginning of the system, // while the fixed one are at the end (in opposite order). // // that means that if the EquationId is greater than "mEquationSystemSize" // the pointed degree of freedom is restrained // int free_id = 0; int fix_id = BaseType::mDofSet.size(); for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) if (dof_iterator->IsFixed()) dof_iterator->SetEquationId(--fix_id); else dof_iterator->SetEquationId(free_id++); BaseType::mEquationSystemSize = fix_id; } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType &pA, TSystemVectorPointerType &pDx, TSystemVectorPointerType &pb, ModelPart &rModelPart) override { KRATOS_TRY // boost::timer m_contruct_matrix; if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); pA.swap(pNewA); } if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0)); pDx.swap(pNewDx); } if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0)); pb.swap(pNewb); } if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0)); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType &A = *pA; TSystemVectorType &Dx = *pDx; 
TSystemVectorType &b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false); ConstructMatrixStructureForFSI(pScheme, A, rModelPart); } else { if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW"); KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl; A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true); ConstructMatrixStructureForFSI(pScheme, A, rModelPart); } } if (Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize, false); if (b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize, false); //if needed resize the vector for the calculation of reactions if (BaseType::mCalculateReactionsFlag == true) { unsigned int ReactionsVectorSize = BaseType::mDofSet.size(); if (BaseType::mpReactionsVector->size() != ReactionsVectorSize) BaseType::mpReactionsVector->resize(ReactionsVectorSize, false); } // std::cout << "MOMENTUM EQ: contruct_matrix : " << m_contruct_matrix.elapsed() << std::endl; KRATOS_CATCH("") } inline void AssembleRowContributionFreeDofs( TSystemMatrixType &rA, const Matrix &rALocal, const IndexType i, const IndexType i_local, const Element::EquationIdVectorType &EquationId) { double *values_vector = rA.value_data().begin(); std::size_t *index1_vector = rA.index1_data().begin(); std::size_t *index2_vector = rA.index2_data().begin(); const std::size_t left_limit = index1_vector[i]; // Find the first entry // We iterate over the equation ids until we find the first equation id to be considered // We count in which component we find an ID std::size_t last_pos = 0; std::size_t last_found = 0; std::size_t counter = 0; for (std::size_t j = 0; j < EquationId.size(); ++j) { ++counter; const std::size_t j_global = EquationId[j]; if (j_global < BaseType::mEquationSystemSize) { last_pos = ForwardFind(j_global, left_limit, index2_vector); last_found = j_global; break; } } // If the counter is equal to the size of the EquationID vector that means that only one dof will be considered, if the number is greater means that all the dofs are fixed. 
If the number is below means that we have several dofs free to be considered
        if (counter <= EquationId.size())
        {
#ifndef USE_LOCKS_IN_ASSEMBLY
            double &r_a = values_vector[last_pos];
            const double &v_a = rALocal(i_local, counter - 1);
#pragma omp atomic
            r_a += v_a;
#else
            values_vector[last_pos] += rALocal(i_local, counter - 1);
#endif
            // Now find all of the other entries
            std::size_t pos = 0;
            for (std::size_t j = counter; j < EquationId.size(); ++j)
            {
                std::size_t id_to_find = EquationId[j];
                if (id_to_find < BaseType::mEquationSystemSize)
                {
                    if (id_to_find > last_found)
                        pos = ForwardFind(id_to_find, last_pos + 1, index2_vector);
                    else if (id_to_find < last_found)
                        pos = BackwardFind(id_to_find, last_pos - 1, index2_vector);
                    else
                        pos = last_pos;

#ifndef USE_LOCKS_IN_ASSEMBLY
                    double &r = values_vector[pos];
                    const double &v = rALocal(i_local, j);
#pragma omp atomic
                    r += v;
#else
                    values_vector[pos] += rALocal(i_local, j);
#endif
                    last_found = id_to_find;
                    last_pos = pos;
                }
            }
        }
    }

    inline std::size_t ForwardFind(const std::size_t id_to_find,
                                   const std::size_t start,
                                   const std::size_t *index_vector)
    {
        std::size_t pos = start;
        while (id_to_find != index_vector[pos])
            pos++;
        return pos;
    }

    inline std::size_t BackwardFind(const std::size_t id_to_find,
                                    const std::size_t start,
                                    const std::size_t *index_vector)
    {
        std::size_t pos = start;
        while (id_to_find != index_vector[pos])
            pos--;
        return pos;
    }

    //**************************************************************************
    //**************************************************************************

    /**
     * @brief Applies the Dirichlet conditions. This operation may be very heavy or completely
     * inexpensive depending on the implementation chosen and on how the System Matrix is built.
     * @details For explanation of how it works for a particular implementation the user
     * should refer to the particular Builder And Solver chosen
     * @param pScheme The integration scheme considered
     * @param rModelPart The model part of the problem to solve
     * @param A The LHS matrix
     * @param Dx The Unknowns vector
     * @param b The RHS vector
     */
    void ApplyDirichletConditions(
        typename TSchemeType::Pointer pScheme,
        ModelPart &rModelPart,
        TSystemMatrixType &A,
        TSystemVectorType &Dx,
        TSystemVectorType &b) override
    {
    }

    /**
     * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
     */
    void Clear() override
    {
        this->mDofSet = DofsArrayType();

        if (this->mpReactionsVector != NULL)
            TSparseSpace::Clear((this->mpReactionsVector));
        // this->mReactionsVector = TSystemVectorType();

        this->mpLinearSystemSolver->Clear();

        KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
    }

    /**
     * @brief This function is designed to be called once to perform all the checks needed
     * on the input provided. Checks can be "expensive" as the function is designed
     * to catch user's errors.
* @param rModelPart The model part of the problem to solve * @return 0 all ok */ int Check(ModelPart &rModelPart) override { KRATOS_TRY return 0; KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ void Assemble( TSystemMatrixType &A, TSystemVectorType &b, const LocalSystemMatrixType &LHS_Contribution, const LocalSystemVectorType &RHS_Contribution, const Element::EquationIdVectorType &EquationId #ifdef _OPENMP , std::vector<omp_lock_t> &lock_array #endif ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&lock_array[i_global]); #endif b[i_global] += RHS_Contribution(i_local); for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) { A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } #ifdef _OPENMP omp_unset_lock(&lock_array[i_global]); #endif } //note that assembly on fixed rows is not performed here } } //************************************************************************** virtual void ConstructMatrixStructureForFSI( typename TSchemeType::Pointer pScheme, TSystemMatrixType &A, ModelPart &rModelPart) { //filling with zero the matrix (creating the structure) Timer::Start("MatrixStructure"); ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); // Getting the array of the conditions const int nconditions = static_cast<int>(rModelPart.Conditions().size()); ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); const std::size_t equation_size = BaseType::mEquationSystemSize; #ifdef USE_GOOGLE_HASH std::vector<google::dense_hash_set<std::size_t>> indices(equation_size); const std::size_t empty_key = 2 * equation_size + 10; #else std::vector<std::unordered_set<std::size_t>> indices(equation_size); #endif #pragma omp parallel for firstprivate(equation_size) for (int iii = 0; iii < static_cast<int>(equation_size); iii++) { #ifdef USE_GOOGLE_HASH indices[iii].set_empty_key(empty_key); #else indices[iii].reserve(40); #endif } Element::EquationIdVectorType EquationId; ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if (itNode->Is(SOLID)) { const unsigned int localSize = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS).size(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); const unsigned int neighSize = nodalSFDneighboursId.size(); if (EquationId.size() != localSize) EquationId.resize(localSize, false); unsigned int firstCol = 0; const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == 
true) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { unsigned int indexNode = i + 1; if (indexNode < neighSize) { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode]; firstCol += dimension; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); if (neigh_nodes_id == other_neigh_nodes_id) { EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) { EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } break; } } } } } else { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { firstCol += dimension; EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) { EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } } } } if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); const unsigned int neighSize = nodalSFDneighboursId.size(); if (EquationId.size() != localSize) EquationId.resize(localSize, false); unsigned int firstCol = 0; const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { unsigned int indexNode = i + 1; if (indexNode < neighSize) { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode]; firstCol += dimension; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); if (neigh_nodes_id == other_neigh_nodes_id) { EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) { EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } break; } } } } } else { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { firstCol += dimension; EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); if (dimension == 3) { EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } } } } for (std::size_t i = 0; i < EquationId.size(); i++) { if (EquationId[i] < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&mlock_array[EquationId[i]]); #endif auto &row_indices = indices[EquationId[i]]; for (auto it = EquationId.begin(); it != 
EquationId.end(); it++) { if (*it < BaseType::mEquationSystemSize) row_indices.insert(*it); } #ifdef _OPENMP omp_unset_lock(&mlock_array[EquationId[i]]); #endif } } } Element::EquationIdVectorType ids(3, 0); #pragma omp parallel for firstprivate(nconditions, ids) for (int iii = 0; iii < nconditions; iii++) { typename ConditionsArrayType::iterator i_condition = cond_begin + iii; pScheme->EquationId(*i_condition, ids, CurrentProcessInfo); for (std::size_t i = 0; i < ids.size(); i++) { if (ids[i] < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&mlock_array[ids[i]]); #endif auto &row_indices = indices[ids[i]]; for (auto it = ids.begin(); it != ids.end(); it++) { if (*it < BaseType::mEquationSystemSize) row_indices.insert(*it); } #ifdef _OPENMP omp_unset_lock(&mlock_array[ids[i]]); #endif } } } //count the row sizes unsigned int nnz = 0; for (unsigned int i = 0; i < indices.size(); i++) nnz += indices[i].size(); A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); double *Avalues = A.value_data().begin(); std::size_t *Arow_indices = A.index1_data().begin(); std::size_t *Acol_indices = A.index2_data().begin(); //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! Arow_indices[0] = 0; for (int i = 0; i < static_cast<int>(A.size1()); i++) Arow_indices[i + 1] = Arow_indices[i] + indices[i].size(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(A.size1()); i++) { const unsigned int row_begin = Arow_indices[i]; const unsigned int row_end = Arow_indices[i + 1]; unsigned int k = row_begin; for (auto it = indices[i].begin(); it != indices[i].end(); it++) { Acol_indices[k] = *it; Avalues[k] = 0.0; k++; } std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); } A.set_filled(indices.size() + 1, nnz); Timer::Stop("MatrixStructure"); } void AssembleLHS( TSystemMatrixType &A, LocalSystemMatrixType &LHS_Contribution, Element::EquationIdVectorType &EquationId) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ #ifdef _OPENMP std::vector<omp_lock_t> mlock_array; #endif ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while (i != endit && (*i) != candidate) { i++; } if (i == endit) { v.push_back(candidate); } } void AssembleRHS( TSystemVectorType &b, const LocalSystemVectorType &RHS_Contribution, const Element::EquationIdVectorType &EquationId) { unsigned int local_size = RHS_Contribution.size(); if (BaseType::mCalculateReactionsFlag == false) { for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double &b_value = b[i_global]; const double &rhs_value = 
RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } else { TSystemVectorType &ReactionsVector = *BaseType::mpReactionsVector; for (unsigned int i_local = 0; i_local < local_size; i_local++) { const unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) //free dof { // ASSEMBLING THE SYSTEM VECTOR double &b_value = b[i_global]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } else //fixed dof { double &b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize]; const double &rhs_value = RHS_Contribution[i_local]; #pragma omp atomic b_value += rhs_value; } } } } //************************************************************************** void AssembleLHS_CompleteOnFreeRows( TSystemMatrixType &A, LocalSystemMatrixType &LHS_Contribution, Element::EquationIdVectorType &EquationId) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { int j_global = EquationId[j_local]; A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class NodalResidualBasedEliminationBuilderAndSolverForFSI */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI defined */
GB_unaryop__abs_int32_int8.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int32_int8 // op(A') function: GB_tran__abs_int32_int8 // C type: int32_t // A type: int8_t // cast: int32_t cij = (int32_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ int8_t #define GB_CTYPE \ int32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ int8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int32_t z = (int32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT32 || GxB_NO_INT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int32_int8 ( int32_t *restrict Cx, const int8_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int32_int8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
t008.c
#include<stdint.h> #include<stdlib.h> #include<stdio.h> #include<omp.h> #define n_site 4 #define n_mu 4 typedef float T_mu; typedef struct {T_mu mu[n_mu];} T_site; typedef T_site T_field[n_site]; int main(int argc, char **argv) { const T_mu val = 1.0f; T_field f; f[0].mu[0] = 0.0f; #pragma omp target map(f[0:1]) { f[0].mu[0] += val; } printf("%.7g\n", f[0].mu[0]); int ret = 0; if(f[0].mu[0] != val) ret = 1; return ret; }
grid.c
/* Copyright 2014-2015 The Regents of the University of California. * Copyright 2015-2018 Martin Uecker. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * 2011, 2015, 2018 Martin Uecker <[email protected]> * 2014 Frank Ong <[email protected]> */ #include <math.h> #include <complex.h> #include <assert.h> #include <string.h> #include "num/multind.h" #include "num/flpmath.h" #include "num/specfun.h" #include "misc/nested.h" #include "misc/misc.h" #include "grid.h" #define KB_BETA 13.9086 // 13.8551 // 2x oversampling #define KB_WIDTH 3 #ifndef KB128 static double kb(double beta, double x) { if (fabs(x) >= 0.5) return 0.; return bessel_i0(beta * sqrt(1. - pow(2. * x, 2.))) / bessel_i0(beta); } static void kb_precompute(double beta, int n, float table[n + 1]) { for (int i = 0; i < n + 1; i++) table[i] = kb(beta, (double)(i) / (double)(n - 1) / 2.); } #endif static double I0_beta(double beta) { #ifndef KB128 return bessel_i0(beta); #else assert(KB_BETA == beta); return 118509.158946; #endif } const float kb_table128[129] = { 1.0000000000000000, 0.9995847139398653, 0.9983398161018390, 0.9962681840754728, 0.9933746024007669, 0.9896657454602714, 0.9851501536396374, 0.9798382028675283, 0.9737420676763386, 0.9668756779551011, 0.9592546695947362, 0.9508963292536357, 0.9418195334980564, 0.9320446825968820, 0.9215936292739139, 0.9104896027426631, 0.8987571283688524, 0.8864219433239096, 0.8735109086091393, 0.8600519178443380, 0.8460738032267725, 0.8316062390762707, 0.8166796433899514, 0.8013250778354499, 0.7855741466147803, 0.7694588946318385, 0.7530117053952898, 0.7362651990850234, 0.7192521312046796, 0.7020052922349470, 0.6845574086924412, 0.6669410459871208, 0.6491885134575039, 0.6313317719473744, 0.6134023442704702, 0.5954312288909031, 0.5774488171268115, 0.5594848141632452, 0.5415681641375969, 0.5237269795371914, 0.5059884751240438, 0.4883789065765270, 0.4709235140117633, 0.4536464705263341, 0.4365708358662894, 0.4197185153108639, 0.4031102238276424, 0.3867654555305848, 0.3707024584462233, 0.3549382145678427, 0.3394884251524746, 0.3243675011914082, 0.3095885589616078, 0.2951634205431575, 0.2811026191666483, 0.2674154092344597, 0.2541097808412017, 0.2411924786012657, 0.2286690245755740, 0.2165437450752543, 0.2048198011071496, 0.1934992222148726, 0.1825829434594916, 0.1720708452759937, 0.1619617959353290, 0.1522536963371723, 0.1429435268554672, 0.1340273959573592, 0.1255005903162311, 0.1173576261411828, 0.1095923014483913, 0.1021977490043101, 0.0951664896765205, 0.0884904859351842, 0.0821611952563688, 0.0761696231879639, 0.0705063758493464, 0.0651617116473479, 0.0601255920032731, 0.0553877308986640, 0.0509376430610617, 0.0467646906251106, 0.0428581281188566, 0.0392071456399203, 0.0358009101012748, 0.0326286044415178, 0.0296794647097185, 0.0269428149500251, 0.0244080998261697, 0.0220649149406994, 0.0199030348181235, 0.0179124385351197, 0.0160833329944038, 0.0144061738517878, 0.0128716841182598, 0.0114708704705587, 0.0101950373146533, 0.0090357986567118, 0.0079850878455423, 0.0070351652590612, 0.0061786240150858, 0.0054083937936397, 0.0047177428639869, 0.0041002784147792, 0.0035499452900106, 0.0030610232369345, 0.0026281227747268, 0.0022461797944939, 0.0019104490022500, 0.0016164963167613, 0.0013601903336964, 0.0011376929663821, 0.0009454493716780, 0.0007801772670918, 0.0006388557423128, 0.0005187136648862, 0.0004172177758400, 0.0003320605667526, 0.0002611480250751, 0.0002025873295356, 0.0001546745722200, 0.0001158825784783, 
0.0000848488902175, 0.0000603639724392, 0.0000413596971257, 0.0000268981528101, 0.0000161608224276, 0.0000000000000000, 0.0000000000000000, }; static double ftkb(double beta, double x) { double a = sqrt(pow(beta, 2.) - pow(M_PI * x, 2.)); return ((0. == a) ? 1. : (sinh(a) / a)) / I0_beta(beta); } static float rolloff(float x, double beta, float width) { return (float)ftkb(beta, x * width) / ftkb(beta, 0.); } // Linear interpolation static float lerp(float a, float b, float c) { return (1. - c) * a + c * b; } // Linear interpolation look up static float intlookup(int n, const float table[n + 1], float x) { float fpart; // fpart = modff(x * n, &ipart); // int index = ipart; int index = (int)(x * (n - 1)); fpart = x * (n - 1) - (float)index; #if 1 assert(index >= 0); assert(index <= n); assert(fpart >= 0.); assert(fpart <= 1.); #endif float l = lerp(table[index], table[index + 1], fpart); #if 1 assert(l <= 1.); assert(0 >= 0.); #endif return l; } void gridH(const struct grid_conf_s* conf, const complex float* traj, const long ksp_dims[4], complex float* dst, const long grid_dims[4], const complex float* grid) { long C = ksp_dims[3]; #ifndef KB128 // precompute kaiser bessel table int kb_size = 500; float kb_table[kb_size + 1]; kb_precompute(conf->beta, kb_size, kb_table); #else assert(KB_BETA == beta); int kb_size = 128; const float* kb_table = kb_table128; #endif assert(1 == ksp_dims[0]); long samples = ksp_dims[1] * ksp_dims[2]; #pragma omp parallel for for(int i = 0; i < samples; i++) { float pos[3]; pos[0] = conf->os * (creal(traj[i * 3 + 0])); pos[1] = conf->os * (creal(traj[i * 3 + 1])); pos[2] = conf->os * (creal(traj[i * 3 + 2])); pos[0] += (grid_dims[0] > 1) ? ((float)grid_dims[0] / 2.) : 0.; pos[1] += (grid_dims[1] > 1) ? ((float)grid_dims[1] / 2.) : 0.; pos[2] += (grid_dims[2] > 1) ? ((float)grid_dims[2] / 2.) : 0.; complex float val[C]; for (int j = 0; j < C; j++) val[j] = 0.0; grid_pointH(C, 3, grid_dims, pos, val, grid, conf->periodic, conf->width, kb_size, kb_table); for (int j = 0; j < C; j++) dst[j * samples + i] += val[j]; } } void grid(const struct grid_conf_s* conf, const complex float* traj, const long grid_dims[4], complex float* grid, const long ksp_dims[4], const complex float* src) { long C = ksp_dims[3]; #ifndef KB128 // precompute kaiser bessel table int kb_size = 500; float kb_table[kb_size + 1]; kb_precompute(conf->beta, kb_size, kb_table); #else assert(KB_BETA == beta); int kb_size = 128; const float* kb_table = kb_table128; #endif assert(1 == ksp_dims[0]); long samples = ksp_dims[1] * ksp_dims[2]; // grid #pragma omp parallel for for(int i = 0; i < samples; i++) { float pos[3]; pos[0] = conf->os * (creal(traj[i * 3 + 0])); pos[1] = conf->os * (creal(traj[i * 3 + 1])); pos[2] = conf->os * (creal(traj[i * 3 + 2])); pos[0] += (grid_dims[0] > 1) ? ((float) grid_dims[0] / 2.) : 0.; pos[1] += (grid_dims[1] > 1) ? ((float) grid_dims[1] / 2.) : 0.; pos[2] += (grid_dims[2] > 1) ? ((float) grid_dims[2] / 2.) 
: 0.; complex float val[C]; for (int j = 0; j < C; j++) val[j] = src[j * samples + i]; grid_point(C, 3, grid_dims, pos, grid, val, conf->periodic, conf->width, kb_size, kb_table); } } static void grid2_dims(unsigned int D, const long trj_dims[D], const long ksp_dims[D], const long grid_dims[D]) { assert(D >= 4); assert(md_check_compat(D - 3, ~0, grid_dims + 3, ksp_dims + 3)); assert(md_check_compat(D - 3, ~(MD_BIT(1) | MD_BIT(2)), trj_dims + 3, ksp_dims + 3)); assert(md_check_bounds(D - 3, ~0, trj_dims + 3, ksp_dims + 3)); assert(3 == trj_dims[0]); assert(1 == trj_dims[3]); assert(1 == ksp_dims[0]); } void grid2(const struct grid_conf_s* conf, unsigned int D, const long trj_dims[D], const complex float* traj, const long grid_dims[D], complex float* dst, const long ksp_dims[D], const complex float* src) { grid2_dims(D, trj_dims, ksp_dims, grid_dims); long ksp_strs[D]; md_calc_strides(D, ksp_strs, ksp_dims, CFL_SIZE); long trj_strs[D]; md_calc_strides(D, trj_strs, trj_dims, CFL_SIZE); long grid_strs[D]; md_calc_strides(D, grid_strs, grid_dims, CFL_SIZE); long pos[D]; for (unsigned int i = 0; i < D; i++) pos[i] = 0; do { grid(conf, &MD_ACCESS(D, trj_strs, pos, traj), grid_dims, &MD_ACCESS(D, grid_strs, pos, dst), ksp_dims, &MD_ACCESS(D, ksp_strs, pos, src)); } while(md_next(D, ksp_dims, (~0 ^ 15), pos)); } void grid2H(const struct grid_conf_s* conf, unsigned int D, const long trj_dims[D], const complex float* traj, const long ksp_dims[D], complex float* dst, const long grid_dims[D], const complex float* src) { grid2_dims(D, trj_dims, ksp_dims, grid_dims); long ksp_strs[D]; md_calc_strides(D, ksp_strs, ksp_dims, CFL_SIZE); long trj_strs[D]; md_calc_strides(D, trj_strs, trj_dims, CFL_SIZE); long grid_strs[D]; md_calc_strides(D, grid_strs, grid_dims, CFL_SIZE); long pos[D]; for (unsigned int i = 0; i < D; i++) pos[i] = 0; do { gridH(conf, &MD_ACCESS(D, trj_strs, pos, traj), ksp_dims, &MD_ACCESS(D, ksp_strs, pos, dst), grid_dims, &MD_ACCESS(D, grid_strs, pos, src)); } while(md_next(D, ksp_dims, (~0 ^ 15), pos)); } typedef void CLOSURE_TYPE(grid_update_t)(int ind, float d); #ifndef __clang__ #define VLA(x) x #else // blocks extension does not play well even with arguments which // just look like variably-modified types #define VLA(x) #endif static void grid_point_gen(int N, const long dims[VLA(N)], const float pos[VLA(N)], bool periodic, float width, int kb_size, const float kb_table[VLA(kb_size + 1)], grid_update_t update) { #ifndef __clang__ int sti[N]; int eni[N]; int off[N]; #else // blocks extension does not play well with variably-modified types int* sti = alloca(sizeof(int[N])); int* eni = alloca(sizeof(int[N])); int* off = alloca(sizeof(int[N])); #endif for (int j = 0; j < N; j++) { sti[j] = (int)ceil(pos[j] - width); eni[j] = (int)floor(pos[j] + width); off[j] = 0; if (sti[j] > eni[j]) return; if (!periodic) { sti[j] = MAX(sti[j], 0); eni[j] = MIN(eni[j], dims[j] - 1); } else { while (sti[j] + off[j] < 0) off[j] += dims[j]; } if (1 == dims[j]) { assert(0. 
== pos[j]); sti[j] = 0; eni[j] = 0; } } __block NESTED(void, grid_point_r, (int N, int ind, float d)) // __block for recursion { if (0 == N) { update(ind, d); } else { N--; for (int w = sti[N]; w <= eni[N]; w++) { float frac = fabs(((float)w - pos[N])); float d2 = d * intlookup(kb_size, kb_table, frac / width); int ind2 = (ind * dims[N] + ((w + off[N]) % dims[N])); grid_point_r(N, ind2, d2); } } }; grid_point_r(N, 0, 1.); } void grid_point(unsigned int ch, int N, const long dims[VLA(N)], const float pos[VLA(N)], complex float* dst, const complex float val[VLA(ch)], bool periodic, float width, int kb_size, const float kb_table[kb_size + 1]) { NESTED(void, update, (int ind, float d)) { for (unsigned int c = 0; c < ch; c++) { // we are allowed to update real and imaginary part independently which works atomically #pragma omp atomic __real(dst[ind + c * dims[0] * dims[1] * dims[2]]) += __real(val[c]) * d; #pragma omp atomic __imag(dst[ind + c * dims[0] * dims[1] * dims[2]]) += __imag(val[c]) * d; } }; grid_point_gen(N, dims, pos, periodic, width, kb_size, kb_table, update); } void grid_pointH(unsigned int ch, int N, const long dims[VLA(N)], const float pos[VLA(N)], complex float val[VLA(ch)], const complex float* src, bool periodic, float width, int kb_size, const float kb_table[kb_size + 1]) { NESTED(void, update, (int ind, float d)) { for (unsigned int c = 0; c < ch; c++) { // we are allowed to update real and imaginary part independently which works atomically #pragma omp atomic __real(val[c]) += __real(src[ind + c * dims[0] * dims[1] * dims[2]]) * d; #pragma omp atomic __imag(val[c]) += __imag(src[ind + c * dims[0] * dims[1] * dims[2]]) * d; } }; grid_point_gen(N, dims, pos, periodic, width, kb_size, kb_table, update); } double calc_beta(float os, float width) { return M_PI * sqrt(pow((width * 2. / os) * (os - 0.5), 2.) - 0.8); } static float pos(int d, int i) { return (1 == d) ? 0. : (((float)i - (float)d / 2.) / (float)d); } void rolloff_correction(float os, float width, float beta, const long dimensions[3], complex float* dst) { UNUSED(os); #pragma omp parallel for collapse(3) for (int z = 0; z < dimensions[2]; z++) for (int y = 0; y < dimensions[1]; y++) for (int x = 0; x < dimensions[0]; x++) dst[x + dimensions[0] * (y + z * dimensions[1])] = 1. / ( rolloff(pos(dimensions[0], x), beta, width) * rolloff(pos(dimensions[1], y), beta, width) * rolloff(pos(dimensions[2], z), beta, width) ); }
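As a quick numerical check of calc_beta(): with 2x oversampling (os = 2) and the default kernel width KB_WIDTH = 3, the formula evaluates to M_PI * sqrt((3 * 2 / 2 * (2 - 0.5))^2 - 0.8) = M_PI * sqrt(19.45) ≈ 13.855, which is the 13.8551 value noted in the comment next to KB_BETA above. A minimal sketch of that check (a hypothetical test helper, assuming calc_beta is visible through grid.h):

#include <assert.h>
#include <math.h>

extern double calc_beta(float os, float width);   /* defined in grid.c above */

static void check_kb_beta(void)
{
    double beta = calc_beta(2.f, 3.f);        /* 2x oversampling, width 3 */
    assert(fabs(beta - 13.8551) < 1e-3);      /* matches the KB_BETA comment */
}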
stencil_parallel_vect.c
#include <stdio.h> #include <omp.h> #include <inttypes.h> #include "matrix_utils.h" int main(int argc, const char* argv[]){ if(argc==2){ int numThreads = strtoimax(argv[1], NULL, 0); if(numThreads>0){ double c[5], start_time, end_time, temp; static double g[2][M_SIZE][M_SIZE]; int last_matrix=0; initiateMask(c); initiateMatrix(g[0]); initiateMatrix(g[1]); omp_set_num_threads(numThreads); start_time=omp_get_wtime(); for(int it=0; it<ITERATIONS; it++){ //one iteration #pragma omp parallel for private(temp) schedule(static) for(int i=STENCIL_P; i<M_SIZE-STENCIL_P; i++){ for(int j=STENCIL_P; j<M_SIZE-STENCIL_P; j++){ temp= c[0]*g[last_matrix][i][j]; #pragma omp simd aligned(c, g: 32) reduction(+: temp) for(int k=1; k < 5; k++){ temp+= c[k]*g[last_matrix][i][j+k]; temp+= c[k]*g[last_matrix][i][j-k]; temp+= c[k]*g[last_matrix][i+k][j]; temp+= c[k]*g[last_matrix][i-k][j]; } g[!last_matrix][i][j]=temp; } } last_matrix=!last_matrix; } end_time = omp_get_wtime() - start_time; //printResults(g[last_matrix]); printf("Execution Time: %f s\n",end_time); }else{ printf("Wrong number of threads!\n"); } }else{ printf("Wrong number of arguments. Put number of threads!\n"); } return 0; }
mixed_tentusscher_myo_epi_2004_S2_7.c
// Scenario 1 - Mixed-Model TenTusscher 2004 (Myocardium + Epicardium) // (AP + max:dvdt) #include <stdio.h> #include "mixed_tentusscher_myo_epi_2004_S2_7.h" GET_CELL_MODEL_DATA(init_cell_model_data) { if(get_initial_v) cell_model->initial_v = INITIAL_V; if(get_neq) cell_model->number_of_ode_equations = NEQ; } SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) { static bool first_call = true; if(first_call) { print_to_stdout_and_file("Using mixed version of TenTusscher 2004 myocardium + epicardium CPU model\n"); first_call = false; } // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } // Initial conditions for TenTusscher myocardium if (mapping[sv_id] == 0) { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.3965119057144,0.00133824305081220,0.775463576993407,0.775278393595599,0.000179499343643571,0.483303039835057,0.00297647859235379,0.999998290403642,1.98961879737287e-08,1.93486789479597e-05,0.999599147019885,1.00646342475688,0.999975178010127,5.97703651642618e-05,0.418325344820368,10.7429775420171,138.918155900633}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } // Initial conditions for TenTusscher epicardium else { // Default initial conditions /* sv[0] = INITIAL_V; // V; millivolt sv[1] = 0.f; //M sv[2] = 0.75; //H sv[3] = 0.75f; //J sv[4] = 0.f; //Xr1 sv[5] = 1.f; //Xr2 sv[6] = 0.f; //Xs sv[7] = 1.f; //S sv[8] = 0.f; //R sv[9] = 0.f; //D sv[10] = 1.f; //F sv[11] = 1.f; //FCa sv[12] = 1.f; //G sv[13] = 0.0002; //Cai sv[14] = 0.2f; //CaSR sv[15] = 11.6f; //Nai sv[16] = 138.3f; //Ki */ // Elnaz's steady-state initial conditions real sv_sst[]={-86.7278378798970,0.00124683785460400,0.783600532542734,0.783451216299390,0.000170840217018618,0.486751117524943,0.00290707269945428,0.999998399879982,1.88274662357417e-08,1.85125538579548e-05,0.999771968830020,1.00716814076148,0.999996330327535,4.34584098863557e-05,0.262582811563238,10.1909174640447,139.649590217400}; for (uint32_t i = 0; i < NEQ; i++) sv[i] = sv_sst[i]; } } SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) { // Get the mapping array uint32_t *mapping = NULL; if(extra_data) { mapping = (uint32_t*)extra_data; } else { print_to_stderr_and_file_and_exit("You need to specify a mask function when using a mixed model!\n"); } uint32_t sv_id; int i; #pragma omp parallel for private(sv_id) for (i = 0; i < num_cells_to_solve; i++) { if(cells_to_solve) sv_id = cells_to_solve[i]; else sv_id = (uint32_t )i; for (int j = 0; j < num_steps; ++j) { if (mapping[i] == 0) solve_model_ode_cpu_myo(dt, sv + (sv_id * NEQ), stim_currents[i]); else solve_model_ode_cpu_epi(dt, sv + (sv_id * NEQ), stim_currents[i]); } } } void solve_model_ode_cpu_myo (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_myo(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_myo(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = 
sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Myocardium cell real Gks=0.062; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] Myocardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* 
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f; Irel=A*sd*sg; Ileak=0.00008f*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; // [!] 
Myocardium cell R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; } void solve_model_ode_cpu_epi (real dt, real *sv, real stim_current) { real rY[NEQ], rDY[NEQ]; for(int i = 0; i < NEQ; i++) rY[i] = sv[i]; RHS_cpu_epi(rY, rDY, stim_current, dt); for(int i = 0; i < NEQ; i++) sv[i] = rDY[i]; } void RHS_cpu_epi(const real *sv, real *rDY_, real stim_current, real dt) { // State variables real svolt = sv[0]; real sm = sv[1]; real sh = sv[2]; real sj = sv[3]; real sxr1 = sv[4]; real sxr2 = sv[5]; real sxs = sv[6]; real ss = sv[7]; real sr = sv[8]; real sd = sv[9]; real sf = sv[10]; real sfca = sv[11]; real sg = sv[12]; real Cai = sv[13]; real CaSR = sv[14]; real Nai = sv[15]; real Ki = sv[16]; //External concentrations real Ko=5.4; real Cao=2.0; real Nao=140.0; //Intracellular volumes real Vc=0.016404; real Vsr=0.001094; //Calcium dynamics real Bufc=0.15f; real Kbufc=0.001f; real Bufsr=10.f; real Kbufsr=0.3f; real taufca=2.f; real taug=2.f; real Vmaxup=0.000425f; real Kup=0.00025f; //Constants const real R = 8314.472f; const real F = 96485.3415f; const real T =310.0f; real RTONF =(R*T)/F; //Cellular capacitance real CAPACITANCE=0.185; //Parameters for currents //Parameters for IKr real Gkr=0.096; //Parameters for Iks real pKNa=0.03; // [!] Epicardium cell real Gks=0.245; //Parameters for Ik1 real GK1=5.405; //Parameters for Ito // [!] 
Epicardium cell real Gto=0.294; //Parameters for INa real GNa=14.838; //Parameters for IbNa real GbNa=0.00029; //Parameters for INaK real KmK=1.0; real KmNa=40.0; real knak=1.362; //Parameters for ICaL real GCaL=0.000175; //Parameters for IbCa real GbCa=0.000592; //Parameters for INaCa real knaca=1000; real KmNai=87.5; real KmCa=1.38; real ksat=0.1; real n=0.35; //Parameters for IpCa real GpCa=0.825; real KpCa=0.0005; //Parameters for IpK; real GpK=0.0146; real parameters []={14.5712760941292,0.000430386069753157,0.000123954049331097,0.000339134345249555,0.275716122609344,0.118177339481679,0.171110543073973,4.97589596104639,0.0143070702358362,1.84740470131292,1098.07431966868,0.000411463768659304,0.558750994902965,0.00904110237316287,0.00475280604119224,7.62229127897770e-05}; GNa=parameters[0]; GbNa=parameters[1]; GCaL=parameters[2]; GbCa=parameters[3]; Gto=parameters[4]; Gkr=parameters[5]; Gks=parameters[6]; GK1=parameters[7]; GpK=parameters[8]; knak=parameters[9]; knaca=parameters[10]; Vmaxup=parameters[11]; GpCa=parameters[12]; real arel=parameters[13]; real crel=parameters[14]; real Vleak=parameters[15]; real IKr; real IKs; real IK1; real Ito; real INa; real IbNa; real ICaL; real IbCa; real INaCa; real IpCa; real IpK; real INaK; real Irel; real Ileak; real dNai; real dKi; real dCai; real dCaSR; real A; // real BufferFactorc; // real BufferFactorsr; real SERCA; real Caisquare; real CaSRsquare; real CaCurrent; real CaSRCurrent; real fcaold; real gold; real Ek; real Ena; real Eks; real Eca; real CaCSQN; real bjsr; real cjsr; real CaBuf; real bc; real cc; real Ak1; real Bk1; real rec_iK1; real rec_ipK; real rec_iNaK; real AM; real BM; real AH_1; real BH_1; real AH_2; real BH_2; real AJ_1; real BJ_1; real AJ_2; real BJ_2; real M_INF; real H_INF; real J_INF; real TAU_M; real TAU_H; real TAU_J; real axr1; real bxr1; real axr2; real bxr2; real Xr1_INF; real Xr2_INF; real TAU_Xr1; real TAU_Xr2; real Axs; real Bxs; real Xs_INF; real TAU_Xs; real R_INF; real TAU_R; real S_INF; real TAU_S; real Ad; real Bd; real Cd; real TAU_D; real D_INF; real TAU_F; real F_INF; real FCa_INF; real G_INF; real inverseVcF2=1/(2*Vc*F); real inverseVcF=1./(Vc*F); real Kupsquare=Kup*Kup; // real BufcKbufc=Bufc*Kbufc; // real Kbufcsquare=Kbufc*Kbufc; // real Kbufc2=2*Kbufc; // real BufsrKbufsr=Bufsr*Kbufsr; // const real Kbufsrsquare=Kbufsr*Kbufsr; // const real Kbufsr2=2*Kbufsr; const real exptaufca=exp(-dt/taufca); const real exptaug=exp(-dt/taug); real sItot; //Needed to compute currents Ek=RTONF*(log((Ko/Ki))); Ena=RTONF*(log((Nao/Nai))); Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai))); Eca=0.5*RTONF*(log((Cao/Cai))); Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200))); Bk1=(3.*exp(0.0002*(svolt-Ek+100))+ exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek))); rec_iK1=Ak1/(Ak1+Bk1); rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T)))); rec_ipK=1./(1.+exp((25-svolt)/5.98)); //Compute currents INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena); ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))* (exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.); Ito=Gto*sr*ss*(svolt-Ek); IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek); IKs=Gks*sxs*sxs*(svolt-Eks); IK1=GK1*rec_iK1*(svolt-Ek); INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))* (1./(1+ksat*exp((n-1)*svolt*F/(R*T))))* (exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao- exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5); INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK; IpCa=GpCa*Cai/(KpCa+Cai); IpK=GpK*rec_ipK*(svolt-Ek); IbNa=GbNa*(svolt-Ena); IbCa=GbCa*(svolt-Eca); //Determine total 
current (sItot) = IKr + IKs + IK1 + Ito + INa + IbNa + ICaL + IbCa + INaK + INaCa + IpCa + IpK + stim_current; //update concentrations Caisquare=Cai*Cai; CaSRsquare=CaSR*CaSR; CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE; A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel; Irel=A*sd*sg; Ileak=Vleak*(CaSR-Cai); SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare)); CaSRCurrent=SERCA-Irel-Ileak; CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr); dCaSR=dt*(Vc/Vsr)*CaSRCurrent; bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr; cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR); CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.; CaBuf=Bufc*Cai/(Cai+Kbufc); dCai=dt*(CaCurrent-CaSRCurrent); bc=Bufc-CaBuf-dCai-Cai+Kbufc; cc=Kbufc*(CaBuf+dCai+Cai); Cai=(sqrt(bc*bc+4*cc)-bc)/2; dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE; Nai+=dt*dNai; dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE; Ki+=dt*dKi; //compute steady state values and time constants AM=1./(1.+exp((-60.-svolt)/5.)); BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.)); TAU_M=AM*BM; M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03))); if (svolt>=-40.) { AH_1=0.; BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1)))); TAU_H= 1.0/(AH_1+BH_1); } else { AH_2=(0.057*exp(-(svolt+80.)/6.8)); BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt)); TAU_H=1.0/(AH_2+BH_2); } H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43))); if(svolt>=-40.) { AJ_1=0.; BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.)))); TAU_J= 1.0/(AJ_1+BJ_1); } else { AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)* exp(-0.04391*svolt))*(svolt+37.78)/ (1.+exp(0.311*(svolt+79.23)))); BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14)))); TAU_J= 1.0/(AJ_2+BJ_2); } J_INF=H_INF; Xr1_INF=1./(1.+exp((-26.-svolt)/7.)); axr1=450./(1.+exp((-45.-svolt)/10.)); bxr1=6./(1.+exp((svolt-(-30.))/11.5)); TAU_Xr1=axr1*bxr1; Xr2_INF=1./(1.+exp((svolt-(-88.))/24.)); axr2=3./(1.+exp((-60.-svolt)/20.)); bxr2=1.12/(1.+exp((svolt-60.)/20.)); TAU_Xr2=axr2*bxr2; Xs_INF=1./(1.+exp((-5.-svolt)/14.)); Axs=1100./(sqrt(1.+exp((-10.-svolt)/6))); Bxs=1./(1.+exp((svolt-60.)/20.)); TAU_Xs=Axs*Bxs; R_INF=1./(1.+exp((20-svolt)/6.)); S_INF=1./(1.+exp((svolt+20)/5.)); TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8; TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.; D_INF=1./(1.+exp((-5-svolt)/7.5)); Ad=1.4/(1.+exp((-35-svolt)/13))+0.25; Bd=1.4/(1.+exp((svolt+5)/5)); Cd=1./(1.+exp((50-svolt)/20)); TAU_D=Ad*Bd+Cd; F_INF=1./(1.+exp((svolt+20)/7)); //TAU_F=1125*exp(-(svolt+27)*(svolt+27)/300)+80+165/(1.+exp((25-svolt)/10)); TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10)); // Updated from CellML FCa_INF=(1./(1.+pow((Cai/0.000325),8))+ 0.1/(1.+exp((Cai-0.0005)/0.0001))+ 0.20/(1.+exp((Cai-0.00075)/0.0008))+ 0.23 )/1.46; if(Cai<0.00035) G_INF=1./(1.+pow((Cai/0.00035),6)); else G_INF=1./(1.+pow((Cai/0.00035),16)); //Update gates rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M); rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H); rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J); rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1); rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2); rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs); rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S); rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R); rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D); rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F); fcaold= sfca; sfca = FCa_INF-(FCa_INF-sfca)*exptaufca; if(sfca>fcaold && (svolt)>-37.0) sfca = fcaold; gold = sg; sg = G_INF-(G_INF-sg)*exptaug; if(sg>gold && (svolt)>-37.0) 
sg=gold; //update voltage rDY_[0] = svolt + dt*(-sItot); rDY_[11] = sfca; rDY_[12] = sg; rDY_[13] = Cai; rDY_[14] = CaSR; rDY_[15] = Nai; rDY_[16] = Ki; }
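Each gating variable above is advanced with rDY_[i] = X_INF - (X_INF - x) * exp(-dt / TAU_X), i.e. the exact exponential (Rush-Larsen) solution of dx/dt = (x_inf - x) / tau over one time step. A minimal generic helper, shown only as a sketch of that update rule (not part of the model code):

#include <math.h>

/* Rush-Larsen step for a Hodgkin-Huxley style gate:
 * dx/dt = (x_inf - x) / tau   =>   x(t + dt) = x_inf - (x_inf - x(t)) * exp(-dt / tau) */
static inline double gate_step(double x, double x_inf, double tau, double dt)
{
    return x_inf - (x_inf - x) * exp(-dt / tau);
}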
DRB067-restrictpointer1-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https:github.comLLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. 
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* restrict pointers: no aliasing Array initialization using assignments. C99 is needed to compile this code e.g. gcc -std=c99 -c Stress-1.c */ #include <stdio.h> #include <stdlib.h> typedef double real8; void foo(real8 * restrict newSxx, real8 * restrict newSyy, int length) { int i; #pragma cetus private(i) #pragma loop name foo#0 #pragma cetus parallel #pragma omp parallel for private(i) for (i=0; i<=(length-1); i+=1) { newSxx[i]=0.0; newSyy[i]=0.0; } return ; } void print(real8 * restrict newSxx, real8 * restrict newSyy, int length) { int i; #pragma cetus private(i) #pragma loop name print#0 for (i=0; i<=(length-1); i+=1) { printf("%lf %lf\n", newSxx[i], newSyy[i]); } return ; } int main() { int length = 1000; real8 * newSxx = malloc(length*sizeof (real8)); real8 * newSyy = malloc(length*sizeof (real8)); int _ret_val_0; foo(newSxx, newSyy, length); print(newSxx, newSyy, length); free(newSxx); free(newSyy); _ret_val_0=0; return _ret_val_0; }

morn_tensor.c
/* Copyright (C) 2019-2020 JingWeiZhangHuai <[email protected]> Licensed under the Apache License, Version 2.0; you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "morn_tensor.h" struct HandleTensorCreate { MTensor *tns; MChain *property; int64_t reserve[8]; int writeable; int batch; int size; float **data; MMemory *memory; float **backup_data; MMemory *backup_memory; }; #define HASH_TensorCreate 0x6b6cf658 void endTensorCreate(struct HandleTensorCreate *handle) { mException((handle->tns ==NULL),EXIT,"invalid tensor"); if(handle->property!=NULL) mChainRelease(handle->property); if(handle->data !=NULL) mFree(handle->data); if(handle->memory !=NULL) mMemoryRelease(handle->memory); if(handle->backup_data !=NULL) mFree(handle->backup_data); if(handle->backup_memory!=NULL) mMemoryRelease(handle->backup_memory); memset(handle->tns,0,sizeof(MTensor)); mFree(((MList **)(handle->tns))-1); } MTensor *TensorCreate(int batch,int channel,int height,int width,float **data,int device) { MList **phandle = (MList **)mMalloc(sizeof(MList *)+sizeof(MTensor)); MTensor *tns = (MTensor *)(phandle+1); memset(tns,0,sizeof(MTensor)); if(batch <0) {batch = 0; } tns->batch = batch; if(channel<0) {channel= 0; } tns->channel= channel; if(height <0) {height = 0; } tns->height = height; if(width <0) {width = 0; } tns->width = width; if(device <0) {device = MORN_HOST;} tns->device = MORN_HOST; *phandle = mHandleCreate(); MHandle *hdl=mHandle(tns,TensorCreate); struct HandleTensorCreate *handle = (struct HandleTensorCreate *)(hdl->handle); handle->tns = tns; int size = channel*height*width; if((batch==0)||(size == 0)) { mException((!INVALID_POINTER(data)),EXIT,"invalid input"); return tns; } size = size+8; handle->batch = batch; handle->data = (float **)mMalloc(batch*sizeof(float *)); tns->data = handle->data; if(!INVALID_POINTER(data)) { handle->size = 0; memcpy(handle->data,data,batch*sizeof(float *)); return tns; } handle->size= size; handle->memory = mMemoryCreate(batch,size*sizeof(float),device); void ***idx = malloc(batch*sizeof(void **)); for(int i=0;i<batch;i++) idx[i]=(void **)(&(handle->data[i])); mMemoryIndex(handle->memory,1,size*sizeof(float),idx,batch); free(idx); mPropertyFunction(tns,"device",mornMemoryDevice,handle->memory); for(int b=0;b<batch;b++) tns->data[b][channel*height*width]=1.0f; return tns; } void mTensorRelease(MTensor *tns) { mHandleRelease(tns); } MMemoryBlock *mTensorMemory(MTensor *tns,int batch) { int size = tns->channel*tns->height*tns->width+8; float *data = tns->data[batch]; struct HandleTensorCreate *handle = (struct HandleTensorCreate *)(ObjHandle(tns,0)->handle); if(handle->memory == NULL) { handle->memory = mMemoryCreate(batch,size*sizeof(float),MORN_HOST); mPropertyFunction(tns,"device",mornMemoryDevice,handle->memory); } MMemoryBlock *mem = handle->memory->data[batch]; if(mem->size<size) { void ***idx = malloc(batch*sizeof(void **)); for(int i=0;i<batch;i++) idx[i]=(void **)(&(handle->data[i])); mMemoryIndex(handle->memory,1,size*sizeof(float),idx,batch); free(idx); } if(mem->data!=data) memcpy(mem->data,data,size*sizeof(float)); return mem; } void TensorRedefine(MTensor 
*tns,int batch,int channel,int height,int width,float **data,int device) { mException((INVALID_POINTER(tns)),EXIT,"invalid input"); if(batch <= 0) batch = tns->batch; if(channel<= 0) channel= tns->channel; if(height <= 0) height = tns->height; if(width <= 0) width = tns->width; if(INVALID_POINTER(data)) data=tns->data; int size = channel*height*width+8; if((batch!=tns->batch)||(channel!=tns->channel)||(height!=tns->height)||(width!=tns->width)) mHandleReset(tns); int same_size = (batch<=tns->batch)&&(size<tns->channel*tns->height*tns->width)&&(data==tns->data); tns->batch=batch; tns->height=height; tns->width=width; tns->channel=channel; if(same_size&&(data==NULL)) goto tensor_redefine_end; if(same_size&&((device<0)||(device==mMemoryBlock(data[0])->device))) goto tensor_redefine_end; struct HandleTensorCreate *handle = (struct HandleTensorCreate *)(ObjHandle(tns,0)->handle); if(device<0) { if((data!=tns->data)&&(data!=NULL)) device=mMemoryBlock(data[0])->device; } if((data!=tns->data)&&(data!=NULL)) { for(int bc=0;bc<batch;bc++) mException(mMemoryBlock(data[bc])->device!=device,EXIT,"invalid data device"); } if((batch<=handle->batch)&&(size<=handle->size)&&(data==handle->data)) return; // int flag = (tns->batch)&&(tns->channel)&&(tns->height)&&(tns->width); // mException(reuse&&flag&&(handle->size==0),EXIT,"invalid redefine"); if((batch==0)||(size<=8)) { mException((data!=tns->data),EXIT,"invalid input"); tns->data=NULL; goto tensor_redefine_end; } if(batch>handle->batch){if(handle->data != NULL) {free(handle->data);}handle->data=NULL;} if(handle->data==NULL) { handle->data = (float **)malloc(batch*sizeof(float *)); handle->batch = batch; } if(data!=tns->data) { memcpy(handle->data,data,batch*sizeof(float *)); tns->data = handle->data; if(handle->backup_data !=NULL) mFree(handle->backup_data); if(handle->backup_memory!=NULL) mMemoryRelease(handle->backup_memory); goto tensor_redefine_end; } if(handle->memory == NULL) { handle->memory = mMemoryCreate(batch,size*sizeof(float),device); mPropertyFunction(tns,"device",mornMemoryDevice,handle->memory); } else mMemoryRedefine(handle->memory,batch,size*sizeof(float),device); void ***idx = malloc(batch*sizeof(void **)); for(int i=0;i<batch;i++) idx[i]=(void **)(&(handle->data[i])); mMemoryIndex(handle->memory,1,size*sizeof(float),idx,batch); free(idx); tns->data = handle->data; handle->size = size; tensor_redefine_end: for(int b=0;b<batch;b++) tns->data[b][channel*height*width]=1.0f; } float **mTensorBackup(MTensor *tns,int batch,int cn,int height,int width) { if(batch <=0) batch =tns->batch; if(cn <=0) cn =tns->channel; if(height<=0) height=tns->height; if(width <=0) width =tns->width; int size = cn*height*width; struct HandleTensorCreate *handle = (struct HandleTensorCreate *)(ObjHandle(tns,0)->handle); if(handle->backup_data!=NULL) mFree(handle->backup_data); handle->backup_data = (float **)mMalloc(batch*sizeof(float *)); if(handle->backup_memory == NULL) handle->backup_memory = mMemoryCreate(batch,size*sizeof(float),tns->device); else mMemoryRedefine(handle->backup_memory,batch,size*sizeof(float),tns->device); for(int i=0;i<batch;i++) handle->backup_data[i] = (float *)(handle->backup_memory->data[i]); return (handle->backup_data); } void MemCopy(void *dst,int dst_dev,void *src,int src_dev,int size); void mTensorCopy(MTensor *src,MTensor *dst,int device) { mException(INVALID_POINTER(src),EXIT,"invalid input source tensor"); mException(INVALID_POINTER(dst)&&(device<0),EXIT,"invalid input device"); if(device<0) device=dst->device; float 
**dst_data; int flag = (INVALID_POINTER(dst))||(dst==src); if(flag) {if(device==src->device){return;} dst_data=mTensorBackup(src,DFLT,DFLT,DFLT,DFLT);} else {if(device!=dst->device){mTensorRedefine(dst,DFLT,DFLT,DFLT,DFLT,NULL,device);} dst_data=dst->data;} // int size = src->channel*src->height*src->width; // for(int i=0;i<src->batch;i++) // MemCopy(dst_data[i],device,src->data[i],src->device,size*sizeof(float)); if(flag) mTensorRedefine(src,DFLT,DFLT,DFLT,DFLT,dst_data,device); } /* void mTensorAdd(MTensor *src1,MTensor *src2,MTensor *dst) { int i; mException((INVALID_TENSOR(src1)||INVALID_TENSOR(src2)),EXIT,"invalid input source"); int batch = src1->batch; mException((src2->batch!=batch)&&(src2->batch!=1),EXIT,"invalid input source"); mException((batch>1)&&(src2->batch==1)&&(dst==src2),EXIT,"invalid input"); int channel = src1->channel; int height = src1->height; int width = src1->width; mException((src2->channel!=channel)||(src2->height!=height)||(src2->width!=width),EXIT,"invalid input source"); int size = channel*height*width; if(dst==NULL) dst = src1; if((dst!=src1)&&(dst!=src2)) mTensorRedefine(dst,batch,channel,height,width,dst->data); for(int b=0;b<batch;b++) { float *data1 = src1->data[b]; float *data2 = (src2->batch>1)?src2->data[b]:src2->data[0]; float *data = dst ->data[b]; #pragma omp parallel for for(i=0;i<size;i++) data[i] = data1[i]+data2[i]; } } void mTensorSub(MTensor *src1,MTensor *src2,MTensor *dst) { int i; mException((INVALID_TENSOR(src1)||INVALID_TENSOR(src2)),EXIT,"invalid input source"); int batch = src1->batch; mException((src2->batch!=batch)&&(src2->batch!=1),EXIT,"invalid input source"); mException((batch>1)&&(src2->batch==1)&&(dst==src2),EXIT,"invalid input"); int channel = src1->channel; int height = src1->height; int width = src1->width; mException((src2->channel!=channel)||(src2->height!=height)||(src2->width!=width),EXIT,"invalid input source"); int size = channel*height*width; if(dst==NULL) dst = src1; if((dst!=src1)&&(dst!=src2)) mTensorRedefine(dst,batch,channel,height,width,dst->data); for(int b=0;b<batch;b++) { float *data1 = src1->data[b]; float *data2 = (src2->batch>1)?src2->data[b]:src2->data[0]; float *data = dst ->data[b]; #pragma omp parallel for for(i=0;i<size;i++) data[i] = data1[i]-data2[i]; } } void mTensorScalarMul(MTensor *src1,MTensor *src2,MTensor *dst) { int i; mException((INVALID_TENSOR(src1)||INVALID_TENSOR(src2)),EXIT,"invalid input source"); int batch = src1->batch; mException((src2->batch!=batch)&&(src2->batch!=1),EXIT,"invalid input source"); mException((batch>1)&&(src2->batch==1)&&(dst==src2),EXIT,"invalid input"); int channel = src1->channel; int height = src1->height; int width = src1->width; mException((src2->channel!=channel)||(src2->height!=height)||(src2->width!=width),EXIT,"invalid input source"); int size = channel*height*width; if(dst==NULL) dst = src1; if((dst!=src1)&&(dst!=src2)) mTensorRedefine(dst,batch,channel,height,width,dst->data); for(int b=0;b<batch;b++) { float *data1 = src1->data[b]; float *data2 = (src2->batch>1)?src2->data[b]:src2->data[0]; float *data = dst ->data[b]; #pragma omp parallel for for(i=0;i<size;i++) data[i] = data1[i]*data2[i]; } } void mTensorScalarDiv(MTensor *src1,MTensor *src2,MTensor *dst) { int i; mException((INVALID_TENSOR(src1)||INVALID_TENSOR(src2)),EXIT,"invalid input source"); int batch = src1->batch; mException((src2->batch!=batch)&&(src2->batch!=1),EXIT,"invalid input source"); mException((batch>1)&&(src2->batch==1)&&(dst==src2),EXIT,"invalid input"); int channel = src1->channel; 
int height = src1->height; int width = src1->width; mException((src2->channel!=channel)||(src2->height!=height)||(src2->width!=width),EXIT,"invalid input source"); int size = channel*height*width; if(dst==NULL) dst = src1; if((dst!=src1)&&(dst!=src2)) mTensorRedefine(dst,batch,channel,height,width,dst->data); for(int b=0;b<batch;b++) { float *data1 = src1->data[b]; float *data2 = (src2->batch>1)?src2->data[b]:src2->data[0]; float *data = dst ->data[b]; #pragma omp parallel for for(i=0;i<size;i++) data[i] = data1[i]/data2[i]; } } */ void mTensorOperate(MTensor *src,MTensor *dst, float (*func)(float)) { int i; mException(INVALID_TENSOR(src),EXIT,"invalid input source"); if(dst==NULL) dst = src; if(dst!=src ) mTensorRedefine(dst,src->batch,src->channel,src->height,src->width,dst->data); int size = src->channel*src->height*src->width; for(int b=0;b<src->batch;b++) { #pragma omp parallel for for(i=0;i<size;i++) dst->data[b][i] = func(src->data[b][i]); } }
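mTensorOperate applies a scalar function elementwise to every batch of a tensor, parallelizing over the elements with OpenMP. A minimal usage sketch (it assumes morn_tensor.h declares TensorCreate, mTensorOperate and mTensorRelease as defined above; the squaring function is only an example):

#include <math.h>
#include "morn_tensor.h"

static float square(float x) { return x * x; }

static void tensor_operate_demo(void)
{
    /* two batches of a 3 x 4 x 4 tensor, host memory by default */
    MTensor *t = TensorCreate(2, 3, 4, 4, NULL, -1);
    for (int b = 0; b < t->batch; b++)
        for (int i = 0; i < t->channel * t->height * t->width; i++)
            t->data[b][i] = (float)i;

    mTensorOperate(t, NULL, square);   /* in-place elementwise square */
    mTensorRelease(t);
}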
GB_binop__plus_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__plus_fc64) // A.*B function (eWiseMult): GB (_AemultB_08__plus_fc64) // A.*B function (eWiseMult): GB (_AemultB_02__plus_fc64) // A.*B function (eWiseMult): GB (_AemultB_04__plus_fc64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__plus_fc64) // A*D function (colscale): GB (_AxD__plus_fc64) // D*A function (rowscale): GB (_DxB__plus_fc64) // C+=B function (dense accum): GB (_Cdense_accumB__plus_fc64) // C+=b function (dense accum): GB (_Cdense_accumb__plus_fc64) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__plus_fc64) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__plus_fc64) // C=scalar+B GB (_bind1st__plus_fc64) // C=scalar+B' GB (_bind1st_tran__plus_fc64) // C=A+scalar GB (_bind2nd__plus_fc64) // C=A'+scalar GB (_bind2nd_tran__plus_fc64) // C type: GxB_FC64_t // A type: GxB_FC64_t // A pattern? 0 // B type: GxB_FC64_t // B pattern? 0 // BinaryOp: cij = GB_FC64_add (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ GxB_FC64_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ GxB_FC64_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = GB_FC64_add (x, y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_PLUS || GxB_NO_FC64 || GxB_NO_PLUS_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__plus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__plus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__plus_fc64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__plus_fc64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__plus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__plus_fc64) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__plus_fc64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; GxB_FC64_t alpha_scalar ; GxB_FC64_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = 
(*((GxB_FC64_t *) alpha_scalar_in)) ; beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__plus_fc64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__plus_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__plus_fc64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__plus_fc64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__plus_fc64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = GBX (Bx, p, false) ; Cx [p] = GB_FC64_add (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__plus_fc64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = GBX (Ax, p, false) ; Cx [p] = GB_FC64_add (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_add (x, aij) ; \ } GrB_Info GB (_bind1st_tran__plus_fc64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict 
*Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = GB_FC64_add (aij, y) ; \ } GrB_Info GB (_bind2nd_tran__plus_fc64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
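/*
 * A minimal standalone sketch of the bind1st/bind2nd pattern used by
 * GB (_bind1st__plus_fc64) and GB (_bind2nd__plus_fc64) above: one operand of
 * z = x + y is fixed to a scalar and the PLUS operator is applied across an
 * array.  Plain C99 double complex is used here instead of GxB_FC64_t and
 * GB_FC64_add; the name demo_bind2nd is illustrative only.
 */
#include <complex.h>
#include <stdio.h>

/* Cx [p] = Ax [p] + y for all p: the "bind2nd" form of PLUS */
static void demo_bind2nd (double complex *Cx, const double complex *Ax,
                          double complex y, int n)
{
    #pragma omp parallel for schedule(static)
    for (int p = 0 ; p < n ; p++)
    {
        Cx [p] = Ax [p] + y ;
    }
}

int main (void)
{
    double complex A [3] = { 1.0 + 2.0*I, 3.0, -1.0*I } ;
    double complex C [3] ;
    demo_bind2nd (C, A, 0.5 - 1.0*I, 3) ;
    for (int p = 0 ; p < 3 ; p++)
    {
        printf ("C [%d] = %g + %gi\n", p, creal (C [p]), cimag (C [p])) ;
    }
    return 0 ;
}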
vlisa_prewhitening.c
/* ** single subject lisa algorithm ** prewhitening ** ** G.Lohmann, MPI-KYB, 2018 */ #include "viaio/Vlib.h" #include "viaio/file.h" #include "viaio/mu.h" #include "viaio/option.h" #include "viaio/os.h" #include <viaio/VImage.h> #include <gsl/gsl_matrix.h> #include <gsl/gsl_vector.h> #include <gsl/gsl_cdf.h> #include <gsl/gsl_errno.h> #include <gsl/gsl_math.h> #include <gsl/gsl_rng.h> #include <gsl/gsl_randist.h> #include <gsl/gsl_histogram.h> #include <gsl/gsl_permutation.h> #include <math.h> #include <stdio.h> #include <stdlib.h> #include <string.h> #ifdef _OPENMP #include <omp.h> #endif /*_OPENMP*/ typedef struct TrialStruct { int id; float onset; float duration; float height; } Trial; extern void VIsolatedVoxels(VImage src,float threshold); extern void VHistogram(gsl_histogram *histogram,VString filename); extern void VCheckImage(VImage src); extern void FDR(VImage src,VImage dest,gsl_histogram *nullhist,gsl_histogram *realhist,double); extern double ttest1(double *data1,int n); extern void ImageStats(VImage src,double *,double *,double *hmin,double *hmax); extern Trial *ReadDesign(VStringConst designfile,int *numtrials,int *nevents); extern gsl_matrix *VCreateDesign(int ntimesteps,int nevents,int deriv,VBoolean,gsl_matrix *); extern void VHemoModel(Trial *trial,int ntrials,int nevents,int,double,int,VBoolean,gsl_matrix *X,gsl_matrix *); extern Trial *CopyTrials(Trial *trial,int numtrials); extern void VWhiteGLM(gsl_matrix *Data,VImage map,gsl_matrix *X,gsl_vector *con,int numlags,VImage zmap); extern Trial *ConcatenateTrials(Trial **trial,int *numtrials,float *run_duration,int dlists,int sumtrials); extern double VImageVar(VImage src); extern void VImageCount(VImage src); extern void VBilateralFilter(VImage src,VImage,int radius,double var1,double var2,int); extern void VGetHistRange(VImage src,double *hmin,double *hmax); extern void VZScale(VImage src,float,float stddev); extern float VGetMode(VImage src); extern void GlobalMean(gsl_matrix *Data,gsl_matrix *covariates,int column); extern gsl_matrix *VReadCovariates(VString cfile,VBoolean normalize); extern VImage VoxelMap(VAttrList list); extern gsl_matrix *VReadImageData(VAttrList *list,int nlists); extern void VGetTimeInfos(VAttrList *list,int nlists,double *mtr,float *run_duration); extern void VRowNormalize(gsl_matrix *Data); extern void CheckTrialLabels(Trial *trial,int numtrials); extern void HistoUpdate(VImage src1,gsl_histogram *hist); extern void PlotDesign(gsl_matrix *X,double tr,VString filename); void XCheckImage(VImage src,char *filename) { VAttrList out_list = VCreateAttrList(); VAppendAttr(out_list,"image",NULL,VImageRepn,src); FILE *out_file = fopen(filename,"w"); VWriteFile (out_file, out_list); } /* shuffle each run separately to ensure exchangebility, concatenate individual permtables */ int **genperm(gsl_rng *rx,int *numtrials,int sumtrials,int dlists,int numperm) { int i,j,k; int **permtable = (int **) VCalloc(numperm,sizeof(int *)); gsl_permutation **perm = (gsl_permutation **) VCalloc(dlists,sizeof(gsl_permutation *)); for (k=0; k<dlists; k++) { perm[k] = gsl_permutation_alloc((size_t)numtrials[k]); gsl_permutation_init (perm[k]); } for (i = 0; i < numperm; i++) { permtable[i] = (int *) VCalloc(sumtrials,sizeof(int)); int jj=0; for (k=0; k<dlists; k++) { gsl_ran_shuffle (rx, perm[k]->data,numtrials[k],sizeof(size_t)); for (j=0; j<numtrials[k]; j++) { permtable[i][j+jj] = perm[k]->data[j] + jj; } jj += numtrials[k]; } } for (k=0; k<dlists; k++) { gsl_permutation_free(perm[k]); } return permtable; } VDictEntry 
HemoDict[] = { { "gamma_0", 0 }, { "gamma_1", 1 }, { "gamma_2", 2 }, { "gauss", 3 }, { NULL } }; int main (int argc, char *argv[]) { static VArgVector in_files; static VArgVector des_files; static VString cova_filename=""; static VString out_filename=""; static VString plot_filename=""; static VShort hemomodel = 0; static VBoolean firstcol = TRUE; static VShort numlags = 1; static VArgVector contrast; static VFloat alpha = 0.05; static VShort radius = 2; static VFloat rvar = 2.0; static VFloat svar = 2.0; static VShort numiter = 2; static VBoolean demean = TRUE; static VBoolean globalmean = FALSE; static VShort numperm = 5000; static VLong seed = 99402622; static VBoolean cleanup = TRUE; static VShort nproc = 0; static VOptionDescRec options[] = { {"in", VStringRepn, 0, & in_files, VRequiredOpt, NULL,"Input files" }, {"out", VStringRepn, 1, & out_filename, VRequiredOpt, NULL,"Output file" }, {"design", VStringRepn, 0, & des_files, VRequiredOpt, NULL,"Design files" }, {"contrast", VFloatRepn, 0, (VPointer) &contrast, VRequiredOpt, NULL, "Contrast vector"}, {"nuisance", VStringRepn, 1, & cova_filename, VOptionalOpt, NULL,"Nuisance regressors" }, {"demean",VBooleanRepn,1,(VPointer) &demean,VOptionalOpt,NULL,"Whether to subtract mean in nuisance regressors"}, {"plotdesign", VStringRepn, 1, & plot_filename, VOptionalOpt, NULL,"Filename for plotting design matrix X" }, {"hemo", VShortRepn, 1, (VPointer) &hemomodel, VOptionalOpt, HemoDict,"Hemodynamic model" }, {"col1", VBooleanRepn, 1, (VPointer) &firstcol, VOptionalOpt, NULL,"Whether to add a constant first column" }, {"alpha",VFloatRepn,1,(VPointer) &alpha,VOptionalOpt,NULL,"FDR significance level"}, {"perm",VShortRepn,1,(VPointer) &numperm,VOptionalOpt,NULL,"Number of permutations"}, {"seed",VLongRepn,1,(VPointer) &seed,VOptionalOpt,NULL,"Seed for random number generation"}, {"order", VShortRepn, 1, &numlags, VOptionalOpt, NULL,"Order of AR model" }, {"radius",VShortRepn,1,(VPointer) &radius,VOptionalOpt,NULL,"Bilateral parameter (radius in voxels)"}, {"rvar",VFloatRepn,1,(VPointer) &rvar,VOptionalOpt,NULL,"Bilateral parameter (radiometric)"}, {"svar",VFloatRepn,1,(VPointer) &svar,VOptionalOpt,NULL,"Bilateral parameter (spatial)"}, {"filteriterations",VShortRepn,1,(VPointer) &numiter,VOptionalOpt,NULL,"Bilateral parameter (number of iterations)"}, {"cleanup",VBooleanRepn,1,(VPointer) &cleanup,VOptionalOpt,NULL,"Whether to remove isloated voxels"}, {"j",VShortRepn,1,(VPointer) &nproc,VOptionalOpt,NULL,"Number of processors to use, '0' to use all"}, }; FILE *fp=NULL; VString in_filename; VAttrList out_list=NULL,geolist=NULL; int i; char *prg_name=GetLipsiaName("vlisa_prewhitening"); fprintf (stderr, "%s\n", prg_name); /* parse command line */ if (! 
VParseCommand (VNumber (options), options, & argc, argv)) { VReportUsage (argv[0], VNumber (options), options, NULL); exit (EXIT_FAILURE); } if (argc > 1) { VReportBadArgs (argc, argv); exit (EXIT_FAILURE); } /* omp-stuff */ #ifdef _OPENMP int num_procs=omp_get_num_procs(); if (nproc > 0 && nproc < num_procs) num_procs = nproc; fprintf(stderr," using %d cores\n",(int)num_procs); omp_set_num_threads(num_procs); #endif /* _OPENMP */ /* read functional image data */ int nlists = in_files.number; if (nlists < 1) VError(" no input"); VAttrList *list = (VAttrList *) VCalloc(nlists,sizeof(VAttrList)); for (i=0; i<nlists; i++) { in_filename = ((VString *) in_files.vector)[i]; fprintf(stderr," %3d: %s\n",i,in_filename); list[i] = VReadAttrList(in_filename,0L,TRUE,FALSE); if (geolist == NULL) { geolist = VGetGeoInfo(list[i]); double *DGeo = VGetGeoDim(geolist,NULL); if (fabs(DGeo[0]-4.0) > 0.01) VError(" Input files must be 4D (not 3D)"); } } /* get number of design files */ int dlists = des_files.number; if (dlists != nlists) { VError(" number of input functional files (%d) and design files (%d) do not match",nlists,dlists); } /* read data and voxel map */ double tr=0; float *run_duration = (float *) VCalloc(nlists,sizeof(float)); VGetTimeInfos(list,nlists,&tr,run_duration); gsl_matrix *Data = VReadImageData(list,nlists); VImage map = VoxelMap(list[0]); int nslices = VPixel(map,0,3,0,VShort); int nrows = VPixel(map,0,3,1,VShort); int ncols = VPixel(map,0,3,2,VShort); int ntimesteps = Data->size2; /* additional regressors, no task labels, not included in permutations */ gsl_matrix *ctmp1=NULL; gsl_matrix *ctmp2=NULL; gsl_matrix *covariates=NULL; int cdim = 1; int nuisance_dim=0; if (strlen(cova_filename) > 1) { ctmp1 = VReadCovariates(cova_filename,demean); if (ctmp1->size1 != Data->size2) VError(" num timesteps in covariate file not consistent with data"); nuisance_dim = ctmp1->size2; } if (globalmean) { if (ctmp1 != NULL) cdim = ctmp1->size2+1; ctmp2 = gsl_matrix_calloc(Data->size2,cdim); GlobalMean(Data,ctmp2,(int)(cdim-1)); } if (ctmp1 != NULL && ctmp2 == NULL) covariates = ctmp1; if (ctmp2 != NULL) covariates = ctmp2; /* design files with task labels */ Trial **trial = (Trial **) VCalloc(dlists,sizeof(Trial *)); int *numtrials = (int *) VCalloc(dlists,sizeof(int *)); int nevents = 0; int sumtrials = 0; for (i=0; i<dlists; i++) { in_filename = ((VString *) des_files.vector)[i]; fprintf(stderr," %3d: %s\n",i,in_filename); int kk=0,jj=0; trial[i] = ReadDesign(in_filename,&kk,&jj); numtrials[i] = kk; if (jj > nevents) nevents = jj; sumtrials += numtrials[i]; } fprintf(stderr," Number of trials: %d, number of event types: %d\n",sumtrials,nevents); Trial *alltrials = ConcatenateTrials(trial,numtrials,run_duration,nlists,sumtrials); CheckTrialLabels(alltrials,sumtrials); /* read contrast vector */ gsl_vector *cont = gsl_vector_alloc(contrast.number + nuisance_dim); gsl_vector_set_zero(cont); for (i=0; i < contrast.number; i++) { double u = ((VFloat *)contrast.vector)[i]; gsl_vector_set(cont,i,u); } /* alloc initial design matrix X */ gsl_matrix *X = VCreateDesign(ntimesteps,nevents,(int)hemomodel,firstcol,covariates); if (X->size2 != cont->size) VError(" dimension of contrast vector (%ld) does not match design matrix (%ld)",cont->size,X->size2); fprintf(stderr," Design file dimensions: %lu x %lu\n",X->size1,X->size2); /* ini random permutations */ gsl_rng_env_setup(); const gsl_rng_type *T = gsl_rng_default; gsl_rng *rx = gsl_rng_alloc(T); gsl_rng_set(rx,(unsigned long int)seed); int **permtable = 
genperm(rx,numtrials,sumtrials,dlists,(int)numperm); /* estimate null variance based on first 30 permutations */ float stddev=1.0; int nperm=0,tstperm = 30; if (numperm > 0) { if (tstperm > numperm) tstperm = numperm; fprintf(stderr," Initialization..."); double varsum=0,nx=0; #pragma omp parallel for shared(Data,X,cont,numlags,permtable) schedule(dynamic) for (nperm = 0; nperm < tstperm; nperm++) { VImage zmap = VCreateImage(nslices,nrows,ncols,VFloatRepn); Trial *permtrials = CopyTrials(alltrials,sumtrials); int j=0; for (j=0; j<sumtrials; j++) { int j0 = permtable[nperm][j]; permtrials[j].id = alltrials[j0].id; } gsl_matrix *X = VCreateDesign(ntimesteps,nevents,(int)hemomodel,firstcol,covariates); VHemoModel(permtrials,sumtrials,nevents,ntimesteps,tr,(int)hemomodel,firstcol,X,covariates); VWhiteGLM(Data,map,X,cont,(int) numlags,zmap); #pragma omp critical { varsum += VImageVar(zmap); nx++; } VDestroyImage(zmap); } double meanvar = varsum/nx; stddev = sqrt(meanvar); fprintf(stderr,"done.\n"); } /* no permutation */ VImage zmap1 = VCreateImage(nslices,nrows,ncols,VFloatRepn); VCopyImageAttrs (map,zmap1); VImage dst1 = VCreateImageLike (zmap1); VHemoModel(alltrials,sumtrials,nevents,ntimesteps,tr,(int)hemomodel,firstcol,X,covariates); if (strlen(plot_filename) > 0) PlotDesign(X,tr,plot_filename); VWhiteGLM(Data,map,X,cont,(int) numlags,zmap1); if (numperm == 0) { double z = VImageVar(zmap1); stddev = sqrt(z); /* update stddev */ } float mode=0; if (numperm > 0 || numiter > 0) VZScale(zmap1,mode,stddev); VBilateralFilter(zmap1,dst1,(int)radius,(double)rvar,(double)svar,(int)numiter); /* ini histograms */ double hmin=0,hmax=0; VGetHistRange(dst1,&hmin,&hmax); size_t nbins = 20000; gsl_histogram *hist0 = gsl_histogram_alloc (nbins); gsl_histogram_set_ranges_uniform (hist0,hmin,hmax); gsl_histogram *histz = gsl_histogram_alloc (nbins); gsl_histogram_set_ranges_uniform (histz,hmin,hmax); HistoUpdate(dst1,histz); /* random permutations */ #pragma omp parallel for shared(Data,X,cont,numlags) schedule(dynamic) for (nperm = 0; nperm < numperm; nperm++) { if (nperm%5 == 0) fprintf(stderr," perm %4d of %d\r",nperm,(int)numperm); /* randomly shuffle trial labels */ Trial *permtrials = CopyTrials(alltrials,sumtrials); int j=0; for (j=0; j<sumtrials; j++) { int j0 = permtable[nperm][j]; permtrials[j].id = alltrials[j0].id; } /* hemodynamic model */ gsl_matrix *X = VCreateDesign(ntimesteps,nevents,(int)hemomodel,firstcol,covariates); VHemoModel(permtrials,sumtrials,nevents,ntimesteps,tr,(int)hemomodel,firstcol,X,covariates); /* GLM */ VImage zmap = VCreateImageLike(zmap1); VWhiteGLM(Data,map,X,cont,(int)numlags,zmap); VZScale(zmap,mode,stddev); gsl_matrix_free(X); VFree(permtrials); /* bilateral filter */ VImage dst = VCreateImageLike (zmap); VBilateralFilter(zmap,dst,(int)radius,(double)rvar,(double)svar,(int)numiter); #pragma omp critical { HistoUpdate(dst,hist0); } VDestroyImage(dst); VDestroyImage(zmap); } /* apply fdr */ VImage fdrimage = VCopyImage (dst1,NULL,VAllBands); if (numperm > 0) { FDR(dst1,fdrimage,hist0,histz,(double)alpha); if (cleanup && alpha < 1.0) { VIsolatedVoxels(fdrimage,(float)(1.0-alpha)); } } VImageCount(fdrimage); /* ** output */ out_list = VCreateAttrList (); VHistory(VNumber(options),options,prg_name,&list[0],&out_list); /* update geoinfo, 4D to 3D */ if (geolist != NULL) { double *D = VGetGeoDim(geolist,NULL); D[0] = 3; D[4] = 1; VSetGeoDim(geolist,D); } VSetGeoInfo(geolist,out_list); VAppendAttr (out_list,"image",NULL,VImageRepn,fdrimage); fp = VOpenOutputFile 
(out_filename, TRUE); if (! VWriteFile (fp, out_list)) exit (1); fclose(fp); fprintf (stderr, "\n%s: done.\n", argv[0]); exit(0); }
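/*
 * A small sketch of the per-run permutation scheme implemented by genperm()
 * above: trial labels are shuffled only within each run, so run boundaries
 * are preserved (exchangeability holds within a run), and the per-run
 * permutations are concatenated with an offset into the combined trial list.
 * Plain rand() replaces gsl_ran_shuffle here and the run sizes are made up
 * for illustration.
 */
#include <stdio.h>
#include <stdlib.h>

/* Fisher-Yates shuffle of a[0..n-1] */
static void demo_shuffle(int *a, int n)
{
    for (int i = n - 1; i > 0; i--) {
        int j = rand() % (i + 1);
        int t = a[i]; a[i] = a[j]; a[j] = t;
    }
}

int main(void)
{
    int numtrials[2] = { 4, 3 };              /* two runs with 4 and 3 trials */
    int sumtrials = 7;
    int perm[7];

    srand(99402622);
    int jj = 0;
    for (int k = 0; k < 2; k++) {
        for (int j = 0; j < numtrials[k]; j++) perm[jj + j] = j;
        demo_shuffle(perm + jj, numtrials[k]);
        for (int j = 0; j < numtrials[k]; j++) perm[jj + j] += jj;  /* offset into the concatenated list */
        jj += numtrials[k];
    }
    /* a permuted label perm[j] always stays inside the run that trial j came from */
    for (int j = 0; j < sumtrials; j++) printf("trial %d gets label of trial %d\n", j, perm[j]);
    return 0;
}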
traffic_parallel.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> #include <time.h> // Fundamental simulation unit typedef struct cars { int x; // x position on the road int y; // Lane number on the road int v; // Current velocity int v_d; // Desired velocity int lane_change_now; // Did lane change this iteration int lane_change_prev; // Did lane change last iteration } car; // Simulation parameters typedef struct { int LANES; // Number of lanes (currently only 2 possible) int L; // Length of the road int N; // Number of cars int MAX_ITER; // Iterations int v_max; // Maximum allowed velocity double p_change; // Lane change probability double p_brake; // Random braking probability } parameters; void initialise(car *cars, int *grid, int *new_grid, parameters *sim, int seed, unsigned short **xsubj, int threads); void update(car *cars, int *grid, int *new_grid, parameters *sim, unsigned short **xsubi); int aheadThisLane(car *cars, int car_index, parameters *sim); int aheadOtherLane(car *cars, int car_index, parameters *sim); int behindOtherLane(car *cars, int car_index, parameters *sim); int gapAhead(car *cars, int car_index, int *grid, parameters *sim); int gapAheadOther(car *cars, int car_index, int *grid, parameters *sim); int gapBehindOther(car *cars, int car_index, int *grid, parameters *sim); void printGrid(car *cars, int *grid, int length, parameters *sim); int mod(int a, int n); int main(int argc, char **argv) { if (argc < 5) { printf("Usage: ./e_traffic_parallel density p_change p_brake seed\n"); printf("density: float that determines number of cars.\n"); printf("p_change: float, probability of lane change.\n"); printf("p_brake: float, probability of a car randomly braking.\n"); printf("seed: integer used to seed the PRNG.\n"); exit(99); } parameters sim; sim.LANES = 2; sim.L = 133333; sim.N = (int) sim.LANES * sim.L * atof(argv[1]); sim.MAX_ITER = 5000; sim.v_max = 5; sim.p_change = atof(argv[2]); sim.p_brake = atof(argv[3]); int seed = atoi(argv[4]); car *cars = malloc(sizeof *cars * sim.N); int *grid = malloc(sizeof *grid * sim.LANES * sim.L); int *new_grid = malloc(sizeof *new_grid * sim.LANES * sim.L); int threads = 0; #pragma omp parallel { #pragma omp single { threads = omp_get_num_threads(); } } unsigned short **xsubj = malloc(threads * sizeof (unsigned short*)); for (int i = 0; i < threads; i++) { xsubj[i] = malloc(3 * sizeof (unsigned short)); } if (cars == NULL || grid == NULL || new_grid == NULL || xsubj == NULL) { printf("Could not allocate memory!!!\n"); exit(1); } // Statistical quantities of interest double density = (double) sim.N / (double) (sim.LANES * sim.L); // Number of vehicles per grid site double flow = 0; // Average velocity on the grid double lane_changes = 0; // Average number of lane changes double ping_pong_changes = 0; // Average number of lane changes on consicutive time steps initialise(cars, grid, new_grid, &sim, seed, xsubj, threads); printf("Initialisation complete.\n\n\n"); // struct timespec sleeper = {0, 400000000}; // struct timespec remain; // Allow 1000 time steps to pass to reach a steady state for (int i = 0; i < 1000; i++) { update(cars, grid, new_grid, &sim, xsubj); // printGrid(cars, grid, 100, sim); // nanosleep(&sleeper, &remain); } for (int i = 0; i < sim.MAX_ITER; i++) { update(cars, grid, new_grid, &sim, xsubj); #pragma omp parallel for reduction(+:lane_changes, ping_pong_changes) for (int j = 0; j < sim.N; j++) { lane_changes += cars[j].lane_change_now; if (cars[j].lane_change_now * cars[j].lane_change_prev == 1) { 
ping_pong_changes += 1; } } if (i % 5 == 0) { #pragma omp parallel for reduction(+:flow) for (int j = 0; j < sim.N; j++) { flow += cars[j].v; } } } flow /= (double) (sim.L * sim.MAX_ITER); lane_changes /= (double) (sim.L * sim.MAX_ITER); ping_pong_changes /= (double) (sim.L * sim.MAX_ITER); printf("Average car density: %G\n", density); printf("Average flow: %G\n", flow); printf("Average lane changes: %G\n", lane_changes); printf("Lane changes per car: %f\n", lane_changes / density); printf("Average ping pong lane changes: %G\n", ping_pong_changes); free(cars); free(grid); free(new_grid); for (int i = 0; i < threads; i++) { free(xsubj[i]); } free(xsubj); return 0; } /** * Initialises the cars array and the grids. * @param cars An array of struct car. * @param grid An integer array that stores the positions of cars on the road. * @param new_grid An integer array used to store the newly calculated positions. * @param sim A pointer to the simulation parameters of type struct parameters. * @param seed An integer used to seed the PRNGs. * @param xsubj An unsigned short integer double pointer to an array of size (threads * 3). Stores the states of PRNGs. * @param threads An integer, number of threads being used. */ void initialise(car *cars, int *grid, int *new_grid, parameters *sim, int seed, unsigned short **xsubj, int threads) { int x, y; #pragma omp parallel for for (int i = 0; i < sim->LANES; i++) { for (int j = 0; j < sim->L; j++) { grid[sim->L * i + j] = -1; new_grid[sim->L * i + j] = -1; } } unsigned short xsubi[3] = { seed + 1, seed + 2, seed + 3 }; for (int i = 0; i < sim->N; i++) { x = nrand48(xsubi) % sim->L; y = nrand48(xsubi) % sim->LANES; while (grid[sim->L * y + x] != -1) { x = nrand48(xsubi) % sim->L; y = nrand48(xsubi) % sim->LANES; } grid[sim->L * y + x] = i; cars[i].x = x; cars[i].y = y; cars[i].v = 0; cars[i].v_d = sim->v_max; cars[i].lane_change_now = 0; } for (int i = 0; i < threads; i++) { for (int j = 0; j < 3; j++) { xsubj[i][j] = (unsigned short) nrand48(xsubi); } } } /** * Updates the state of the simulation. * @param cars An array of struct car. * @param grid An integer array that stores the positions of cars on the road. * @param new_grid An integer array used to store the newly calculated positions. * @param sim A pointer to the simulation parameters of type struct parameters. * @param xsubi An unsigned short integer double pointer to an array of size (threads * 3). Stores the states of PRNGs. 
*/ void update(car *cars, int *grid, int *new_grid, parameters *sim, unsigned short **xsubi) { #pragma omp parallel for for (int i = 0; i < sim->LANES; i++) { for (int j = 0; j < sim->L; j++) { new_grid[sim->L * i + j] = -1; } } #pragma omp parallel { #pragma omp for for (int i = 0; i < sim->N; i++) { cars[i].lane_change_prev = cars[i].lane_change_now; if ( erand48(xsubi[omp_get_thread_num()]) < sim->p_change && gapAhead(cars, i, grid, sim) < aheadThisLane(cars, i, sim) && gapAheadOther(cars, i, grid, sim) > aheadOtherLane(cars, i, sim) && gapBehindOther(cars, i, grid, sim) > behindOtherLane(cars, i, sim) ) { cars[i].y = (cars[i].y + 1) % sim->LANES; cars[i].lane_change_now = 1; } else { cars[i].lane_change_now = 0; } new_grid[sim->L * cars[i].y + cars[i].x] = i; } int gap; #pragma omp for private(gap) for (int i = 0; i < sim->N; i++) { gap = gapAhead(cars, i, new_grid, sim); if (cars[i].v < cars[i].v_d) { cars[i].v++; } if (cars[i].v > gap) { cars[i].v = gap; } if (cars[i].v > 0 && erand48(xsubi[omp_get_thread_num()]) < sim->p_brake) { cars[i].v--; } } #pragma omp for for (int i = 0; i < sim->LANES; i++) { for (int j = 0; j < sim->L; j++) { grid[sim->L * i + j] = -1; } } #pragma omp for for (int i = 0; i < sim->N; i++) { cars[i].x = (cars[i].x + cars[i].v) % sim->L; grid[sim->L * cars[i].y + cars[i].x] = i; } } } /** * Gives the look ahead parameter for the same lane. * @param cars An array of struct car. * @param car_index Integer index of the car in the cars array. * @param sim A pointer to the simulation parameters of type struct parameters. * @return integer look ahead parameter. */ int aheadThisLane(car *cars, int car_index, parameters *sim) { return cars[car_index].v + 1; } /** * Gives the look ahead parameter for the other lane. * @param cars An array of struct car. * @param car_index Integer index of the car in the cars array. * @param sim A pointer to the simulation parameters of type struct parameters. * @return integer look ahead parameter. */ int aheadOtherLane(car *cars, int car_index, parameters *sim) { return cars[car_index].v + 1; } /** * Gives the look behind parameter for the other lane. * @param cars An array of struct car. * @param car_index Integer index of the car in the cars array. * @param sim A pointer to the simulation parameters of type struct parameters. * @return integer look ahead parameter. */ int behindOtherLane(car *cars, int car_index, parameters *sim) { return sim->v_max; } /** * Gives the distance to the car immediately in front of this car in the same lane. * @param cars An array of struct car. * @param car_index Integer index of the car in the cars array. * @param grid An integer array that stores the positions of cars on the road. * @param sim A pointer to the simulation parameters of type struct parameters. * @return integer distance */ int gapAhead(car *cars, int car_index, int *grid, parameters *sim) { int lane = cars[car_index].y; int x = (cars[car_index].x + 1) % sim->L; int gap = 0; while (grid[sim->L * lane + x] == -1) { x = (x + 1) % sim->L; gap++; } return gap; } /** * Gives the distance to the car immediately in front of this car in the other lane. * Returns -1 if the adjacent position is occupied by a car. * @param cars An array of struct car. * @param car_index Integer index of the car in the cars array. * @param grid An integer array that stores the positions of cars on the road. * @param sim A pointer to the simulation parameters of type struct parameters. 
 * @return integer distance
 */
int gapAheadOther(car *cars, int car_index, int *grid, parameters *sim) {
    int lane = (cars[car_index].y + 1) % sim->LANES;
    int x = cars[car_index].x;
    int gap = 0;
    if (grid[sim->L * lane + x] != -1) {
        return -1;
    }
    x = (x + 1) % sim->L;
    while (grid[sim->L * lane + x] == -1) {
        x = (x + 1) % sim->L;
        gap++;
    }
    return gap;
}

/**
 * Gives the distance to the car immediately behind this car in the other lane.
 * Returns -1 if the adjacent position is occupied by a car.
 * @param cars An array of struct car.
 * @param car_index Integer index of the car in the cars array.
 * @param grid An integer array that stores the positions of cars on the road.
 * @param sim A pointer to the simulation parameters of type struct parameters.
 * @return integer distance
 */
int gapBehindOther(car *cars, int car_index, int *grid, parameters *sim) {
    int lane = (cars[car_index].y + 1) % sim->LANES;
    int x = cars[car_index].x;
    int gap = 0;
    if (grid[sim->L * lane + x] != -1) {
        return -1;
    }
    x = mod(x - 1, sim->L);  /* use mod(), not %, so x wraps to L-1 instead of going negative at position 0 */
    while (grid[sim->L * lane + x] == -1) {
        x = mod((x - 1), sim->L);
        gap++;
    }
    return gap;
}

/**
 * Prints out the road on the screen.
 * @param cars An array of struct car.
 * @param grid An integer array that stores the positions of cars on the road.
 * @param length An integer that determines how many positions are printed.
 * @param sim A pointer to the simulation parameters of type struct parameters.
 */
void printGrid(car *cars, int *grid, int length, parameters *sim) {
    printf("\033[2A\033[K");
    for (int i = 0; i < sim->LANES; i++) {
        for (int j = 0; j < length; j++) {
            if (grid[sim->L * i + j] != -1) {
                printf("\033[1;32m%d\033[0m", cars[grid[sim->L * i + j]].lane_change_now);
            } else {
                printf("\033[2;35m\u2588\033[0m");
            }
        }
        printf("\n\033[K");
    }
}

/**
 * The mathematical modulo operation.
 * This is consistent with the Euclidean algorithm so that the output is always non-negative.
 * @param a The integer dividend.
 * @param n The integer divisor.
 * @return b An integer such that a ≡ b (mod n)
 */
int mod(int a, int n) {
    return a - n * (int) floor((double) a / (double) n);
}
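/*
 * A short check of why mod() above (and not the C % operator) has to be used
 * when stepping backwards around the periodic road, as in gapBehindOther():
 * for a negative dividend, % truncates toward zero and can return a negative
 * value, which would index grid[] outside its lane, while mod() always lands
 * in [0, n).
 */
#include <math.h>
#include <stdio.h>

static int demo_mod(int a, int n) { return a - n * (int) floor((double) a / (double) n); }

int main(void)
{
    int L = 10;
    printf("(-1) %% L   = %d\n", -1 % L);          /* -1: not a valid grid index */
    printf("mod(-1, L) = %d\n", demo_mod(-1, L));  /* 9: wraps to the last cell of the ring */
    return 0;
}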
debug-1.c
/* PR debug/36617 */ /* { dg-do run } */ /* { dg-options "-g -fopenmp -O0" } */ int f1 (void) { int v1i, v1j, v1k, v1l = 0; v1i = 6; v1j = 8; #pragma omp parallel private (v1k) firstprivate (v1j) shared (v1i) reduction (+:v1l) { v1k = v1i + v1j; { int v1m = 1; v1l = v1m; } } return v1l; } int v2k = 9; int f2 (void) { int v2i = 6, v2j = 7; #pragma omp single private (v2i) firstprivate (v2k) { int v2l = v2j + v2k; v2i = 8; v2k = 10; v2j = v2l + v2i; } return v2i + v2j; } int f3 (void) { int v3i = 6, v3j = 7, v3k = 9; #pragma omp parallel { #pragma omp master v3i++; #pragma omp single private (v3i) firstprivate (v3k) { int v3l = v3j + v3k; v3i = 8; v3k = 10; v3j = v3l + v3i; } #pragma omp atomic v3k++; } return v3i + v3j; } int v4k = 9, v4l = 0; int f4 (void) { int v4i = 6, v4j = 7, v4n = 0; #pragma omp sections private (v4i) firstprivate (v4k) reduction (+:v4l) { #pragma omp section { int v4m = v4j + v4k; v4i = 8; v4k = 10; v4l++; v4n = v4m + v4i; } #pragma omp section { int v4o = v4j + v4k; v4i = 10; v4k = 11; v4l++; } } return v4i + v4j + v4l + v4n; } int f5 (void) { int v5i = 6, v5j = 7, v5k = 9, v5l = 0, v5n = 0, v5p = 0; #pragma omp parallel { #pragma omp master v5p++; #pragma omp sections private (v5i) firstprivate (v5k) reduction (+:v5l) { #pragma omp section { int v5m = v5j + v5k; v5i = 8; v5k = 10; v5l++; v5n = v5m + v5i; } #pragma omp section { int v5o = v5j + v5k; v5i = 10; v5k = 11; v5l++; } } } return v5i + v5j + v5l + v5n + v5p; } int v6k = 9, v6l = 0; int f6 (void) { int v6i = 6, v6j = 7, v6n = 0; #pragma omp for private (v6i) firstprivate (v6k) reduction (+:v6l) for (v6n = 0; v6n < 3; v6n++) { int v6m = v6j + v6k; v6i = 8; v6l++; } return v6i + v6j + v6k + v6l + v6n; } int f7 (void) { int v7i = 6, v7j = 7, v7k = 9, v7l = 0, v7n = 0, v7o = 1; #pragma omp parallel { #pragma omp master v7o++; #pragma omp for private (v7i) firstprivate (v7k) reduction (+:v7l) for (v7n = 0; v7n < 3; v7n++) { int v7m = v7j + v7k; v7i = 8; v7l++; } } return v7i + v7j + v7k + v7l + v7n; } int main (void) { f1 (); f2 (); f3 (); f4 (); f5 (); f6 (); f7 (); return 0; }
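/*
 * A tiny illustration, separate from the GCC test above, of the data-sharing
 * clauses the test exercises: private gives each thread an uninitialized
 * copy, firstprivate copies the outer value in, and reduction combines the
 * per-thread partial results on exit.  The printed sum assumes the requested
 * 4 threads are actually granted.
 */
#include <stdio.h>
#include <omp.h>

int main(void)
{
    int a = 5, b = 5, sum = 0;
    #pragma omp parallel num_threads(4) private(a) firstprivate(b) reduction(+:sum)
    {
        a = omp_get_thread_num();   /* a was uninitialized here; b starts at 5 in every thread */
        sum += a + b;               /* partial sums are combined when the region ends */
    }
    /* the original a and b are unchanged outside the parallel region */
    printf("a = %d, b = %d, sum = %d\n", a, b, sum);  /* with 4 threads: sum = (0+1+2+3) + 4*5 = 26 */
    return 0;
}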
demo.c
// openmp program that solves a random system of up to 64 eqs in up to 64 vars. #include <assert.h> #include <sys/time.h> #include <inttypes.h> #include <stdio.h> #include <stdlib.h> #include <math.h> #include <err.h> #include <omp.h> #include "feslite.h" /* Try to solve a large system as fast as possible. */ int n = 45; static inline int idxq(int i, int j) { return j * (j - 1) / 2 + i; } u32 eval32(int n, const u32 * Fq, const u32 * Fl, int stride, u32 x) { // first expand the values of the variables from `x` u32 v[32]; for (int k = 0; k < n; k++) { v[k] = (x & 0x0001) ? 0xffffffff : 0x00000000; x >>= 1; } u32 y = Fl[0]; for (int idx_0 = 0; idx_0 < n; idx_0++) { // computes the contribution of degree-1 terms u32 v_0 = v[idx_0]; u32 l = Fl[stride * (1 + idx_0)]; // FIXME : get rid of this multiplication y ^= l & v_0; for (int idx_1 = 0; idx_1 < idx_0; idx_1++) { // computes the contribution of degree-2 terms u32 v_1 = v_0 & v[idx_1]; u32 q = Fq[idxq(idx_1, idx_0)]; y ^= q & v_1; } } return y; } u64 eval64(int n, const u64 * Q, const u64 * L, u64 x) { u64 v[64]; for (int k = 0; k < n; k++) { v[k] = (x & 1) ? 0xffffffffffffffffull : 0; x >>= 1ull; } assert(x == 0); u64 y = L[0]; for (int i = 0; i < n; i++) { // computes the contribution of degree-1 terms y ^= L[1 + i] & v[i]; for (int j = 0; j < i; j++) y ^= Q[idxq(j, i)] & v[i] & v[j]; } return y; } u64 rand64() { return ((u64) lrand48()) ^ (((u64) lrand48()) << 32ull); } u32 Fq[2016]; u64 Fq_start[2016]; u64 Fl_start[65]; #define MAX_BATCH_SIZE 64 struct bundle_t { int i; u32 prefixes[MAX_BATCH_SIZE]; u32 Fl[]; }; int m; struct bundle_t *current_bundle; int in_flight, created, solved; void fresh_bundle() { current_bundle = malloc(sizeof(struct bundle_t) + 33*m*sizeof(u32)); if (current_bundle == NULL) err(1, "impossible to allocate new bundle"); current_bundle->i = 0; #pragma omp atomic in_flight++; } void process_bundle(struct bundle_t *ready_bundle) { /* solve ready bundle */ int count = 256; u32 buffer[count * m]; int size[m]; feslite_solve(32, m, Fq, ready_bundle->Fl, count, buffer, size); // check against remaining equations, print for (int i = 0; i < m; i++) for (int j = 0; j < size[i]; j++) { u32 x = buffer[count*i + j]; u32 y = eval32(32, Fq, ready_bundle->Fl + i, m, x); assert(y == 0); u64 p = ready_bundle->prefixes[i]; u64 u = x ^ (p << 32); u64 v = eval64(n, Fq_start, Fl_start, u); if (v == 0) printf("\nfound %016" PRIx64 "\n", u); } /* free ready bundle */ free(ready_bundle); #pragma omp atomic solved += m; #pragma omp atomic in_flight--; #pragma omp critical { printf("\rcreated: %d\t Solved: %d\t In-flight: %d ", created, solved, in_flight); fflush(stdout); } } /* push a system to the current bundle */ void push(const u32 * Fl, u32 prefix) { created++; /* copy to current bundle */ current_bundle->prefixes[current_bundle->i] = prefix; for (int j = 0; j < 33; j++) current_bundle->Fl[m *j + current_bundle->i] = Fl[j]; current_bundle->i += 1; /* bundle full? */ if (current_bundle->i == m) { /* prepare new bundle */ struct bundle_t *ready_bundle = current_bundle; fresh_bundle(); #pragma omp task process_bundle(ready_bundle); } } void specialize(int n, const u32 * Fl, u32 prefix) { if (n == 32) { push(Fl, prefix); return; } /* specialize last variable to zero : do nothing! 
 */
    specialize(n-1, Fl, prefix << 1);

    /* specialize last variable to one */
    u32 Fl_[n];
    for (int i = 0; i < n-1; i++)
        Fl_[i + 1] = Fl[i + 1] ^ Fq[idxq(i, n-1)];
    Fl_[0] = Fl[0] ^ Fl[n];
    specialize(n-1, Fl_, (prefix << 1) ^ 1);
}

int main(int argc, char **argv)
{
    if (argc > 1)
        n = atoi(argv[1]);
    m = feslite_preferred_batch_size();
    printf("n = %d\n", n);
    int kernel = feslite_default_kernel();
    const char *name = feslite_kernel_name(kernel);
    printf("Using kernel %s, %d lane(s)...\n", name, m);
    srand48(1337);

    /* initialize a random system */
    int N = idxq(0, n);
    for (int i = 0; i < N; i++)
        Fq_start[i] = rand64();
    for (int i = 0; i < n+1; i++)
        Fl_start[i] = rand64();
    u64 x = rand64() & ((1ull << n) - 1);   /* designated solution */
    Fl_start[0] ^= eval64(n, Fq_start, Fl_start, x);
    assert(0 == eval64(n, Fq_start, Fl_start, x));
    printf("Planted: %016" PRIx64 "\n", x);

    /* create the truncated 32 bits version */
    u32 Fl[65];
    for (int i = 0; i < N; i++)
        Fq[i] = Fq_start[i] & 0xffffffff;
    for (int i = 0; i < n+1; i++)
        Fl[i] = Fl_start[i] & 0xffffffff;

    fresh_bundle();
    double start_wt = omp_get_wtime();
    #pragma omp parallel
    #pragma omp single
    specialize(n, Fl, 0);
    double stop_wt = omp_get_wtime();
    double seconds = stop_wt - start_wt;
    double rate = n - log2(seconds);
    printf("\t---> %.2f s\n", seconds);
    printf("\t---> 2^%.2f candidate/s\n", rate);
    return EXIT_SUCCESS;
}
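/*
 * A self-contained check of the algebra behind specialize() above: over
 * GF(2), fixing the last variable to 1 turns every quadratic term
 * x_i * x_{n-1} (coefficient Fq[idxq(i, n-1)]) into a linear term in x_i and
 * folds the linear coefficient of x_{n-1} into the constant, which is exactly
 * the Fl_ update performed in specialize().  The toy system below uses n = 3
 * and single-bit coefficients instead of the 64 packed instances of the real
 * code.
 */
#include <assert.h>
#include <stdio.h>

/* packed upper-triangular index of the coefficient of x_i * x_j, for i < j */
static int demo_idxq(int i, int j) { return j * (j - 1) / 2 + i; }

int main(void)
{
    /* f(x0,x1,x2) = L[0] + L[1]x0 + L[2]x1 + L[3]x2 + q01 x0x1 + q02 x0x2 + q12 x1x2  (mod 2) */
    int L[4] = { 1, 0, 1, 1 };
    int Q[3] = { 1, 0, 1 };    /* Q[demo_idxq(0,1)], Q[demo_idxq(0,2)], Q[demo_idxq(1,2)] */

    /* specialize x2 = 1 */
    int Ls[3];
    Ls[0] = L[0] ^ L[3];                        /* constant ^= coefficient of x2 */
    for (int i = 0; i < 2; i++)
        Ls[1 + i] = L[1 + i] ^ Q[demo_idxq(i, 2)];

    for (int x = 0; x < 4; x++) {               /* check all assignments of (x0, x1) */
        int x0 = x & 1, x1 = (x >> 1) & 1;
        int full = L[0] ^ (L[1] & x0) ^ (L[2] & x1) ^ L[3]
                 ^ (Q[demo_idxq(0,1)] & x0 & x1) ^ (Q[demo_idxq(0,2)] & x0) ^ (Q[demo_idxq(1,2)] & x1);
        int spec = Ls[0] ^ (Ls[1] & x0) ^ (Ls[2] & x1) ^ (Q[demo_idxq(0,1)] & x0 & x1);
        assert(full == spec);
    }
    printf("specializing x2 = 1 matches the full polynomial on all inputs\n");
    return 0;
}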
knapsack.c
/**********************************************************************************************/ /* This program is part of the Barcelona OpenMP Tasks Suite */ /* Copyright (C) 2009 Barcelona Supercomputing Center - Centro Nacional de Supercomputacion */ /* Copyright (C) 2009 Universitat Politecnica de Catalunya */ /* */ /* This program is free software; you can redistribute it and/or modify */ /* it under the terms of the GNU General Public License as published by */ /* the Free Software Foundation; either version 2 of the License, or */ /* (at your option) any later version. */ /* */ /* This program is distributed in the hope that it will be useful, */ /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ /* GNU General Public License for more details. */ /* */ /* You should have received a copy of the GNU General Public License */ /* along with this program; if not, write to the Free Software */ /* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ /**********************************************************************************************/ /* * Original code from the Cilk project * * Copyright (c) 2000 Massachusetts Institute of Technology * Copyright (c) 2000 Matteo Frigo */ #include <stdio.h> #include <stdlib.h> #include <limits.h> #include <string.h> #include "app-desc.h" #include "bots.h" int best_so_far; int number_of_tasks; #pragma omp threadprivate(number_of_tasks) int compare(struct item *a, struct item *b) { double c = ((double) a->value / a->weight) - ((double) b->value / b->weight); if (c > 0) return -1; if (c < 0) return 1; return 0; } int read_input(const char *filename, struct item *items, int *capacity, int *n) { int i; FILE *f; if (filename == NULL) filename = "\0"; f = fopen(filename, "r"); if (f == NULL) { fprintf(stderr, "open_input(\"%s\") failed\n", filename); return -1; } /* format of the input: #items capacity value1 weight1 ... */ fscanf(f, "%d", n); fscanf(f, "%d", capacity); for (i = 0; i < *n; ++i) fscanf(f, "%d %d", &items[i].value, &items[i].weight); fclose(f); /* sort the items on decreasing order of value/weight */ /* cilk2c is fascist in dealing with pointers, whence the ugly cast */ qsort(items, *n, sizeof(struct item), (int (*)(const void *, const void *)) compare); return 0; } /* * return the optimal solution for n items (first is e) and * capacity c. Value so far is v. */ #if defined(IF_CUTOFF) void knapsack_par(struct item *e, int c, int n, int v, int *sol, int l) { int with, without, best; double ub; number_of_tasks++; /* base case: full knapsack or no items */ if (c < 0) { *sol = INT_MIN; return; } /* feasible solution, with value v */ if (n == 0 || c == 0) { *sol = v; return; } ub = (double) v + c * e->value / e->weight; if (ub < best_so_far) { /* prune ! */ *sol = INT_MIN; return; } /* * compute the best solution without the current item in the knapsack */ #pragma omp task untied firstprivate(e,c,n,v,l) shared(without) if (l < bots_cutoff_value) knapsack_par(e + 1, c, n - 1, v, &without,l+1); /* compute the best solution with the current item in the knapsack */ #pragma omp task untied firstprivate(e,c,n,v,l) shared(with) if (l < bots_cutoff_value) knapsack_par(e + 1, c - e->weight, n - 1, v + e->value, &with,l+1); #pragma omp taskwait best = with > without ? with : without; /* * notice the race condition here. The program is still * correct, in the sense that the best solution so far * is at least best_so_far. 
Moreover best_so_far gets updated * when returning, so eventually it should get the right * value. The program is highly non-deterministic. */ if (best > best_so_far) best_so_far = best; *sol = best; } #elif defined (MANUAL_CUTOFF) void knapsack_par(struct item *e, int c, int n, int v, int *sol, int l) { int with, without, best; double ub; number_of_tasks++; /* base case: full knapsack or no items */ if (c < 0) { *sol = INT_MIN; return; } /* feasible solution, with value v */ if (n == 0 || c == 0) { *sol = v; return; } ub = (double) v + c * e->value / e->weight; if (ub < best_so_far) { /* prune ! */ *sol = INT_MIN; return; } if (l < bots_cutoff_value) { /* compute the best solution without the current item in the knapsack */ #pragma omp task untied firstprivate(e,c,n,v,l) shared(without) knapsack_par(e + 1, c, n - 1, v, &without,l+1); /* compute the best solution with the current item in the knapsack */ #pragma omp task untied firstprivate(e,c,n,v,l) shared(with) knapsack_par(e + 1, c - e->weight, n - 1, v + e->value, &with,l+1); #pragma omp taskwait } else { /* compute the best solution without the current item in the knapsack */ knapsack_seq(e + 1, c, n - 1, v, &without); /* compute the best solution with the current item in the knapsack */ knapsack_seq(e + 1, c - e->weight, n - 1, v + e->value, &with); } best = with > without ? with : without; /* * notice the race condition here. The program is still * correct, in the sense that the best solution so far * is at least best_so_far. Moreover best_so_far gets updated * when returning, so eventually it should get the right * value. The program is highly non-deterministic. */ if (best > best_so_far) best_so_far = best; *sol = best; } #else void knapsack_par(struct item *e, int c, int n, int v, int *sol, int l) { int with, without, best; double ub; number_of_tasks++; /* base case: full knapsack or no items */ if (c < 0) { *sol = INT_MIN; return; } /* feasible solution, with value v */ if (n == 0 || c == 0) { *sol = v; return; } ub = (double) v + c * e->value / e->weight; if (ub < best_so_far) { /* prune ! */ *sol = INT_MIN; return; } /* * compute the best solution without the current item in the knapsack */ #pragma omp task untied firstprivate(e,c,n,v,l) shared(without) knapsack_par(e + 1, c, n - 1, v, &without,l+1); /* compute the best solution with the current item in the knapsack */ #pragma omp task untied firstprivate(e,c,n,v,l) shared(with) knapsack_par(e + 1, c - e->weight, n - 1, v + e->value, &with,l+1); #pragma omp taskwait best = with > without ? with : without; /* * notice the race condition here. The program is still * correct, in the sense that the best solution so far * is at least best_so_far. Moreover best_so_far gets updated * when returning, so eventually it should get the right * value. The program is highly non-deterministic. */ if (best > best_so_far) best_so_far = best; *sol = best; } #endif void knapsack_seq(struct item *e, int c, int n, int v, int *sol) { int with, without, best; double ub; number_of_tasks++; /* base case: full knapsack or no items */ if (c < 0) { *sol = INT_MIN; return; } /* feasible solution, with value v */ if (n == 0 || c == 0) { *sol = v; return; } ub = (double) v + c * e->value / e->weight; if (ub < best_so_far) { /* prune ! 
*/ *sol = INT_MIN; return; } /* * compute the best solution without the current item in the knapsack */ knapsack_seq(e + 1, c, n - 1, v, &without); /* compute the best solution with the current item in the knapsack */ knapsack_seq(e + 1, c - e->weight, n - 1, v + e->value, &with); best = with > without ? with : without; /* * notice the race condition here. The program is still * correct, in the sense that the best solution so far * is at least best_so_far. Moreover best_so_far gets updated * when returning, so eventually it should get the right * value. The program is highly non-deterministic. */ if (best > best_so_far) best_so_far = best; *sol = best; } void knapsack_main_par (struct item *e, int c, int n, int v, int *sol) { best_so_far = INT_MIN; #pragma omp parallel { number_of_tasks = 0; #pragma omp single #pragma omp task untied { knapsack_par(e, c, n, 0, sol, 0); } #pragma omp critical bots_number_of_tasks += number_of_tasks; } if (bots_verbose_mode) printf("Best value for parallel execution is %d\n\n", *sol); } void knapsack_main_seq (struct item *e, int c, int n, int v, int *sol) { best_so_far = INT_MIN; number_of_tasks = 0; knapsack_seq(e, c, n, 0, sol); if (bots_verbose_mode) printf("Best value for sequential execution is %d\n\n", *sol); } int knapsack_check (int sol_seq, int sol_par) { if (sol_seq == sol_par) return BOTS_RESULT_SUCCESSFUL; else return BOTS_RESULT_UNSUCCESSFUL; }
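/*
 * A worked instance of the fractional upper bound used for pruning in
 * knapsack_par()/knapsack_seq() above: with items sorted by decreasing
 * value/weight, v + c * e->value / e->weight bounds the best value this
 * branch can still reach (remaining capacity filled at the best remaining
 * density), so any branch whose bound falls below best_so_far can be cut.
 * The numbers below are made up for illustration.
 */
#include <stdio.h>

struct demo_item { int value; int weight; };

int main(void)
{
    struct demo_item e = { 60, 10 };   /* best remaining density: 6 value per unit of weight */
    int v = 100;                       /* value already packed on this branch */
    int c = 15;                        /* remaining capacity */
    int best_so_far = 250;             /* incumbent found on another branch */

    double ub = (double) v + (double) c * e.value / e.weight;   /* 100 + 15 * 6 = 190 */
    printf("ub = %.1f, best_so_far = %d -> %s\n", ub, best_so_far,
           ub < best_so_far ? "prune this branch" : "keep exploring");
    return 0;
}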
2763.c
// this source is derived from CHILL AST originally from file '/uufs/chpc.utah.edu/common/home/u1142914/lib/ytopt_vinu/polybench/polybench-code/stencils/heat-3d/kernel.c' as parsed by frontend compiler rose void kernel_heat_3d(int tsteps, int n, double A[120 + 0][120 + 0][120 + 0], double B[120 + 0][120 + 0][120 + 0]) { int t14; int t12; int t10; int t8; int t6; int t4; int t2; for (t2 = 1; t2 <= 500; t2 += 1) { #pragma omp parallel for private(t4,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 8) for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 64) for (t10 = t8; t10 <= (n - 2 < t8 + 63 ? n - 2 : t8 + 63); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) B[t6][t10][t14] = 0.125 * (A[t6 + 1][t10][t14] - 2 * A[t6][t10][t14] + A[t6 - 1][t10][t14]) + 0.125 * (A[t6][t10 + 1][t14] - 2 * A[t6][t10][t14] + A[t6][t10 - 1][t14]) + 0.125 * (A[t6][t10][t14 + 1] - 2 * A[t6][t10][t14] + A[t6][t10][t14 - 1]) + A[t6][t10][t14]; #pragma omp parallel for private(t4,t8,t10,t12,t14) for (t4 = 1; t4 <= n - 2; t4 += 8) for (t6 = t4; t6 <= (t4 + 7 < n - 2 ? t4 + 7 : n - 2); t6 += 1) for (t8 = 1; t8 <= n - 2; t8 += 64) for (t10 = t8; t10 <= (n - 2 < t8 + 63 ? n - 2 : t8 + 63); t10 += 1) for (t12 = 1; t12 <= n - 2; t12 += 16) for (t14 = t12; t14 <= (n - 2 < t12 + 15 ? n - 2 : t12 + 15); t14 += 1) A[t6][t10][t14] = 0.125 * (B[t6 + 1][t10][t14] - 2 * B[t6][t10][t14] + B[t6 - 1][t10][t14]) + 0.125 * (B[t6][t10 + 1][t14] - 2 * B[t6][t10][t14] + B[t6][t10 - 1][t14]) + 0.125 * (B[t6][t10][t14 + 1] - 2 * B[t6][t10][t14] + B[t6][t10][t14 - 1]) + B[t6][t10][t14]; } }
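/*
 * The kernel above is a tiled (blocked) version of the heat-3d update: each
 * spatial loop is split into a tile loop with a fixed stride (8, 64 and 16
 * points) and an intra-tile loop whose upper bound is clamped by a min-style
 * conditional such as (t4 + 7 < n - 2 ? t4 + 7 : n - 2).  A minimal 1-D
 * illustration of the same pattern, with an assumed tile size of 4, is
 * sketched below.
 */
#include <stdio.h>

#define DEMO_TILE 4

int main(void)
{
    int n = 12;                                       /* iterate i = 1 .. n-2, as in the kernel */
    for (int t = 1; t <= n - 2; t += DEMO_TILE) {     /* tile loop */
        int hi = (t + DEMO_TILE - 1 < n - 2) ? t + DEMO_TILE - 1 : n - 2;  /* clamp the last tile */
        for (int i = t; i <= hi; i++)                 /* intra-tile loop */
            printf("tile starting at %2d visits i = %2d\n", t, i);
    }
    return 0;
}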
Searching.202006191549.nested_parallel.h
// // Created by Zhen Peng on 6/19/2020. // #ifndef BATCH_SEARCHING_SEARCHING_H #define BATCH_SEARCHING_SEARCHING_H #include <vector> #include <boost/dynamic_bitset.hpp> //#include <boost/sort/sort.hpp> #include <iostream> #include <fstream> #include <unordered_map> #include <immintrin.h> #include <cstring> #include <unordered_set> #include <set> #include <cfloat> //#include <omp.h> #include "../include/definitions.h" //#include "../include/efanna2e/neighbor.h" #include "../include/utils.h" #include "../include/Candidate.h" #include "../include/parallelization.h" #include "../include/bitvector.h" namespace PANNS { class Searching { //private: public: idi num_v_ = 0; edgei num_e_ = 0; idi num_queries_ = 0; uint64_t dimension_ = 0; idi width_ = 0; // NSG largest degree idi ep_ = 0; // Start point // std::vector<dataf> data_load_; // std::vector<dataf> queries_load_; // std::vector< std::vector<dataf> > data_load_; // std::vector< std::vector<dataf> > queries_load_; // std::vector<distf> norms_; dataf *data_load_ = nullptr; dataf *queries_load_ = nullptr; // dataf *norms_; // std::vector< std::vector<idi> > nsg_graph_; // idi *nsg_graph_indices_; // idi *nsg_graph_out_edges_; // std::vector< std::vector<idi> > edge_list_; char *opt_nsg_graph_ = nullptr; uint64_t data_bytes_; uint64_t neighbor_bytes_; uint64_t vertex_bytes_; // For multithreads int num_threads_ = 1; // int num_real_threads_ = 1; int num_threads_intra_ = 1; int num_threads_inter_ = 1; dataf compute_norm( const dataf *data) const; // idi vertex_id); // const std::vector<PANNS::dataf> &data); // size_t loc_start, // idi dimension) dataf compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<dataf> &d_data, // const std::vector<dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, const dataf vertex_norm) const; // idi dimension) static idi insert_into_queue( std::vector<Candidate> &c_queue, idi c_queue_top, Candidate cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand); static idi add_into_queue( std::vector<PANNS::Candidate> &queue, const idi queue_start, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand); static void add_into_queue_at( const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, // The insertion location, independent with queue_start const idi queue_start, idi &queue_top, // The number of elements in queue, independent with queue_start const idi queue_size); // The maximum capacity of queue, independent with queue_start. static void insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue_base, const idi insert_index, const idi queue_start, const idi queue_size); // idi insert_into_queue_nsg( // std::vector< Candidate > &c_queue, // idi c_queue_top, // Candidate cand); static idi merge_two_queues_into_1st_queue_seq_fixed( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); static void merge_two_queues_into_1st_queue_seq_incr( std::vector<Candidate> &queue1, const idi queue1_start, idi &queue1_size, // The number of element in queue1, independent with queue1_start. const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start. 
std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size); idi merge_all_queues_para_list( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L); // idi merge_all_queues_para_array( //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // const idi local_queue_length, // std::vector<Candidate> &set_L, // const idi L); idi merge_all_queues_para_array( std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L); idi merge_all_queues_queue_base( // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi queue_base, const int real_threads, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L); idi merge_all_queues_all_together_in_sequential( std::vector<Candidate> &set_L, std::vector<idi> &local_queues_ends, const idi local_queue_length, const idi L); idi min_all_queues_at_heads( const std::vector<Candidate> &set_L, std::vector<idi> &queue_heads, const std::vector<idi> &local_queues_ends, const idi local_queue_length, const idi L); public: // For Profiling // L3CacheMissRate cache_miss_kernel; uint64_t count_distance_computation_ = 0; // uint64_t count_single_query_computation_ = 0; // distf dist_min_ = 0; // distf dist_max_ = 0; double time_merge_ = 0; // double time_initialization_ = 0; // double time_sequential_phase_ = 0; // double time_parallel_phase_ = 0; // double time_insert_ = 0; // double time_compare_minimum_ = 0; // L3CacheMissRate profile_miss_rate; // uint64_t number_local_elements_ = 0; ~Searching() { free(data_load_); data_load_ = nullptr; // free(queries_load_); // _mm_free(data_load_); free(queries_load_); queries_load_ = nullptr; // free(norms_); // free(nsg_graph_indices_); // free(nsg_graph_out_edges_); free(opt_nsg_graph_); opt_nsg_graph_ = nullptr; } void load_data_load(char *filename); void load_queries_load(char *filename); void load_nsg_graph(char *filename); // void build_opt_graph(); void prepare_init_ids( std::vector<unsigned> &init_ids, const unsigned L) const; // void prepare_candidate_queue_list( // const float *query_load, // std::vector<std::vector<efanna2e::Neighbor> > &retset_list, // std::vector<boost::dynamic_bitset<> > &is_visited_list, // const std::vector<unsigned> &init_ids, // const boost::dynamic_bitset<> &flags, // unsigned batch_start, // unsigned batch_size, // unsigned L); // void search_in_batch( //// const float *query_load, // size_t K, // size_t L, // unsigned batch_start, // unsigned batch_size, // std::vector< std::vector<Candidate> > &set_L_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list, // const std::vector<idi> &init_ids, // const boost::dynamic_bitset<> &is_visited, // std::vector<std::vector<idi> > &set_K_list); void search_in_sequential( idi query_id, idi K, idi L, std::vector<Candidate> &set_L, // boost::dynamic_bitset<> &is_visited, // boost::dynamic_bitset<> is_visited, // std::vector<idi> &init_ids, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // void search_in_sequential_BitVector( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // 
std::vector<idi> &set_K); // idi get_out_degree(idi v_id) const // { // if (v_id < num_v_ - 1) { // return nsg_graph_indices_[v_id + 1] - nsg_graph_indices_[v_id]; // } else { // return num_e_ - nsg_graph_indices_[v_id]; // } // } void search_with_top_m( idi M, idi query_id, idi K, idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K); // std::vector< std::vector<idi> > &top_m_list); void search_with_top_m_scale_m( const PANNS::idi value_M_max, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); // void search_with_top_m_myths_M( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void search_with_top_m_to_get_distance_range( // const PANNS::idi M, // const PANNS::idi query_id, //// const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids); // void search_with_top_m_profile_bit_CAS( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited); void search_with_top_m_in_batch( PANNS::idi M, PANNS::idi batch_start, PANNS::idi batch_size, PANNS::idi K, PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list); // void para_search_with_top_m_critical_area( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_critical_area_no_omp( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_critical_area_yes_omp( // idi M, // idi query_id, // idi K, // idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_visited_array( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited); // void para_search_with_top_m_merge_queues( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_queues_seq_merge( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K); // void para_search_with_top_m_merge_queues_no_CAS( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // 
std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<idi> &local_queues_ends, //// std::vector<uint8_t> &is_visited); // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_in_array( // void para_search_with_top_m_merge_queues_new_threshold( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited); // void para_search_with_top_m_merge_queues_by_sort( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &dest_offsets, // const std::vector<idi> &offsets_load_set_L, // Offsets for store into set_L. // BitVector &is_visited); void para_search_with_top_m_merge_queues_better_merge_v0( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited); boost::dynamic_bitset<> &is_visited); // BitVector &is_visited); void para_search_with_top_m_merge_queues_better_merge_v2( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited, std::vector<distf> &local_thresholds); // BitVector &is_visited) void para_search_with_top_m_merge_queues_better_merge_v1( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited); boost::dynamic_bitset<> &is_visited); // BitVector &is_visited); void para_search_with_top_m_merge_queues_better_merge_v0_0( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited); // BitVector &is_visited) void para_search_with_top_m_merge_queues_less_merge( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> 
&set_K, const idi local_queue_length, // Maximum size of local queue std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited, std::vector<distf> &local_thresholds); // BitVector &is_visited) void para_search_with_top_m_merge_queues_no_merge( const idi M, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited, std::vector<distf> &local_thresholds, const uint64_t computation_threshold); void para_search_with_top_m_merge_queues_scale_m_v0( const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited); boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_middle_m( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited); // std::vector<distf> &local_thresholds); // BitVector &is_visited) void para_search_with_top_m_merge_queues_scale_m_v2( const idi value_M_min, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_scale_m_v3( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<Candidate> &top_m_candidates, std::vector<idi> &top_m_candidates, // std::vector<uint8_t> &is_visited) boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_middle_m_no_merge( const uint64_t computation_threshold, const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, const idi init_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const 
idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_merge_queues_sequential_merge( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited); void para_search_with_top_m_nested_para( const idi batch_start, const idi batch_size, const idi value_M_middle, const idi value_M_max, const idi K, const idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue std::vector< std::vector<idi> > &top_m_candidates_list, std::vector< boost::dynamic_bitset<> > &is_visited_list); // void para_search_with_top_m_merge_queues_global_threshold( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_distance_threshold_m( //// const idi value_M_middle, //// const idi value_M_max, // const distf relative_dist_threshold, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); //// std::vector<distf> &local_thresholds) //// BitVector &is_visited) // void para_search_with_top_m_merge_queues_distance_threshold_m_middle_iteration( //// const idi value_M_middle, //// const idi value_M_max, // const distf relative_dist_threshold, // const idi middle_iteration, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_collectors( // const idi 
value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_selecting( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited); // void para_search_with_top_m_merge_queues_myths( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited); //// std::vector<uint8_t> &is_visited); //// boost::dynamic_bitset<> &is_visited); //// void para_prepare_init_ids( //// std::vector<unsigned> &init_ids, //// unsigned L) const; // void para_search_with_top_m_in_batch_embarassing_para( // const PANNS::idi M, // const PANNS::idi batch_start, // const PANNS::idi batch_size, // const PANNS::idi K, // const PANNS::idi L, // std::vector< std::vector<Candidate> > &set_L_list, // const std::vector<idi> &init_ids, // std::vector< std::vector<idi> > &set_K_list, // std::vector< boost::dynamic_bitset<> > &is_visited_list); void test_neighbors_distance_to_father( const idi num_selected) const; void test_neighbors_normalized_distance_to_father( const idi num_selected) const; void load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list); void get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const; }; // Class Searching /** * Input the data from the file. * @param filename */ inline void Searching::load_data_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, data_load_, num_v_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: data dimension " << dimension_ << " is not equal to query dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Input queries from the file. * @param filename */ inline void Searching::load_queries_load(char *filename) { auto old_d = dimension_; DiskIO::load_data( filename, queries_load_, num_queries_, dimension_); if (old_d) { if (old_d != dimension_) { std::cerr << "Error: query dimension " << dimension_ << " is not equal to data dimension " << old_d << "." << std::endl; exit(EXIT_FAILURE); } } } /** * Input the NSG graph from the file. 
* Reference: https://github.com/ZJULearning/nsg/blob/master/src/index_nsg.cpp * @param filename */ inline void Searching::load_nsg_graph(char *filename) { std::ifstream fin(filename); if (!fin.is_open()) { std::cerr << "Error: cannot read file " << filename << " ." << std::endl; exit(EXIT_FAILURE); } fin.read(reinterpret_cast<char *>(&width_), sizeof(unsigned)); fin.read(reinterpret_cast<char *>(&ep_), sizeof(unsigned)); data_bytes_ = (1 + dimension_) * sizeof(dataf); neighbor_bytes_ = (1 + width_) * sizeof(idi); vertex_bytes_ = data_bytes_ + neighbor_bytes_; opt_nsg_graph_ = (char *) malloc(num_v_ * vertex_bytes_); if (!opt_nsg_graph_) { std::cerr << "Error: no enough memory for opt_nsg_graph_." << std::endl; exit(EXIT_FAILURE); } idi v_id = 0; num_e_ = 0; char *base_location = opt_nsg_graph_; while (true) { idi degree; fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); if (fin.eof()) { break; } num_e_ += degree; // std::vector<idi> tmp_ngbrs(degree); // fin.read(reinterpret_cast<char *>(tmp_ngbrs.data()), degree * sizeof(unsigned)); // Norm and data distf norm = compute_norm(data_load_ + v_id * dimension_); // distf norm = compute_norm(v_id); std::memcpy(base_location, &norm, sizeof(distf)); // Norm memcpy(base_location + sizeof(distf), data_load_ + v_id * dimension_, dimension_ * sizeof(dataf)); // Data base_location += data_bytes_; // Neighbors memcpy(base_location, &degree, sizeof(idi)); // Number of neighbors fin.read(base_location + sizeof(idi), degree * sizeof(unsigned)); // Neighbors // memcpy(location + sizeof(idi), tmp_ngbrs.data(), degree * sizeof(unsigned)); base_location += neighbor_bytes_; ++v_id; } if (v_id != num_v_) { std::cerr << "Error: NSG data has " << v_id << " vertices, but origin data has " << num_v_ << " vertices." << std::endl; exit(EXIT_FAILURE); } free(data_load_); data_load_ = nullptr; // //////////////////////// // idi v_id = 0; // num_e_ = 0; // while (true) { // idi degree; // fin.read(reinterpret_cast<char *>(&degree), sizeof(unsigned)); // if (fin.eof()) { // break; // } // num_e_ += degree; // // std::vector<idi> ngbrs(degree); // fin.read(reinterpret_cast<char *>(ngbrs.data()), degree * sizeof(unsigned)); //// nsg_graph_.push_back(ngbrs); //// tmp_edge_list.push_back(ngbrs); // edge_list_.push_back(ngbrs); // ++v_id; // } // if (v_id != num_v_) { // std::cerr << "Error: NSG data has " << v_id // << " vertices, but origin data has " << num_v_ << " vertices." 
<< std::endl; // exit(EXIT_FAILURE); // } } /** * Load those true top-K neighbors (ground truth) of queries * @param filename * @param[out] true_nn_list */ inline void Searching::load_true_NN( const char *filename, std::vector< std::vector<idi> > &true_nn_list) // unsigned &t_K) { std::ifstream fin(filename); if (!fin.is_open()) { fprintf(stderr, "Error: cannot open file %s\n", filename); exit(EXIT_FAILURE); } idi t_query_num; idi t_K; // unsigned t_K; fin.read(reinterpret_cast<char *>(&t_query_num), sizeof(t_query_num)); fin.read(reinterpret_cast<char *>(&t_K), sizeof(t_K)); // if (t_query_num != query_num) { // fprintf(stderr, "Error: query_num %u is not equal to the record %u in true-NN file %s\n", // query_num, t_query_num, filename); // exit(EXIT_FAILURE); // } if (t_query_num < num_queries_) { fprintf(stderr, "Error: t_query_num %u is smaller than num_queries_ %u\n", t_query_num, num_queries_); exit(EXIT_FAILURE); } if (t_K < 100) { fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); exit(EXIT_FAILURE); } // data = new unsigned[(size_t) t_query_num * (size_t) t_K]; true_nn_list.resize(t_query_num); for (idi q_i = 0; q_i < t_query_num; ++q_i) { true_nn_list[q_i].resize(t_K); } for (unsigned q_i = 0; q_i < t_query_num; ++q_i) { // size_t offset = q_i * t_K; for (unsigned n_i = 0; n_i < t_K; ++n_i) { unsigned id; float dist; fin.read(reinterpret_cast<char *>(&id), sizeof(id)); fin.read(reinterpret_cast<char *>(&dist), sizeof(dist)); // data[offset + n_i] = id; true_nn_list[q_i][n_i] = id; } } fin.close(); } inline void Searching::get_recall_for_all_queries( const std::vector< std::vector<idi> > &true_nn_list, const std::vector<std::vector<unsigned>> &set_K_list, std::unordered_map<unsigned, double> &recalls) const { // if (t_K < 100) { // fprintf(stderr, "Error: t_K %u is smaller than 100.\n", t_K); // exit(EXIT_FAILURE); // } if (true_nn_list[0].size() < 100) { fprintf(stderr, "Error: Number of true nearest neighbors of a query is smaller than 100.\n"); exit(EXIT_FAILURE); } recalls[1] = 0.0; recalls[5] = 0.0; recalls[10] = 0.0; recalls[20] = 0.0; recalls[50] = 0.0; recalls[100] = 0.0; for (unsigned q_i = 0; q_i < num_queries_; ++q_i) { // size_t offset = q_i * t_K; for (unsigned top_i = 0; top_i < 100; ++top_i) { unsigned true_id = true_nn_list[q_i][top_i]; for (unsigned n_i = 0; n_i < 100; ++n_i) { if (set_K_list[q_i][n_i] == true_id) { if (n_i < 1) recalls[1] += 1; if (n_i < 5) recalls[5] += 1; if (n_i < 10) recalls[10] += 1; if (n_i < 20) recalls[20] += 1; if (n_i < 50) recalls[50] += 1; if (n_i < 100) recalls[100] += 1; } } } } recalls[1] /= 1.0 * num_queries_; recalls[5] /= 5.0 * num_queries_; recalls[10] /= 10.0 * num_queries_; recalls[20] /= 20.0 * num_queries_; recalls[50] /= 50.0 * num_queries_; recalls[100] /= 100.0 * num_queries_; } inline void Searching::search_in_sequential( const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { // {//test // printf("Iteration: Relative_Distance:\n"); //// printf("Iteration: Relative_Distance:\n"); //// printf("----query: %u----\n", query_id); // } boost::dynamic_bitset<> is_visited(num_v_); for (idi v_i = 0; v_i < L; ++v_i) { is_visited[init_ids[v_i]] = true; } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. 
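    // The loop below computes, for each of the L initial candidates, its
    // distance to the query. Each vertex record in opt_nsg_graph_ begins with
    // the vertex's precomputed squared norm followed by its raw vector, so
    // compute_distance_with_norm() only needs one dot product per candidate.
    // After the std::sort that follows, set_L[0..L-1] is ordered by ascending
    // distance and acts as the bounded best-first queue for the search loop.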
for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); idi k = 0; // Index of every queue's first unchecked candidate. idi tmp_count = 0; // for debug // {// Print relative distance //// distf top_dist = set_L[0].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l].distance_); //// tmp_count, set_L[i_l].distance_ - top_dist); // } // } while (k < L) { Candidate &top_cand = set_L[k]; unsigned nk = L; if (!top_cand.is_checked_) { ++tmp_count; top_cand.is_checked_ = true; idi v_id = top_cand.id_; // Vertex ID. _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } // Traverse v_id's all neighbors, pushing them into the queue for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // Compute the distance ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate cand(nb_id, dist, false); // Insert into the queue idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } // {// Print relative distance //// distf top_dist = set_L[0].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l].distance_); //// tmp_count, set_L[i_l].distance_ - top_dist); // } // } } if (nk <= k) { k = nk; } else { ++k; } } // cache_miss_kernel.measure_stop(); for (size_t k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } // {//test // if (0 == query_id) { // exit(1); // } // } } //inline void Searching::search_in_sequential_BitVector( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// boost::dynamic_bitset<> is_visited(num_v_); // BitVector is_visited(num_v_); // //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { //// is_visited[init_ids[v_i]] = true; // is_visited.atomic_set_bit(init_ids[v_i]); // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. 
// while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = true; // // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } //// cache_miss_kernel.measure_stop(); //#pragma omp parallel for // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} /** * Prepare init_ids and flags, as they are constant for all queries. * @param[out] init_ids * @param L */ inline void Searching::prepare_init_ids( std::vector<unsigned int> &init_ids, const unsigned L) const { // idi num_ngbrs = get_out_degree(ep_); // edgei edge_start = nsg_graph_indices_[ep_]; // // Store ep_'s neighbors as candidates // idi tmp_l = 0; // for (; tmp_l < L && tmp_l < num_ngbrs; tmp_l++) { // init_ids[tmp_l] = nsg_graph_out_edges_[edge_start + tmp_l]; // } // std::unordered_set<idi> visited_ids; boost::dynamic_bitset<> is_selected(num_v_); idi *out_edges = (idi *) (opt_nsg_graph_ + ep_ * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; idi init_ids_end = 0; // for (; tmp_l < L && tmp_l < out_degree; tmp_l++) { for (idi e_i = 0; e_i < out_degree && init_ids_end < L; ++e_i) { // idi v_id = out_edges[tmp_l]; idi v_id = out_edges[e_i]; if(is_selected[v_id]) { continue; } is_selected[v_id] = true; // init_ids[tmp_l] = v_id; init_ids[init_ids_end++] = v_id; // init_ids[tmp_l] = out_edges[tmp_l]; // visited_ids.insert(init_ids[tmp_l]); } // for (idi i = 0; i < tmp_l; ++i) { // is_visited[init_ids[i]] = true; // } // If ep_'s neighbors are not enough, add other random vertices idi tmp_id = ep_ + 1; // use tmp_id to replace rand(). 
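    // If ep_'s out-neighbors do not fill all L initial slots, pad the rest
    // deterministically: scan vertex IDs ep_ + 1, ep_ + 2, ... (wrapping around
    // num_v_), skip vertices already selected, and stop once L candidates have
    // been collected. Using a sequential scan instead of rand() keeps the
    // initial candidate set identical across runs.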
while (init_ids_end < L) { tmp_id %= num_v_; idi v_id = tmp_id++; if (is_selected[v_id]) { continue; } // if (visited_ids.find(id) != visited_ids.end()) { // continue; // } is_selected[v_id] = true; // visited_ids.insert(id); init_ids[init_ids_end++] = v_id; // tmp_l++; } } // TODO: re-code in AVX-512 inline dataf Searching::compute_norm( const dataf *data) const // idi vertex_id) // const std::vector<PANNS::dataf> &data) // size_t loc_start, // idi dimension) { // const dataf *a = data.data() + loc_start; // const dataf *a = data_load_ + vertex_id * dimension_; // idi size = dimension_; dataf result = 0; //#define AVX_L2NORM(addr, dest, tmp) \ // tmp = _mm256_load_ps(addr); \ // tmp = _mm256_mul_ps(tmp, tmp); \ // dest = _mm256_add_ps(dest, tmp); #define AVX_L2NORM(addr, dest, tmp) \ tmp = _mm256_loadu_ps(addr); \ tmp = _mm256_mul_ps(tmp, tmp); \ dest = _mm256_add_ps(dest, tmp); __m256 sum; __m256 l0, l1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = data; const float *e_l = l + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_L2NORM(e_l, sum, l0); } for (unsigned i = 0; i < DD; i += 16, l += 16) { AVX_L2NORM(l, sum, l0); AVX_L2NORM(l + 8, sum, l1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; return result; } inline dataf Searching::compute_distance_with_norm( const dataf *v_data, const dataf *q_data, // idi vertex_id, // idi query_id, // const std::vector<PANNS::dataf> &d_data, // const std::vector<PANNS::dataf> &q_data, // PANNS::idi d_start, // PANNS::idi q_start, const dataf vertex_norm) const // idi dimension) { // idi size = dimension_; float result = 0; //#define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ // tmp1 = _mm256_load_ps(addr1);\ // tmp2 = _mm256_load_ps(addr2);\ // tmp1 = _mm256_mul_ps(tmp1, tmp2); \ // dest = _mm256_add_ps(dest, tmp1); #define AVX_DOT(addr1, addr2, dest, tmp1, tmp2) \ tmp1 = _mm256_loadu_ps(addr1);\ tmp2 = _mm256_loadu_ps(addr2);\ tmp1 = _mm256_mul_ps(tmp1, tmp2); \ dest = _mm256_add_ps(dest, tmp1); __m256 sum; __m256 l0, l1; __m256 r0, r1; unsigned D = (dimension_ + 7) & ~7U; unsigned DR = D % 16; unsigned DD = D - DR; const float *l = v_data; const float *r = q_data; // const float *l = (float *) (opt_nsg_graph_ + vertex_id * vertex_bytes_ + sizeof(distf)); // const float *r = queries_load_ + query_id * dimension_; const float *e_l = l + DD; const float *e_r = r + DD; float unpack[8] __attribute__ ((aligned (32))) = {0, 0, 0, 0, 0, 0, 0, 0}; sum = _mm256_load_ps(unpack); // sum = _mm256_loadu_ps(unpack); if (DR) { AVX_DOT(e_l, e_r, sum, l0, r0); } for (unsigned i = 0; i < DD; i += 16, l += 16, r += 16) { AVX_DOT(l, r, sum, l0, r0); AVX_DOT(l + 8, r + 8, sum, l1, r1); } _mm256_store_ps(unpack, sum); // _mm256_storeu_ps(unpack, sum); result = unpack[0] + unpack[1] + unpack[2] + unpack[3] + unpack[4] + unpack[5] + unpack[6] + unpack[7]; result = -2 * result + vertex_norm; return result; } //// DEPRECATED. // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. 
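// Note on the distance used throughout this class: for a fixed query q, ranking
// candidates v by squared Euclidean distance
//     ||v - q||^2 = ||v||^2 - 2 * (v . q) + ||q||^2
// is equivalent to ranking by ||v||^2 - 2 * (v . q), because ||q||^2 is a
// per-query constant. compute_norm() stores ||v||^2 for every vertex at load
// time, so compute_distance_with_norm() above only evaluates the dot product.
// The helper below is a plain scalar sketch of that identity, added purely as
// documentation; it is hypothetical, not a member of Searching, and is not
// called anywhere in this file. The AVX version above computes the same value
// eight floats at a time.
inline float example_distance_with_norm_scalar(
        const float *v_data,        // candidate vector (stored right after its norm)
        const float *q_data,        // query vector
        unsigned dimension,
        float vertex_norm)          // precomputed ||v||^2
{
    float dot = 0.0f;
    for (unsigned d = 0; d < dimension; ++d) {
        dot += v_data[d] * q_data[d];
    }
    return vertex_norm - 2.0f * dot;    // == ||v - q||^2 - ||q||^2
}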
//inline idi Searching::add_into_queue( // std::vector<PANNS::Candidate> &queue, // idi &queue_top, // const idi queue_size, // const PANNS::Candidate &cand) //{ // assert(queue_size > 1); // if (0 == queue_top) { // queue[queue_top++] = cand; // return 0; // } else if (1 == queue_top) { // if (queue[0] < cand) { // queue[queue_top++] = cand; // return 1; // } else { // queue[++queue_top] = queue[0]; // queue[0] = cand; // return 0; // } // } // // if (queue[queue_top - 1] < cand) { // if (queue_top < queue_size) { // queue[queue_top++] = cand; // } // return queue_top; // } // // idi r = insert_into_queue( // queue, // queue_top - 1, // cand); //// {//test //// printf("r: %u" //// "queue_top: %u " //// "queue_size: %u\n", //// r, //// queue_top, //// queue_size); //// } // return r; // //// ///////////////////////////////////////////////////////////// //// // Find the insert location //// auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); //// idi insert_loc = it_loc - queue.begin(); //// if (insert_loc == queue_size) { //// return queue_size; //// } //// //// // Insert ////// if (queue_top == queue_size) { ////// // If full already ////// --queue_top; ////// } //// memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), //// reinterpret_cast<char *>(queue.data() + insert_loc), //// (queue_top - insert_loc) * sizeof(Candidate)); ////// for (idi q_i = queue_top; q_i > insert_loc; --q_i) { ////// queue.at(q_i) = queue.at(q_i - 1); ////// } //// queue[insert_loc] = cand; //// ++queue_top; //// return insert_loc; //} // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. inline idi Searching::add_into_queue( std::vector<PANNS::Candidate> &queue, idi &queue_top, const idi queue_size, const PANNS::Candidate &cand) { if (0 == queue_top) { queue[queue_top++] = cand; return 0; } // Find the insert location auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); idi insert_loc = it_loc - queue.begin(); if (insert_loc == queue_size) { return queue_size; } // Insert if (queue_top == queue_size) { // If full already --queue_top; } memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), reinterpret_cast<char *>(queue.data() + insert_loc), (queue_top - insert_loc) * sizeof(Candidate)); // for (idi q_i = queue_top; q_i > insert_loc; --q_i) { // queue.at(q_i) = queue.at(q_i - 1); // } queue[insert_loc] = cand; ++queue_top; return insert_loc; } // The difference from insert_into_queue is that add_into_queue will increase the queue size by 1. // add_into_queue with a queue_start inline idi Searching::add_into_queue( std::vector<PANNS::Candidate> &queue, const idi queue_start, idi &queue_top, // The insertion location starting from queue_start const idi queue_size, // The maximum capacity of queue, independent with queue_start. 
const PANNS::Candidate &cand) { if (0 == queue_top) { queue[queue_start + queue_top++] = cand; return 0; } idi queue_end = queue_start + queue_top; // Find the insert location auto it_loc = std::lower_bound(queue.begin() + queue_start, queue.begin() + queue_end, cand); // auto it_loc = std::lower_bound(queue.begin(), queue.begin() + queue_top, cand); idi insert_loc = it_loc - queue.begin(); if (queue_top < queue_size) { // Queue is not full if (insert_loc == queue_end) { // Insert at the end queue[insert_loc] = cand; ++queue_top; return queue_top - 1; } } else { // Queue is full if (insert_loc == queue_end) { return queue_size; } --queue_top; --queue_end; } if (cand.id_ == it_loc->id_) { // Duplicate return queue_size; } // Add into queue memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), reinterpret_cast<char *>(queue.data() + insert_loc), (queue_end - insert_loc) * sizeof(Candidate)); queue[insert_loc] = cand; ++queue_top; return insert_loc - queue_start; // //////////////// // if (insert_loc == queue_size + queue_start) { // return queue_size; // } // // if (cand.id_ == it_loc->id_) { // // Duplicate // return queue_size; // } // // // Insert // if (queue_top == queue_size) { // // If full already // --queue_top; // --queue_end; // } // memmove(reinterpret_cast<char *>(queue.data() + insert_loc + 1), // reinterpret_cast<char *>(queue.data() + insert_loc), // (queue_end - insert_loc) * sizeof(Candidate)); // queue[insert_loc] = cand; // ++queue_top; // return insert_loc - queue_start; } inline void Searching::add_into_queue_at( const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, // The insertion location, independent with queue_start const idi queue_start, idi &queue_size, // The number of elements in queue, independent with queue_start const idi queue_length) // The maximum capacity of queue, independent with queue_start. { const idi dest_index = queue_start + insert_index; if (queue_size == queue_length) { --queue_size; } memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1), reinterpret_cast<char *>(queue.data() + dest_index), (queue_size - insert_index) * sizeof(Candidate)); queue[dest_index] = cand; ++queue_size; } inline void Searching::insert_one_element_at( // const T &cand, // T *queue_base, const Candidate &cand, std::vector<Candidate> &queue, const idi insert_index, const idi queue_start, const idi queue_size) { const idi dest_index = queue_start + insert_index; memmove(reinterpret_cast<char *>(queue.data() + dest_index + 1), reinterpret_cast<char *>(queue.data() + dest_index), (queue_size - insert_index - 1) * sizeof(Candidate)); queue[dest_index] = cand; // memmove(reinterpret_cast<char *>(queue_base + dest_index + 1), // reinterpret_cast<char *>(queue_base + dest_index), // (queue_size - insert_index - 1) * sizeof(T)); // for (idi q_i = queue_size - 1; q_i > insert_index; --q_i) { // queue_base.at(q_i + queue_start) = queue_base.at(q_i - 1 + queue_start); // } // queue_base[dest_index] = cand; } /** * PANNS version of InsertIntoPool(): binary-search to find the insert place and then move. 
* @param[out] c_queue * @param c_queue_top * @param cand * @return */ inline idi Searching::insert_into_queue( std::vector<PANNS::Candidate> &c_queue, PANNS::idi c_queue_top, PANNS::Candidate cand) { if (c_queue[0].distance_ > cand.distance_) { // If the first memmove(reinterpret_cast<char *>(c_queue.data() + 1), reinterpret_cast<char *>(c_queue.data()), c_queue_top * sizeof(Candidate)); c_queue[0] = cand; return 0; } else if (c_queue[c_queue_top - 1].distance_ == cand.distance_) { // If the last if (c_queue[c_queue_top - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering c_queue[c_queue_top - 1] = cand; return c_queue_top - 1; } else { return c_queue_top; } } idi left = 0; idi right = c_queue_top; while (left < right) { idi mid = (right - left) / 2 + left; if (c_queue[mid].distance_ > cand.distance_) { right = mid; } else { left = mid + 1; } } // If the distance is the same if (0 != left && c_queue[left - 1].distance_ != cand.distance_) { ; } else { while (0 != left && c_queue[left - 1].distance_ == cand.distance_ && c_queue[left - 1].id_ > cand.id_) { // Use ID as the second metrics for ordering --left; } } // Insert to left memmove(reinterpret_cast<char *>(c_queue.data() + left + 1), reinterpret_cast<char *>(c_queue.data() + left), (c_queue_top - left) * sizeof(Candidate)); c_queue[left] = cand; return left; } //inline void Searching::cand_pushes_ngbrs_into_queue( // idi cand_id, // const dataf *query_data, // idi L, // idi &new_k, // boost::dynamic_bitset<> &is_visited, // std::vector<Candidate> &set_L) //{ // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist >= set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } //} //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. 
// while (k < L) { // Candidate &top_cand = set_L[k]; // unsigned nk = L; // if (!top_cand.is_checked_) { // top_cand.is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // Deprecated: cannot use std::set, because its element is constant. //inline void Searching::search_in_sequential( // const idi query_id, // const idi K, // const idi L, //// std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) const //{ // std::set<Candidate> set_L; // boost::dynamic_bitset<> is_visited(num_v_); // // for (idi v_i = 0; v_i < L; ++v_i) { // is_visited[init_ids[v_i]] = true; // } // const dataf *query_data = queries_load_ + query_id * dimension_; // // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // distf dist = compute_distance_with_norm(v_data, query_data, norm); //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // set_L.emplace(v_id, dist, false); // } //// std::sort(set_L.begin(), set_L.begin() + L); // idi k = 0; // Index of every queue's first unchecked candidate. // while (k < L) { //// Candidate &top_cand = set_L[k]; // std::set<Candidate>::iterator top_cand = std::next(set_L.begin(), k); // unsigned nk = L; // if (!top_cand->is_checked_) { // top_cand->is_checked_ = true; // idi v_id = top_cand.id_; // Vertex ID. 
// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + v_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // Traverse v_id's all neighbors, pushing them into the queue // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // // Compute the distance // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } // Candidate cand(nb_id, dist, false); // // Insert into the queue // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // if (nk <= k) { // k = nk; // } else { // ++k; // } // } // // for (size_t k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} /* Function: * queue1_size is fixed. */ inline idi Searching::merge_two_queues_into_1st_queue_seq_fixed( std::vector<Candidate> &queue1, const idi queue1_start, const idi queue1_size, std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size) // const idi limit_size) { assert(queue1_size && queue2_size); // Record the lowest insert location. auto it_loc = std::lower_bound( queue1.begin() + queue1_start, queue1.begin() + queue1_start + queue1_size, queue2[queue2_start]); idi insert_index = it_loc - (queue1.begin() + queue1_start); if (insert_index == queue1_size) { return insert_index; } else if (insert_index == queue1_size - 1) { queue1[queue1_start + insert_index] = queue2[queue2_start]; return insert_index; } // Insert the 1st of queue2 if (queue2[queue2_start].id_ != it_loc->id_) { // Not Duplicate insert_one_element_at( queue2[queue2_start], queue1, insert_index, queue1_start, queue1_size); } if (queue2_size == 1) { return insert_index; } // Insert idi q_i_1 = insert_index + 1 + queue1_start; idi q_i_2 = queue2_start + 1; const idi q_i_1_bound = queue1_start + queue1_size; const idi q_i_2_bound = queue2_start + queue2_size; // const idi insert_i_bound = queue1_start + limit_size; for (idi insert_i = insert_index + 1; insert_i < queue1_size; ++insert_i) { if (q_i_1 >= q_i_1_bound || q_i_2 >= q_i_2_bound) { // queue1 or queue2 finished traverse. Rest o break; } else if (queue1[q_i_1] < queue2[q_i_2]) { ++q_i_1; } else if (queue2[q_i_2] < queue1[q_i_1]) { // Insert queue2[q_i_2] into queue1 insert_one_element_at( queue2[q_i_2++], queue1, insert_i, queue1_start, queue1_size); ++q_i_1; } else { // Duplicate ++q_i_2; ++q_i_1; } } return insert_index; } /* Function: * queue1_size should be updated. * queue1_length should be provided. */ inline void Searching::merge_two_queues_into_1st_queue_seq_incr( std::vector<Candidate> &queue1, const idi queue1_start, idi &queue1_size, // The number of element in queue1, independent with queue1_start. const idi queue1_length, // The maximum capacity of queue1, independent with queue1_start. std::vector<Candidate> &queue2, const idi queue2_start, const idi queue2_size) // const idi limit_size) { assert(queue1_size && queue2_size); // Record the lowest insert location. 
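    // Merge plan: binary-search where queue2's first (best) element lands in
    // queue1, then walk both sorted ranges from that point, inserting queue2's
    // elements one at a time with add_into_queue_at(). queue1 may grow up to
    // queue1_length, and queue1_size is updated as elements are added; a
    // candidate that compares equal to the element already at its insertion
    // point is treated as a duplicate and skipped. The _fixed variant above
    // performs the same walk but keeps queue1_size constant and instead
    // returns the lowest insertion index.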
auto it_loc = std::lower_bound( queue1.begin() + queue1_start, queue1.begin() + queue1_start + queue1_size, queue2[queue2_start]); idi insert_index = it_loc - (queue1.begin() + queue1_start); if (insert_index == queue1_size) { idi copy_count = (queue1_size + queue2_size > queue1_length) ? queue1_length - queue1_size : queue2_size; memmove(queue1.data() + queue1_start + queue1_size, queue2.data() + queue2_start, copy_count * sizeof(Candidate)); queue1_size += copy_count; return; } if (queue2[queue2_start].id_ != it_loc->id_) { // Not Duplicate add_into_queue_at( queue2[queue2_start], queue1, insert_index, queue1_start, queue1_size, queue1_length); } if (queue2_size == 1) { return; } // Insert idi q_i_1 = insert_index + 1 + queue1_start; idi q_i_2 = queue2_start + 1; idi q_i_1_bound = queue1_start + queue1_size; // When queue1_size is updated, so should be q_i_1_bound. const idi q_i_2_bound = queue2_start + queue2_size; // idi insert_i; for (idi insert_i = insert_index + 1; insert_i < queue1_length; ++insert_i) { if (q_i_1 >= q_i_1_bound) { queue1_size += std::min(queue1_length - insert_i, q_i_2_bound - q_i_2); for ( ; insert_i < queue1_size; ++insert_i) { queue1[queue1_start + insert_i] = queue2[q_i_2++]; } break; } else if (q_i_2 >= q_i_2_bound) { break; } else if (queue1[q_i_1] < queue2[q_i_2]) { ++q_i_1; } else if (queue2[q_i_2] < queue1[q_i_1]) { add_into_queue_at( queue2[q_i_2++], queue1, insert_i, queue1_start, queue1_size, queue1_length); ++q_i_1; q_i_1_bound = queue1_start + queue1_size; } else { // Duplicate ++q_i_2; ++q_i_1; } } } inline idi Searching::merge_all_queues_para_list( std::vector< std::vector<Candidate> > &local_queues_list, std::vector<idi> &local_queues_ends, std::vector<Candidate> &set_L, const idi L) { int size = 1 << (static_cast<idi>(log2(num_threads_))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); #pragma omp parallel for for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi bi = i + (1 << d) - 1; // i + 2^d - 1 if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; // {// Print queue a // printf("d: %u " // "i: %u " // "ai: %u " // "local_queues_ends[%d]: %d\n", // d, // i, // ai, // ai, // local_queues_ends[ai]); // for (idi i_q = 0; i_q < local_queues_ends[ai]; ++i_q) { // printf("[%u]: " // "id: %u " // "dist: %f\n", // i_q, // local_queues_list[ai][i_q].id_, // local_queues_list[ai][i_q].distance_); // } // } } } // Remain, prefix-sum-like merge if (size != num_threads_) { for (int 
i = size; i < num_threads_; ++i) { idi ai = i; idi bi = i - 1; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { local_queues_list[ai].swap(local_queues_list[bi]); std::swap(local_queues_ends[ai], local_queues_ends[bi]); continue; } // else if (local_queues_ends[ai] < L && local_queues_ends[bi] >= L) { // local_queues_list[ai].swap(local_queues_list[bi]); // std::swap(local_queues_ends[ai], local_queues_ends[bi]); // } // merge_two_queues_into_1st_queue_seq( // local_queues_list[ai], // 0, // local_queues_ends[ai], // local_queues_list[bi], // 0, // local_queues_ends[bi]); idi tmp_length = local_queues_ends[ai] + local_queues_ends[bi]; std::vector<Candidate> tmp_queue(tmp_length); std::merge( local_queues_list[ai].begin(), local_queues_list[ai].begin() + local_queues_ends[ai], local_queues_list[bi].begin(), local_queues_list[bi].begin() + local_queues_ends[bi], tmp_queue.begin()); if (tmp_length > L) { tmp_queue.resize(L); tmp_length = L; } else if (tmp_length < L) { tmp_queue.resize(L); } local_queues_list[ai].swap(tmp_queue); local_queues_ends[ai] = tmp_length; } } // Merge into set_L idi r = L; if (local_queues_ends[num_threads_ - 1]) { r = merge_two_queues_into_1st_queue_seq_fixed( set_L, 0, L, local_queues_list[num_threads_ - 1], 0, local_queues_ends[num_threads_ - 1]); } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return r; } /* Function: * Use large local_queues_array as a concatenation of all queues */ inline idi Searching::merge_all_queues_para_array( std::vector<Candidate> &set_L, std::vector<idi> &local_queues_ends, const idi local_queue_length, const idi L) { const idi num_queues = num_threads_intra_; idi nk = L; int size = 1 << (static_cast<idi>(log2(num_queues))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { uint32_t by = 1 << (d + 1); #pragma omp parallel for num_threads(num_threads_intra_) for (int i = 0; i < size; i += by) { idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi a_start = ai * local_queue_length; idi bi = i + (1 << d) - 1; // i + 2^d - 1 idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_queues - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Remain, prefix-sum-like merge if (size != num_queues) { for (int i = size; i < num_queues; ++i) { idi ai = i; idi a_start = ai * local_queue_length; idi bi = i - 1; idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_queues - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, 
L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Reset local_queues_ends // Not do this for Collector Idea or Selecting Idea std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return nk; // return r; } /* Function: * When merge all queues (in an array, and [num_threads_ - 1] is the global queue), * the starting local is at [queue_base] */ inline idi Searching::merge_all_queues_queue_base( // std::vector< std::vector<Candidate> > &local_queues_list, std::vector<Candidate> &set_L, // std::vector<Candidate> &local_queues_array, std::vector<idi> &local_queues_ends, const idi queue_base, const int real_threads, const idi local_queue_length, // std::vector<Candidate> &set_L, const idi L) { idi nk = L; int size = 1 << (static_cast<idi>(log2(real_threads))); // int size = 1 << (static_cast<idi>(log2(num_threads_))); idi log2size = static_cast<idi>(log2(size)); for (idi d = 0; d < log2size; ++d) { idi by = 1 << (d + 1); idi i_bound = size + queue_base; #pragma omp parallel for num_threads(real_threads) for (idi i = queue_base; i < i_bound; i += by) { // for (int i = 0; i < size; i += by) { // idi ai = i + (1 << (d + 1)) - 1 + queue_base; // i + 2^(d+1) - 1 idi ai = i + (1 << (d + 1)) - 1; // i + 2^(d+1) - 1 idi a_start = ai * local_queue_length; // idi bi = i + (1 << d) - 1 + queue_base; // i + 2^d - 1 idi bi = i + (1 << d) - 1; // i + 2^d - 1 idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { // local_queues_list[ai].swap(local_queues_list[bi]); std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_threads_ - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Remain, prefix-sum-like merge if (size != real_threads) { // if (size != num_threads_) { for (int i = size + queue_base; i < num_threads_; ++i) { // for (int i = size; i < num_threads_; ++i) { idi ai = i; idi a_start = ai * local_queue_length; idi bi = i - 1; idi b_start = bi * local_queue_length; if (0 == local_queues_ends[bi]) { continue; } if (local_queues_ends[ai] == 0) { std::copy(set_L.begin() + b_start, set_L.begin() + b_start + local_queues_ends[bi], set_L.begin() + a_start); // Copy bi to ai local_queues_ends[ai] = local_queues_ends[bi]; local_queues_ends[bi] = 0; continue; } if (ai != static_cast<idi>(num_threads_ - 1)) { merge_two_queues_into_1st_queue_seq_incr( set_L, a_start, local_queues_ends[ai], local_queue_length, set_L, b_start, local_queues_ends[bi]); } else { idi r = merge_two_queues_into_1st_queue_seq_fixed( set_L, a_start, L, set_L, b_start, local_queues_ends[bi]); if (r < nk) { nk = r; } } } } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); return nk; // return r; } /* Function: * Merge all queues to the global queue, in a two-queue-merge way */ inline idi Searching::merge_all_queues_all_together_in_sequential( std::vector<Candidate> &set_L, std::vector<idi> &local_queues_ends, const idi 
local_queue_length, const idi L) { const idi num_queues = num_threads_; const idi global_queue_base = (num_queues - 1) * local_queue_length; std::vector<idi> queue_heads(num_queues, 0); idi queue_id_min; // bool is_finished = false; bool is_1st_selected = true; idi nk = L; // The highest location of insertion. { for (idi q_i = 0; q_i < num_queues; ++q_i) { if (0 == local_queues_ends[q_i]) { continue; } _mm_prefetch(set_L.data() + q_i * local_queue_length, _MM_HINT_T0); } } while (queue_heads[num_queues - 1] < L) { // time_compare_minimum_ -= WallTimer::get_time_mark(); queue_id_min = min_all_queues_at_heads( set_L, queue_heads, local_queues_ends, local_queue_length, L); // time_compare_minimum_ += WallTimer::get_time_mark(); if (queue_id_min != num_queues - 1) { // Not in the global queue // time_insert_ -= WallTimer::get_time_mark(); insert_one_element_at( set_L[queue_heads[queue_id_min] + queue_id_min * local_queue_length], set_L, queue_heads[num_queues - 1], global_queue_base, L); // time_insert_ += WallTimer::get_time_mark(); if (is_1st_selected) { // Get the highest inserting location is_1st_selected = false; nk = queue_heads[num_queues - 1]; } ++queue_heads[queue_id_min]; } ++queue_heads[num_queues - 1]; } // Reset local_queues_ends std::fill(local_queues_ends.begin(), local_queues_ends.end() - 1, 0); return nk; } /* Function: * Find the minimum among queues at their head locations */ inline idi Searching::min_all_queues_at_heads( const std::vector<Candidate> &set_L, std::vector<idi> &queue_heads, const std::vector<idi> &local_queues_ends, const idi local_queue_length, const idi L) { const idi num_queues = num_threads_; idi min_queue_id = num_queues - 1; Candidate min_candidate = set_L[queue_heads[min_queue_id] + min_queue_id * local_queue_length]; for (idi q_i = 0; q_i < num_queues - 1; ++q_i) { if (queue_heads[q_i] >= local_queues_ends[q_i]) { // q_i finished continue; } const Candidate &ele = set_L[queue_heads[q_i] + q_i * local_queue_length]; if (ele < min_candidate) { min_candidate = ele; min_queue_id = q_i; } else if (ele.id_ == min_candidate.id_) { // Redundant element ++queue_heads[q_i]; } } return min_queue_id; } inline void Searching::search_with_top_m( const PANNS::idi M, const PANNS::idi query_id, const PANNS::idi K, const PANNS::idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K) { boost::dynamic_bitset<> is_visited(num_v_); { for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = true; } } const dataf *query_data = queries_load_ + query_id * dimension_; for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } // Get the distances of all candidates, store in the set set_L. for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L.begin(), set_L.begin() + L); std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. 
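    // Top-M expansion loop: each iteration scans set_L from position k,
    // collects up to M unchecked candidates into top_m_candidates and marks
    // them checked, then expands all of their out-neighbors in one batch.
    // A neighbor is inserted into set_L only if it is not farther than the
    // current worst element set_L[L-1]; nk records the best (smallest)
    // insertion position. If something was inserted at or before last_k, the
    // scan resumes from nk, otherwise it continues right after the last
    // selected candidate.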
    idi tmp_count = 0; // for debug
    while (k < L) {
        ++tmp_count;

        unsigned nk = L;

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
        for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) {
            idi cand_id = top_m_candidates[c_i];
            _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0);
            idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_);
            idi out_degree = *out_edges++;
            for (idi n_i = 0; n_i < out_degree; ++n_i) {
                _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0);
            }
            for (idi e_i = 0; e_i < out_degree; ++e_i) {
                idi nb_id = out_edges[e_i];
                if (is_visited[nb_id]) {
                    continue;
                }
                is_visited[nb_id] = true;
                auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_);
                dataf norm = *nb_data++;
                ++count_distance_computation_;
                distf dist = compute_distance_with_norm(nb_data, query_data, norm);
                if (dist > set_L[L-1].distance_) {
                    continue;
                }
                Candidate cand(nb_id, dist, false);
                idi r = insert_into_queue(set_L, L, cand);
                if (r < nk) {
                    nk = r;
                }
            }
        }
        top_m_candidates_end = 0; // Clear top_m_candidates

        if (nk <= last_k) {
            k = nk;
        } else {
            k = last_k + 1;
        }
    }

    for (idi k_i = 0; k_i < K; ++k_i) {
        set_K[k_i] = set_L[k_i].id_;
    }
}

inline void Searching::search_with_top_m_scale_m(
        const PANNS::idi value_M_max,
        const PANNS::idi query_id,
        const PANNS::idi K,
        const PANNS::idi L,
        std::vector<Candidate> &set_L,
        const std::vector<idi> &init_ids,
        std::vector<idi> &set_K,
        std::vector<idi> &top_m_candidates,
        boost::dynamic_bitset<> &is_visited)
{
//    boost::dynamic_bitset<> is_visited(num_v_);

    {
        for (idi c_i = 0; c_i < L; ++c_i) {
            is_visited[init_ids[c_i]] = true;
        }
    }

    const dataf *query_data = queries_load_ + query_id * dimension_;
    for (idi v_i = 0; v_i < L; ++v_i) {
        idi v_id = init_ids[v_i];
        _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0);
    }
    // Get the distances of all candidates, store in the set set_L.
    for (unsigned i = 0; i < L; i++) {
        unsigned v_id = init_ids[i];
        auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_);
        dataf norm = *v_data++;
        ++count_distance_computation_;
        distf dist = compute_distance_with_norm(v_data, query_data, norm);
        set_L[i] = Candidate(v_id, dist, false); // False means not checked.
    }
    std::sort(set_L.begin(), set_L.begin() + L);

//    std::vector<idi> top_m_candidates(M);
    idi top_m_candidates_end = 0;
    idi k = 0; // Index of first unchecked candidate.
    idi tmp_count = 0; // for debug
    idi M = 1;

    while (k < L) {
        ++tmp_count;

        unsigned nk = L;

        // Select M candidates
        idi last_k = L;
        for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) {
            if (set_L[c_i].is_checked_) {
                continue;
            }
            last_k = c_i; // Record the location of the last candidate selected.
            set_L[c_i].is_checked_ = true;
            top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_;
        }

        // Push M candidates' neighbors into the queue.
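        /*
         * (Added note) The expansion loop below is identical to the one in
         * search_with_top_m() above; what distinguishes search_with_top_m_scale_m()
         * is only the value of M: it starts at 1 and is doubled after every
         * iteration (M <<= 1) until it reaches value_M_max, so the first hops
         * expand few candidates while later, wider hops expose more work per
         * iteration.
         */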
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } Candidate cand(nb_id, dist, false); idi r = insert_into_queue(set_L, L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates if (nk <= last_k) { k = nk; } else { k = last_k + 1; } if (M < value_M_max) { M <<= 1; } } for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i].id_; } {// Reset is_visited.reset(); } } ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_to_get_distance_range( // const PANNS::idi M, // const PANNS::idi query_id, //// const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids) //// std::vector<idi> &set_K) //{ // dist_max_ = -FLT_MAX; // dist_min_ = FLT_MAX; // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. //// {// For distance range //// if (dist > dist_max_) { //// dist_max_ = dist; //// } //// if (dist < dist_min_) { //// dist_min_ = dist; //// } //// } // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
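// ---------------------------------------------------------------------------
// (Added, illustrative only) The queue primitive behind insert_into_queue()
// and add_into_queue() used throughout this file is "insert one element into
// a sorted, fixed-capacity queue and report where it landed".  Those routines
// are defined elsewhere in this class; the template below is a minimal,
// self-contained sketch of the idea for any type ordered by operator<
// (Candidate orders by distance), not the project's actual implementation.
// ---------------------------------------------------------------------------
template <typename T>
inline typename std::vector<T>::size_type example_insert_into_bounded_sorted_queue(
        std::vector<T> &queue,                          // sorted ascending; size() is the capacity
        typename std::vector<T>::size_type &queue_end,  // number of valid elements
        const T &element)
{
    typedef typename std::vector<T>::size_type size_type;
    const size_type capacity = queue.size();
    // First position whose element is not smaller than the new one.
    size_type loc = std::lower_bound(queue.begin(), queue.begin() + queue_end, element)
                    - queue.begin();
    if (loc >= capacity) {
        return capacity; // Worse than everything kept: callers treat this as "not inserted".
    }
    // Shift the tail right by one slot, dropping the last element when full.
    size_type last = (queue_end < capacity) ? queue_end : capacity - 1;
    for (size_type i = last; i > loc; --i) {
        queue[i] = queue[i - 1];
    }
    queue[loc] = element;
    if (queue_end < capacity) {
        ++queue_end;
    }
    return loc; // Compared against nk by the caller to decide where to resume scanning.
}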
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } //// {// For distance range //// if (dist > dist_max_) { //// dist_max_ = dist; //// } //// if (dist < dist_min_) { //// dist_min_ = dist; //// } //// } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// For histogram // for (idi i_l = 0; i_l < L; ++i_l) { // distf dist = set_L[i_l].distance_; // {// For distance range // if (dist > dist_max_) { // dist_max_ = dist; // } // if (dist < dist_min_) { // dist_min_ = dist; // } // } // } // } // } // //// for (idi k_i = 0; k_i < K; ++k_i) { //// set_K[k_i] = set_L[k_i].id_; //// } //} // ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_myths_M( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// {//test //// printf("query_id: %u\n", query_id); //// } // const idi loc_range = L / 3; // // // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // //// {// For histogram //// const distf dist_range = dist_max_ - dist_min_; //// printf("iter:%u\n", 0); //// for (idi i_l = 0; i_l < L; ++i_l) { //// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0); //// } //// } // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // std::vector<idi> range_count(3, 0); // idi zero_inserted_count = 0; //// {//test //// printf("tmp_count: %u\n", tmp_count); //// } // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } //// {//test //// printf("top_m_candidates_ends: %u\n", top_m_candidates_end); //// } // { // if (0 == top_m_candidates_end) { // break; // } // } // // // uint64_t count_neighbors = 0; // uint64_t count_inserted = 0; // std::vector<idi> locs_to_count(M); // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // // count_neighbors += out_degree; // idi num_inserted = 0; // // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // ++num_inserted; // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); //// { //// printf("c_i: %u " //// "count: %u " //// "loc_inserted: %u\n", //// c_i, //// num_inserted, //// r); //// } // if (r < nk) { // nk = r; // } // { // ++range_count[r / loc_range]; // } // } // { // if (0 == num_inserted) { // ++zero_inserted_count; // } // locs_to_count[c_i] = num_inserted; // count_inserted += num_inserted; // } //// { //// printf("c_i: %u " //// "num_inserted: %u\n", //// c_i, //// num_inserted); //// } // } //// { //// for (idi c_i = top_m_candidates_end; c_i < M; ++c_i) { //// locs_to_count[c_i] = 0; //// } //// printf("iter:%u\n", tmp_count); //// for (idi c_i = 0; c_i < M; ++c_i) { //// printf("%u %u\n", c_i, locs_to_count[c_i]); //// } //// } //// {//test //// idi sum = 0; //// for (const idi ct : range_count) sum += ct; //// printf("tmp_count: %u " //// "k: %u " //// "actual_M: %u %.1f%% " //// "zero_ins: %u %.1f%% " //// "1/3: %u %.1f%% " //// "2/3: %u %.1f%% " //// "3/3: %u %.1f%%\n", //// tmp_count, //// k, //// top_m_candidates_end, 100.0 * top_m_candidates_end / M, //// zero_inserted_count, 100.0 * zero_inserted_count / top_m_candidates_end, //// range_count[0], 100.0 * range_count[0] / sum, //// range_count[1], 100.0 * range_count[1] / sum, //// range_count[2], 100.0 * range_count[2] / sum); //// } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // { // printf("query:%uiter: %u " // "#neighbors: %lu " // "#inserted: %lu " // "ratio: %.2f%%\n", // query_id, tmp_count, // count_neighbors, // count_inserted, // 100.0 * count_inserted / 
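// ---------------------------------------------------------------------------
// (Added, illustrative only) Shape of the tree-style reduction performed by
// merge_all_queues_para_array() / merge_all_queues_queue_base() above: all
// queues live in one flat array (num_queues slots of slot_length elements,
// each slot sorted ascending, ends[] holding each slot's used size), and
// round d merges queue (i + 2^d - 1) into queue (i + 2^(d+1) - 1), so after
// log2(num_queues) rounds everything is folded into the last slot.  This
// sketch assumes num_queues is a power of two, truncates every merged slot to
// slot_length, and omits the nk bookkeeping of the real routines.
// ---------------------------------------------------------------------------
template <typename T>
inline void example_tree_reduce_queues(
        std::vector<T> &flat,                                    // num_queues * slot_length elements
        std::vector<typename std::vector<T>::size_type> &ends,   // used size of each slot
        const typename std::vector<T>::size_type num_queues,     // assumed power of two
        const typename std::vector<T>::size_type slot_length)
{
    typedef typename std::vector<T>::size_type size_type;
    for (size_type stride = 1; stride < num_queues; stride <<= 1) {
        const size_type step = stride << 1;
#pragma omp parallel for
        for (size_type i = 0; i < num_queues; i += step) {
            const size_type bi = i + stride - 1; // source queue
            const size_type ai = i + step - 1;   // destination queue
            if (ends[bi] == 0) {
                continue;
            }
            // Merge the two sorted slots into a temporary buffer, then copy
            // back into slot ai, truncated to the slot capacity.
            std::vector<T> merged(ends[ai] + ends[bi]);
            std::merge(flat.begin() + ai * slot_length,
                       flat.begin() + ai * slot_length + ends[ai],
                       flat.begin() + bi * slot_length,
                       flat.begin() + bi * slot_length + ends[bi],
                       merged.begin());
            const size_type keep = merged.size() < slot_length ? merged.size() : slot_length;
            std::copy(merged.begin(), merged.begin() + keep, flat.begin() + ai * slot_length);
            ends[ai] = keep;
            ends[bi] = 0;
        }
    }
}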
count_neighbors); // } //// {// For histogram ////// const auto it_min = std::min_element(set_L.begin(), set_L.end()); ////// const auto it_max = std::max_element(set_L.begin(), set_L.end()); ////// const distf dist_min = it_min->distance_; ////// const distf dist_max = it_max->distance_; ////// const distf dist_min = it_min->distance_ - 1.0; ////// const distf dist_max = it_max->distance_ + 1.0; //// const distf dist_range = dist_max_ - dist_min_; ////// const distf dist_range = dist_max - dist_min; ////// { ////// printf("it_min->distance_: %f dist_min: %f\n", ////// it_min->distance_, dist_min); ////// } ////// const distf dist_range = it_max->distance_ - it_min->distance_; //// printf("iter:%u\n", tmp_count); //// for (idi i_l = 0; i_l < L; ++i_l) { ////// printf("%f\n", set_L[i_l].distance_); ////// printf("%f\n", (set_L[i_l].distance_ - dist_min) / dist_range * 100.0); //// printf("%f\n", (set_L[i_l].distance_ - dist_min_) / dist_range * 100.0); ////// printf("%.2f\n", (set_L[i_l].distance_ - it_min->distance_) / dist_range * 100.0); //// } //// } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // if (query_id == 3) { // exit(1); // } //} // //// Sequential Top-M algorithm for profiling purpose: byte array, CAS, and OpenMP ////void Searching::search_with_top_m( //inline void Searching::search_with_top_m_profile_bit_CAS( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// std::vector<uint8_t> is_visited(num_v_, 0); // Byte array //// boost::dynamic_bitset<> is_visited(num_v_); // Bit array // BitVector is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { //// is_visited[init_ids[c_i]] = true; // is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
//#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = true; // //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} ///// Backup //inline void Searching::search_with_top_m( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
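// ---------------------------------------------------------------------------
// (Added, illustrative only) The parallel variants above mark vertices as
// visited with a compare-and-swap on a byte array, e.g.
// AtomicOps::CAS(is_visited.data() + nb_id, 0, 1), so that exactly one thread
// wins the right to expand a given neighbor.  AtomicOps::CAS is a project
// helper; the sketch below expresses the same idea with the GCC/Clang __sync
// builtin, which is an assumption about the toolchain, not the project's API.
// ---------------------------------------------------------------------------
inline bool example_claim_visited(std::vector<uint8_t> &is_visited, const idi v_id)
{
    // True for exactly one caller per vertex: the one whose CAS flips 0 -> 1.
    return __sync_bool_compare_and_swap(is_visited.data() + v_id,
                                        static_cast<uint8_t>(0),
                                        static_cast<uint8_t>(1));
}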
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // ////// DEPRECATED: the is_visited array cannot be shared among threads. //inline void Searching::search_with_top_m_no_local_arrays( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // boost::dynamic_bitset<> &is_visited) //// std::vector< std::vector<idi> > &top_m_list) //{ //// boost::dynamic_bitset<> is_visited(num_v_); // // { // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = true; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = true; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} inline void Searching::search_with_top_m_in_batch( const PANNS::idi M, const PANNS::idi batch_start, const PANNS::idi batch_size, const PANNS::idi K, const PANNS::idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list) { std::vector< boost::dynamic_bitset<> > is_visited_list(batch_size, boost::dynamic_bitset<> (num_v_)); // Prepare the init_ids { //#pragma omp parallel for for (idi q_i = 0; q_i < batch_size; ++q_i) { auto &is_visited = is_visited_list[q_i]; for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = true; } } } // Initialize set_L_list { //#pragma omp parallel for for (idi q_i = 0; q_i < batch_size; ++q_i) { const dataf *query_data = queries_load_ + (q_i + batch_start) * dimension_; for (idi i = 0; i < L; i++) { idi v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L_list[q_i][i] = Candidate(v_id, dist, false); // False means not checked. } std::sort(set_L_list[q_i].begin(), set_L_list[q_i].begin() + L); } } { std::vector<idi> joint_queue(M * batch_size); // Joint queue for all shared top-M candidates idi joint_queue_end = 0; boost::dynamic_bitset<> is_in_joint_queue(num_v_); // std::vector< std::vector<idi> > cands_query_ids(num_v_, std::vector<idi>(batch_size)); // If candidate cand_id is selected by query q_i, q_i should be in cands_query_ids[cand_id]. // std::vector<idi> cands_query_ids_ends(num_v_, 0); std::unordered_map< idi, std::vector<idi> > cands_query_ids(batch_size * M); std::vector<idi> ks(batch_size, 0); // Indices of every queue's first unchecked candidate. 
std::vector<idi> nks(batch_size, L); // Indices of highest candidate inserted std::vector<idi> last_ks(batch_size, L); // Indices of lowest candidate unchecked std::vector<idi> queries_not_finished(batch_size); idi queries_not_finished_end = batch_size; for (idi q_i = 0; q_i < batch_size; ++q_i) { queries_not_finished[q_i] = q_i; } bool is_finished = false; idi counter_for_debug = 0; while (!is_finished) { ++counter_for_debug; // Build the new joint queue // Traverse every query's queue for(idi q_i = 0; q_i < queries_not_finished_end; ++q_i) { idi q_local_id = queries_not_finished[q_i]; // last_ks[q_local_id] = L; auto &set_L = set_L_list[q_local_id]; idi top_m_count = 0; for (idi c_i = ks[q_local_id]; c_i < L && top_m_count < M; ++c_i) { if (set_L[c_i].is_checked_) { continue; } set_L[c_i].is_checked_ = true; last_ks[q_local_id] = c_i; ++top_m_count; idi cand_id = set_L[c_i].id_; // Record which query selected cand_id auto tmp_c = cands_query_ids.find(cand_id); if (tmp_c != cands_query_ids.end()) { tmp_c->second.push_back(q_local_id); } else { cands_query_ids.emplace(cand_id, std::vector<idi>()); cands_query_ids[cand_id].reserve(batch_size); cands_query_ids[cand_id].push_back(q_local_id); } // cands_query_ids[cand_id][cands_query_ids_ends[cand_id]++] = q_local_id; // Add candidate cand_id into the joint queue if (is_in_joint_queue[cand_id]) { continue; } is_in_joint_queue[cand_id] = true; joint_queue[joint_queue_end++] = cand_id; } } queries_not_finished_end = 0; // Clear queries_not_finished // Traverse every shared candidate for (idi c_i = 0; c_i < joint_queue_end; ++c_i) { idi cand_id = joint_queue[c_i]; is_in_joint_queue[cand_id] = false; // Reset is_in_joint_queue idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; const auto &query_local_ids = cands_query_ids[cand_id]; // Push neighbors to every queue of the queries that selected cand_id. 
// Traverse cand_id's neighbors // idi &q_i_bound = cands_query_ids_ends[cand_id]; // for (idi q_i = 0; q_i < q_i_bound; ++q_i) { // idi q_local_id = query_local_ids[q_i]; for (idi q_local_id : query_local_ids) { dataf *query_data = queries_load_ + (q_local_id + batch_start) * dimension_; auto &is_visited = is_visited_list[q_local_id]; auto &set_L = set_L_list[q_local_id]; // // Traverse cand_id's neighbors for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; if (is_visited[nb_id]) { continue; } is_visited[nb_id] = true; auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L-1].distance_) { continue; } // if (dist >= set_L[L-1].distance_) { // continue; // } Candidate new_cand(nb_id, dist, false); idi insert_loc = insert_into_queue(set_L, L, new_cand); if (insert_loc < nks[q_local_id]) { nks[q_local_id] = insert_loc; } } } cands_query_ids.erase(cand_id); // q_i_bound = 0; // Clear cands_query_ids[cand_id] } joint_queue_end = 0; // Clear joint_queue for (idi q_local_id = 0; q_local_id < batch_size; ++q_local_id) { if (nks[q_local_id] <= last_ks[q_local_id]) { ks[q_local_id] = nks[q_local_id]; } else { ks[q_local_id] = last_ks[q_local_id] + 1; } nks[q_local_id] = L; last_ks[q_local_id] = L; if (ks[q_local_id] < L) { queries_not_finished[queries_not_finished_end++] = q_local_id; } } if (!queries_not_finished_end) { is_finished = true; } } } { for (idi q_i = 0; q_i < batch_size; ++q_i) { for (idi c_i = 0; c_i < K && c_i < L; ++c_i) { set_K_list[q_i + batch_start][c_i] = set_L_list[q_i][c_i].id_; } } } //// // {//test // for (idi q_i = 0; q_i < batch_size; ++q_i) { // printf("query: %u\n", q_i + batch_start); // for (idi c_i = 0; c_i < K; ++c_i) { // printf("%u: %u %f\n", c_i, set_L_list[q_i][c_i].id_, set_L_list[q_i][c_i].distance_); // } // } // } } //inline void Searching::para_search_with_top_m_critical_area( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
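// ---------------------------------------------------------------------------
// (Added, illustrative only) Core bookkeeping of search_with_top_m_in_batch()
// above: queries in a batch often select the same Top-M candidates, so the
// batch version expands each distinct candidate once and pushes its neighbors
// into every queue that selected it.  This helper shows only the
// deduplication step, assuming the per-query Top-M id lists have already been
// gathered; the real function builds them on the fly and additionally keeps
// the joint queue duplicate-free with a bitset (is_in_joint_queue).
// ---------------------------------------------------------------------------
inline void example_build_joint_queue(
        const std::vector< std::vector<idi> > &top_m_per_query,  // [q] -> selected candidate ids
        std::vector<idi> &joint_queue,                           // output: distinct candidate ids
        std::unordered_map< idi, std::vector<idi> > &selectors)  // output: candidate id -> query ids
{
    joint_queue.clear();
    selectors.clear();
    for (idi q_i = 0; q_i < top_m_per_query.size(); ++q_i) {
        for (idi cand_id : top_m_per_query[q_i]) {
            auto it = selectors.find(cand_id);
            if (it == selectors.end()) {
                // First query to pick cand_id: it also enters the joint queue.
                selectors.emplace(cand_id, std::vector<idi>{q_i});
                joint_queue.push_back(cand_id);
            } else {
                it->second.push_back(q_i);
            }
        }
    }
}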
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_critical_area_no_omp( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_critical_area_yes_omp( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //} // //inline void Searching::para_search_with_top_m_visited_array( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // std::vector<uint8_t> &is_visited) //// std::vector< std::vector<idi> > &top_m_list) //{ //// uint64_t count_visited = 0; // //// std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { ////#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// ++count_visited; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. 
// } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. // // OpenMP reduction(min : nk) has a problem if nk is unsigned. nk might end up with being MAX_UINT. ////#pragma omp parallel for ////#pragma omp parallel for reduction(min : nk) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } //// ++count_visited; // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++count_distance_computation_; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // idi r; ////#pragma omp critical // { // r = insert_into_queue(set_L, L, cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // ////#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // //// { //// printf("query_id: %u " //// "count_visited: %lu %f%%\n", //// query_id, //// count_visited, //// 100.0 * count_visited / num_v_); //// } //} // //inline void Searching::para_search_with_top_m_merge_queues( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //{ //// {//test //// printf("query_id: %u\n", query_id); //// } //// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_; // const idi local_queue_length = L; // std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); // std::vector<idi> local_queues_ends(num_threads_, 0); // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); 
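// ---------------------------------------------------------------------------
// (Added, illustrative only) Every search above issues software prefetches
// before touching a vertex's block in opt_nsg_graph_ (norm, coordinates, and
// adjacency list packed into vertex_bytes_ per vertex), both for the initial
// L candidates and for each expanded candidate's neighbors.  A standalone
// sketch of that access pattern:
// ---------------------------------------------------------------------------
inline void example_prefetch_vertex_block(const char *opt_graph,
                                          const idi v_id,
                                          const uint64_t vertex_bytes)
{
    // Hint the cache to pull in the start of vertex v_id's block; the distance
    // computation that follows shortly will read it sequentially.
    _mm_prefetch(opt_graph + static_cast<uint64_t>(v_id) * vertex_bytes, _MM_HINT_T0);
}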
// } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. //#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. 
//// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } //// {// text //// if (query_id == 4 && //// tmp_count == 5) { //// // Print local queues //// for (int t_i = 0; t_i < num_threads_; ++t_i) { ////// idi start_i = t_i * local_queue_length; //// for (idi q_i = 0; q_i < local_queues_ends[t_i]; ++q_i) { //// printf("t[%u][%u]: " //// "id: %u " //// "dist: %f\n", //// t_i, q_i, //// local_queues_list[t_i][q_i].id_, //// local_queues_list[t_i][q_i].distance_); //// } //// } //// printf("----------\n"); //// for (idi i = 0; i < L; ++i) { //// printf("set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } //// printf("----------\n"); //// } //// } // // Merge. Merge all queues in parallel. // { // if (num_threads_ > 1) { // idi r = merge_all_queues_para_list( // local_queues_list, // local_queues_ends, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[0], // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } //// {//test //// if (query_id == 4) { //// for (idi i = 0; i < L; ++i) { //// printf("tmp_count: %u " //// "set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// tmp_count, //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } //// } //// //// } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// { //// exit(1); //// } //// {//test //// ////// if (query_id == 4) { //// for (idi i = 0; i < L; ++i) { //// printf("set_L[%u]: " //// "id: %u " //// "dist: %f\n", //// i, //// set_L[i].id_, //// set_L[i].distance_); //// } ////// exit(1); ////// } //// } //} // ////// Using local queue and then sequential merge. //inline void Searching::para_search_with_top_m_queues_seq_merge( // const PANNS::idi M, // const PANNS::idi query_id, // const PANNS::idi K, // const PANNS::idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K) //// std::vector< std::vector<idi> > &top_m_list) //{ //// const idi local_queue_length = ((L - 1) / num_threads_ + 1) * width_; // const idi local_queue_length = L; // std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); // std::vector<idi> local_queues_ends(num_threads_, 0); // std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //// for (idi v_i = 0; v_i < L; ++v_i) { //// idi v_id = init_ids[v_i]; //// _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); //// } // // Get the distances of all candidates, store in the set set_L. 
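// ---------------------------------------------------------------------------
// (Added, illustrative only) Skeleton of the lock-free expansion used by the
// para_search_with_top_m_merge_queues* variants above: each OpenMP thread
// pushes the candidates it keeps into its own bounded sorted queue (indexed
// by omp_get_thread_num()), so no synchronization is needed during expansion;
// the local queues are merged into the global queue afterwards (see
// example_tree_reduce_queues above for the merge shape).  Names here are
// placeholders; the real code uses add_into_queue() on Candidate queues.
// ---------------------------------------------------------------------------
template <typename T>
inline void example_scatter_into_local_queues(
        const std::vector<T> &kept,                                    // stand-in for newly computed candidates
        std::vector< std::vector<T> > &local_queues,                   // one sorted bounded queue per thread
        std::vector<typename std::vector<T>::size_type> &local_ends)   // used size of each local queue
{
#pragma omp parallel for
    for (typename std::vector<T>::size_type c_i = 0; c_i < kept.size(); ++c_i) {
        int tid = omp_get_thread_num();
        // Each thread only ever touches its own queue, so this needs no lock.
        example_insert_into_bounded_sorted_queue(local_queues[tid], local_ends[tid], kept[c_i]);
    }
}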
//#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// { //// printf("tmp_count: %u " //// "k: %u\n", //// tmp_count, //// k); //// } // //// unsigned nk = L; //// int nk = L; // // // Select M candidates // idi last_k = L; // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. //#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; // // if (!AtomicOps::CAS(is_visited.data() + nb_id, // static_cast<uint8_t>(0), // static_cast<uint8_t>(1))) { // continue; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); //// idi r; ////#pragma omp critical //// { //// r = insert_into_queue(set_L, L, cand); //// if (r < nk) { //// nk = r; //// } //// } // // Add to the local queue. 
// add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; // // Merge // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// L + 1); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } //// //// {//test //// for (idi k_i = 0; k_i < K; ++k_i) { //// printf("%u: %u: %u %f\n", //// query_id, //// k_i, set_L[k_i].id_, set_L[k_i].distance_); //// } //// exit(1); //// } //} // //inline void Searching::para_search_with_top_m_merge_queues_no_CAS( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<idi> &local_queues_ends, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //{ ////// const idi local_queue_length = ((M - 1) / num_threads_ + 1) * width_; //// const idi local_queue_length = L; //// std::vector< std::vector<Candidate> > local_queues_list(num_threads_, std::vector<Candidate>(local_queue_length)); //// std::vector<idi> local_queues_ends(num_threads_, 0); ////// std::vector<uint8_t> is_visited(num_v_, 0); //// boost::dynamic_bitset<> is_visited(num_v_); // // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. 
//#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L-1].distance_) { // continue; // } //// if (dist >= set_L[L-1].distance_) { //// continue; //// } // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_list[tid], local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } //// // Merge. Merge all queues in parallel. 
//// { //// if (num_threads_ > 1) { //// idi r = merge_all_queues_para( //// local_queues_list, //// local_queues_ends, //// set_L, //// L); //// if (r < nk) { //// nk = r; //// } //// } else { //// if (local_queues_ends[0]) { //// idi r = merge_two_queues_into_1st_queue_seq( //// set_L, //// 0, //// L, //// local_queues_list[0], //// 0, //// local_queues_ends[0]); //// local_queues_ends[0] = 0; //// if (r < nk) { //// nk = r; //// } //// } //// } //// } // // Merge // { // for (int tid = 0; tid < num_threads_; ++tid) { // if (0 == local_queues_ends[tid]) continue; // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, // local_queues_list[tid], // 0, // local_queues_ends[tid]); //// L + 1); // local_queues_ends[tid] = 0; // Reset the local queue // if (r < nk) { // nk = r; // } // } // } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // // {// Reset // is_visited.reset(); //// std::fill(is_visited.begin(), is_visited.end(), 0); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } //} //inline void Searching::para_search_with_top_m_merge_queues_in_array( //inline void Searching::para_search_with_top_m_merge_queues_new_threshold( // const idi M, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue //// std::vector< std::vector<Candidate> > &local_queues_list, // std::vector<Candidate> &local_queues_array, // std::vector<idi> &local_queues_ends, // Sizes of local queue // BitVector &is_visited) //// std::vector<uint8_t> &is_visited) //// boost::dynamic_bitset<> &is_visited) //{ // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { //// is_visited[init_ids[c_i]] = 1; // is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // std::sort(set_L.begin(), set_L.begin() + L); // // idi min_index = L - 1; // distf min_1st = set_L[min_index].distance_; // // std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // if (set_L[c_i].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. 
// set_L[c_i].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[c_i].id_; // } // // // Push M candidates' neighbors into the queue. //#pragma omp parallel for // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // const idi local_queue_start = tid * local_queue_length; // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; //// { // Sequential edition //// if (is_visited[nb_id]) { //// continue; //// } //// is_visited[nb_id] = 1; //// } //// { // __ATOMIC_SEQ_CST edition //// if (!AtomicOps::CAS(is_visited.data() + nb_id, //// static_cast<uint8_t>(0), //// static_cast<uint8_t>(1))) { //// continue; //// } //// } //// {// Acquire and Release edition //// if (__atomic_load_n(is_visited.data() + nb_id, __ATOMIC_ACQUIRE)) { //// continue; //// } //// __atomic_store_n(is_visited.data() + nb_id, 1, __ATOMIC_RELEASE); //// } // {// Self-defined BitVector // if (is_visited.atomic_is_bit_set(nb_id)) { // continue; // } // is_visited.atomic_set_bit(nb_id); // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // // if (dist > min_1st) { // continue; // } else if (min_index > 0) { // // Inserted, so min_1st needs update // if (dist > set_L[min_index - 1].distance_) { // min_1st = dist; // if (min_index < L - 1) { // ++min_index; // } // } else { // min_1st = set_L[--min_index].distance_; // } //// min_1st = set_L[--min_index].distance_; // } // //// if (dist > set_L[L-1].distance_) { //// continue; //// } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // add_into_queue(local_queues_array, local_queue_start, local_queues_ends[tid], local_queue_length, cand); // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // // idi nk = L; //// // Merge. Parallel merging in every two queues. //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_para( //// set_L, //// 0, //// L, //// local_queues_list[tid], //// 0, //// local_queues_ends[tid]); ////// idi r = merge_two_queues_into_1st_queue_seq( ////// set_L, ////// 0, ////// L, ////// local_queues_list[tid], ////// 0, ////// local_queues_ends[tid]); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } // // Merge. Merge all queues in parallel. 
// { // if (num_threads_ > 1) { // idi r = merge_all_queues_para_array( //// local_queues_list, // local_queues_array, // local_queues_ends, // local_queue_length, // set_L, // L); // if (r < nk) { // nk = r; // } // } else { // if (local_queues_ends[0]) { // idi r = merge_two_queues_into_1st_queue_seq_fixed( // set_L, // 0, // L, //// local_queues_list[0], // local_queues_array, // 0, // local_queues_ends[0]); // local_queues_ends[0] = 0; // if (r < nk) { // nk = r; // } // } // } // } //// // Merge Sequentially //// { //// for (int tid = 0; tid < num_threads_; ++tid) { //// if (0 == local_queues_ends[tid]) continue; //// idi r = merge_two_queues_into_1st_queue_seq_fixed( //// set_L, //// 0, //// L, ////// local_queues_list[tid], ////// 0, //// local_queues_array, //// tid * local_queue_length, //// local_queues_ends[tid]); ////// L + 1); //// local_queues_ends[tid] = 0; // Reset the local queue //// if (r < nk) { //// nk = r; //// } //// } //// } // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // } // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// is_visited.reset(); //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } //} /* * 5/7/2020-15:14 * Use 1 threads to scale M until the value_M_middle. * Then use multiple threads. */ inline void Searching::para_search_with_top_m_merge_queues_middle_m( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { // const idi base_set_L = (num_threads_ - 1) * local_queue_length; { #pragma omp parallel for for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; } } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. } count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + L); std::sort( set_L.begin() + base_set_L, set_L.begin() + base_set_L + L); local_queues_ends[num_threads_ - 1] = L; idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug idi M = 1; { // Single thread while (k < L && M < value_M_middle) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. 
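            // Collect up to M unchecked candidates from set_L, scanning from position k.
            // Each one is marked checked and its id is queued in top_m_candidates;
            // last_k records the queue position of the last candidate taken, which
            // bounds how far k may advance if this iteration inserts nothing closer.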
for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } { // Multiple Threads while (k < L) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. //#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Add to the local queue. 
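                    // Distance passed the pruning test: thread 0 inserts straight into
                    // the global queue (set_L at offset base_set_L, capacity L), while
                    // every other thread appends to its own segment of set_L at offset
                    // (tid - 1) * local_queue_length, so no synchronization is needed here.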
if (0 != tid) { // Non-Master threads using local queues add_into_queue( set_L, (tid - 1) * local_queue_length, local_queues_ends[tid - 1], local_queue_length, cand); } else { // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // {// Local queues' ends // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } // // Merge. Merge all queues in parallel. { time_merge_ -= WallTimer::get_time_mark(); if (num_threads_ > 1) { idi r = merge_all_queues_para_array( set_L, local_queues_ends, local_queue_length, L); if (r < nk) { nk = r; } } time_merge_ += WallTimer::get_time_mark(); } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + base_set_L].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } // {//test // if (3 == query_id) { // exit(1); // } // } } inline void Searching::para_search_with_top_m_merge_queues_middle_m_no_merge( const uint64_t computation_threshold, const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, const idi init_size, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { uint64_t count_single_query_computation = 0; uint64_t count_init_computation = 0; uint64_t count_seq_computation = 0; uint64_t count_par_computation = 0; // {//test // printf("query_id: %u\n", query_id); // } // time_initialization_ -= WallTimer::get_time_mark(); // const idi base_set_L = (num_threads_ - 1) * local_queue_length; { #pragma omp parallel for for (idi c_i = 0; c_i < init_size; ++c_i) { // for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; // is_visited.atomic_set_bit(init_ids[c_i]); } } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < init_size; ++v_i) { // for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < init_size; i++) { // for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. // set_L[i] = Candidate(v_id, dist, false); // False means not checked. 
} count_distance_computation_ += tmp_count_computation; count_init_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + L); std::sort( set_L.begin() + base_set_L, set_L.begin() + base_set_L + init_size); // set_L.begin() + base_set_L + L); local_queues_ends[num_threads_ - 1] = init_size; // local_queues_ends[num_threads_ - 1] = L; // time_initialization_ += WallTimer::get_time_mark(); // time_sequential_phase_ -= WallTimer::get_time_mark(); // std::vector<idi> top_m_candidates(M); idi &global_queue_size = local_queues_ends[num_threads_ - 1]; idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug idi M = 1; { // Single thread while (k < L && M < value_M_middle && count_single_query_computation <= computation_threshold) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // int real_threads = std::min(static_cast<int>(M), num_threads_); // idi queue_base = num_threads_ - real_threads; // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) { // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) { // if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, global_queue_size, // local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; count_seq_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // {// Local queues' ends // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } // time_sequential_phase_ += WallTimer::get_time_mark(); // time_parallel_phase_ -= WallTimer::get_time_mark(); { // Multiple Threads 
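        // Parallel phase: entered once the warm-up above stops, i.e. when M has grown
        // to value_M_middle, the queue has no unchecked candidate below L, or the
        // per-query computation budget is spent. Each iteration now expands up to M
        // candidates across all threads, still subject to computation_threshold.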
while (k < L and count_single_query_computation <= computation_threshold) { // while (k < L) { ++tmp_count; // {//test // printf("tmp_count: %d " // "k: %u " // "global_queue_size: %u\n", // tmp_count, // k, // global_queue_size); // } // int real_threads = std::min(static_cast<int>(M), num_threads_); // idi queue_base = num_threads_ - real_threads; // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < global_queue_size && top_m_candidates_end < M; ++c_i) { // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. //#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[global_queue_size - 1 + base_set_L].distance_) { // if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Add to the local queue. if (0 != tid) { // Non-Master threads using local queues add_into_queue( set_L, (tid - 1) * local_queue_length, local_queues_ends[tid - 1], local_queue_length, cand); } else { // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, global_queue_size, // local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; count_par_computation += tmp_count_computation; count_single_query_computation += tmp_count_computation; tmp_count_computation = 0; // {// Local queues' ends // printf("query%u:iter: %u", query_id, tmp_count); // for (int i_t = 0; i_t < num_threads_; ++i_t) { // printf(" [%u]: %u", i_t, local_queues_ends[i_t]); // } // printf("\n"); // } // Merge. Merge all queues in parallel. 
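            // Combine the thread-local queues back into the global queue; the returned
            // index r is compared with nk so the outer loop can resume scanning from
            // the earliest queue position that received a new candidate.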
{ if (num_threads_ > 1) { // idi r = merge_all_queues_queue_base( // set_L, // local_queues_ends, // queue_base, // real_threads, // local_queue_length, // L); idi r = merge_all_queues_para_array( set_L, local_queues_ends, local_queue_length, L); if (r < nk) { nk = r; } } } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } // {// Print relative distance //// distf top_dist = set_L[base_set_L].distance_; // for (idi i_l = 0; i_l < L; ++i_l) { // printf("%u %f\n", // tmp_count, set_L[i_l + base_set_L].distance_); //// tmp_count, set_L[i_l + base_set_L].distance_ - top_dist); // } // } } } // time_parallel_phase_ += WallTimer::get_time_mark(); #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + base_set_L].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } // {//test // if (3 == query_id) { // exit(1); // } // } // {//test // printf("count_single: %lu " // "ct_init: %lu " // "ct_seq: %lu " // "ct_par: %lu\n", // count_single_query_computation, // count_init_computation, // count_seq_computation, // count_par_computation); // } } /* * 6/15/2020-14:40 * Queues merging together to the global queue */ inline void Searching::para_search_with_top_m_merge_queues_sequential_merge( const idi value_M_middle, const idi value_M_max, const idi query_id, const idi K, const idi L, std::vector<Candidate> &set_L, const std::vector<idi> &init_ids, std::vector<idi> &set_K, const idi local_queue_length, // Maximum size of local queue const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; std::vector<idi> &local_queues_ends, // Sizes of local queue std::vector<idi> &top_m_candidates, boost::dynamic_bitset<> &is_visited) { // const idi base_set_L = (num_threads_ - 1) * local_queue_length; { #pragma omp parallel for for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; // is_visited.atomic_set_bit(init_ids[c_i]); } } const dataf *query_data = queries_load_ + query_id * dimension_; #pragma omp parallel for for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. // set_L[i] = Candidate(v_id, dist, false); // False means not checked. } count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + L); std::sort( set_L.begin() + base_set_L, set_L.begin() + base_set_L + L); local_queues_ends[num_threads_ - 1] = L; // std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. 
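    // M starts at 1 and doubles every iteration (capped at value_M_max); the
    // single-thread warm-up below runs only while M is still below value_M_middle.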
idi tmp_count = 0; // for debug idi M = 1; { // Single thread while (k < L && M < value_M_middle) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } { // Multiple Threads while (k < L) { ++tmp_count; // {//test // if (num_threads_ == 2) { // printf("tmp_count: %d " // "k: %u\n", // tmp_count, // k); // } // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. 
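            // Expansion below is still parallel; the main difference in this variant is
            // the merge step, which calls merge_all_queues_all_together_in_sequential()
            // to fold every local queue into the global queue in one sequential pass
            // instead of the parallel merge_all_queues_para_array() used by the other kernels.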
//#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) #pragma omp parallel for reduction(+ : tmp_count_computation) for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Add to the local queue. if (0 != tid) { // Non-Master threads using local queues add_into_queue( set_L, (tid - 1) * local_queue_length, local_queues_ends[tid - 1], local_queue_length, cand); } else { // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_ - 1], L, cand); if (r < nk) { nk = r; } } } } top_m_candidates_end = 0; // Clear top_m_candidates count_distance_computation_ += tmp_count_computation; tmp_count_computation = 0; // // Merge. Merge all queues in parallel. { // {//test // for (idi q_i = 0; q_i < num_threads_; ++q_i) { // if (0 == local_queues_ends[q_i]) { // continue; // } // for (idi e_i = 0; e_i < local_queues_ends[q_i]; ++e_i) { // printf("tmp_count: %u " // "q_i: %u " // "[%u]: (%u, %f)\n", // tmp_count, // q_i, // e_i, set_L[q_i * local_queue_length + e_i].id_, set_L[q_i * local_queue_length + e_i].distance_); // } // } // } // time_merge_ -= WallTimer::get_time_mark(); if (num_threads_ > 1) { idi r = merge_all_queues_all_together_in_sequential( set_L, local_queues_ends, local_queue_length, L); // idi r = merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); if (r < nk) { nk = r; } // {//test // printf("tmp_count: %u " // "r: %u " // "last_k: %u\n", // tmp_count, // r, // last_k); // for (idi l_i = 0; l_i < L; ++l_i) { // printf("tmp_count: %u " // "[%u]: (%u, %f)\n", // tmp_count, // l_i, set_L[l_i + base_set_L].id_, set_L[l_i + base_set_L].distance_); // } // } } // time_merge_ += WallTimer::get_time_mark(); } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } #pragma omp parallel for for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + base_set_L].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } // {//test // if (0 == query_id) { // exit(1); // } // } } /* * 6/19/2020: * Intra-query + Inter-query */ inline void Searching::para_search_with_top_m_nested_para( const idi batch_start, const idi batch_size, const idi value_M_middle, const idi value_M_max, const idi K, const idi L, std::vector< std::vector<Candidate> > &set_L_list, const std::vector<idi> &init_ids, std::vector< std::vector<idi> > &set_K_list, const idi local_queue_length, // Maximum 
size of local queue const idi base_set_L, // base_set_L = (num_threads_intra_ - 1) * local_queue_length; std::vector< std::vector<idi> > &local_queues_ends_list, // Sizes of local queue std::vector< std::vector<idi> > &top_m_candidates_list, std::vector< boost::dynamic_bitset<> > &is_visited_list) { {// Initialize is_visited flag array #pragma omp parallel for num_threads(num_threads_inter_) for (idi q_i = 0; q_i < batch_size; ++q_i) { auto &is_visited = is_visited_list[q_i]; #pragma omp parallel for num_threads(num_threads_intra_) for (idi c_i = 0; c_i < L; ++c_i) { is_visited[init_ids[c_i]] = 1; } } } #pragma omp parallel for for (idi v_i = 0; v_i < L; ++v_i) { idi v_id = init_ids[v_i]; _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); } uint64_t tmp_count_total_computation = 0; #pragma omp parallel for num_threads(num_threads_inter_) reduction(+ : tmp_count_total_computation) for (idi q_i = 0; q_i < batch_size; ++q_i) { idi query_id = batch_start + q_i; auto &set_L = set_L_list[q_i]; auto &local_queues_ends = local_queues_ends_list[q_i]; auto &is_visited = is_visited_list[q_i]; const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } uint64_t tmp_count_computation = 0; // Get the distances of all candidates, store in the set set_L. //#pragma omp parallel for #pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_) for (unsigned i = 0; i < L; i++) { unsigned v_id = init_ids[i]; auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); dataf norm = *v_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(v_data, query_data, norm); set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. // set_L[i] = Candidate(v_id, dist, false); // False means not checked. } // count_distance_computation_ += tmp_count_computation; tmp_count_total_computation += tmp_count_computation; tmp_count_computation = 0; // std::sort(set_L.begin(), set_L.begin() + L); std::sort( set_L.begin() + base_set_L, set_L.begin() + base_set_L + L); local_queues_ends[num_threads_intra_ - 1] = L; // std::vector<idi> top_m_candidates(M); idi top_m_candidates_end = 0; idi k = 0; // Index of first unchecked candidate. idi tmp_count = 0; // for debug idi M = 1; auto &top_m_candidates = top_m_candidates_list[q_i]; { // Single thread while (k < L && M < value_M_middle) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. 
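                // Warm-up expansion runs on a single intra-query thread; the batch loop
                // above already spreads whole queries over num_threads_inter_ threads,
                // so only this query's global queue (at offset base_set_L) is touched.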
for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); // {//test // if (391655 == nb_id) { // printf("tmp_count: %u " // "nb_id: %u " // "distf: %f\n", // tmp_count, // nb_id, // dist); // } // } if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_intra_ - 1], L, cand); if (r < nk) { nk = r; } } } top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; tmp_count_total_computation += tmp_count_computation; tmp_count_computation = 0; if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } { // Multiple Threads while (k < L) { ++tmp_count; // {//test // printf("tmp_count: %d\n", tmp_count); // } // Select M candidates idi last_k = L; // Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { idi index_set_L = c_i + base_set_L; if (set_L[index_set_L].is_checked_) { continue; } last_k = c_i; // Record the location of the last candidate selected. set_L[index_set_L].is_checked_ = true; top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; } idi nk = L; // Push M candidates' neighbors into the queue. #pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(num_threads_intra_) for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { int tid = omp_get_thread_num(); idi cand_id = top_m_candidates[c_i]; _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); idi out_degree = *out_edges++; for (idi n_i = 0; n_i < out_degree; ++n_i) { _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); } for (idi e_i = 0; e_i < out_degree; ++e_i) { idi nb_id = out_edges[e_i]; { // Sequential edition if (is_visited[nb_id]) { continue; } is_visited[nb_id] = 1; } auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); dataf norm = *nb_data++; // ++count_distance_computation_; ++tmp_count_computation; distf dist = compute_distance_with_norm(nb_data, query_data, norm); // {//test // if (391655 == nb_id) { // printf("tmp_count: %u " // "nb_id: %u " // "distf: %f\n", // tmp_count, // nb_id, // dist); // } // } if (dist > set_L[L - 1 + base_set_L].distance_) { continue; } Candidate cand(nb_id, dist, false); // Add to the local queue. 
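                        // Same per-thread queue layout as the single-query kernels, but
                        // sized by num_threads_intra_: intra-query thread 0 owns the
                        // global queue at base_set_L, and thread t > 0 writes to the
                        // segment starting at (t - 1) * local_queue_length.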
if (0 != tid) { // Non-Master threads using local queues add_into_queue( set_L, (tid - 1) * local_queue_length, local_queues_ends[tid - 1], local_queue_length, cand); } else { // Thread 0 maintains the "global" queue idi r = add_into_queue( set_L, base_set_L, local_queues_ends[num_threads_intra_ - 1], L, cand); if (r < nk) { nk = r; } } } } top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; tmp_count_total_computation += tmp_count_computation; tmp_count_computation = 0; // // Merge. Merge all queues in parallel. { // time_merge_ -= WallTimer::get_time_mark(); if (num_threads_intra_ > 1) { idi r = merge_all_queues_para_array( set_L, local_queues_ends, local_queue_length, L); if (r < nk) { nk = r; } } // time_merge_ += WallTimer::get_time_mark(); } if (nk <= last_k) { k = nk; } else { k = last_k + 1; } {// Scale M if (M < value_M_max) { M <<= 1; } else { M = value_M_max; } } } } count_distance_computation_ += tmp_count_total_computation; tmp_count_total_computation = 0; auto &set_K = set_K_list[query_id]; #pragma omp parallel for num_threads(num_threads_intra_) for (idi k_i = 0; k_i < K; ++k_i) { set_K[k_i] = set_L[k_i + base_set_L].id_; // set_K[k_i] = set_L[k_i].id_; } {// Reset // std::fill(is_visited.begin(), is_visited.end(), 0); is_visited.reset(); // is_visited.clear_all(); std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); } } // {//test // if (3 == query_id) { // exit(1); // } // } // { // for (idi k_i = 0; k_i < K; ++k_i) { // printf("%u: (%u %f)\n", // k_i, set_L_list[0][k_i].id_, set_L_list[0][k_i].distance_); // } // if (0 == batch_start) { // exit(1); // } // } } ///* // * 6/22/2020-09:38 // * A synchronized last element as the sentinel // */ //inline void Searching::para_search_with_top_m_merge_queues_global_threshold( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue // std::vector<idi> &top_m_candidates, // boost::dynamic_bitset<> &is_visited) //{ //// const idi base_set_L = (num_threads_ - 1) * local_queue_length; // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. 
// } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); // local_queues_ends[num_threads_ - 1] = L; // // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // { // Single thread // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // // } // } // // { // Multiple Threads // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // // idi nk = L; // // Push M candidates' neighbors into the queue. 
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // {// Local queues' ends //// printf("query%u:iter: %u", query_id, tmp_count); // idi total_elements = 0; // for (int i_t = 0; i_t < num_threads_ - 1; ++i_t) { // total_elements += local_queues_ends[i_t]; // } // number_local_elements_ += total_elements; //// printf(" total_elements: %u+%u\n", total_elements - local_queues_ends[num_threads_ - 1], local_queues_ends[num_threads_ - 1]); //// for (int i_t = 0; i_t < num_threads_; ++i_t) { //// printf(" [%u]: %u", i_t, local_queues_ends[i_t]); //// } //// printf("\n"); // } // //// // Merge. Merge all queues in parallel. // { // time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { // idi r = merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); // if (r < nk) { // nk = r; // } // } // time_merge_ += WallTimer::get_time_mark(); // } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // // } // } // // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// if (0 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/7/2020-16:55 // * Use 1 threads to scale M until the value_M_middle. // * Then use multiple threads. // * Except for Thread 0, other threads are collectors. They collect, but do not merge. // * Only merge once after Thread 0 stops. 
// */ //inline void Searching::para_search_with_top_m_merge_queues_collectors( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //// std::vector<distf> &local_thresholds) //// BitVector &is_visited) //{ // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); //// boost::sort::block_indirect_sort( //// set_L.begin() + base_set_L, //// set_L.begin() + base_set_L + L, //// num_threads_); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. // idi tmp_count = 0; // for debug // idi M = 1; // // // Single thread // { // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. 
// for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // // Multiple Threads // { //// while (k < L/num_threads_/2) { // while (k < L) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi chunk_size; // if (num_threads_ <= top_m_candidates_end) { // chunk_size = (top_m_candidates_end - 1) / num_threads_ + 1; // } else { // chunk_size = 1; // } // idi nk = L; // // Push M candidates' neighbors into the queue. 
////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) ////#pragma omp parallel for reduction(+ : tmp_count_computation) //#pragma omp parallel for reduction(+ : tmp_count_computation) schedule(static, chunk_size) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); //// { //// if (c_i < chunk_size && tid != 0) { //// printf("query_id: %u " //// "tmp_count: %u " //// "chunk_size: %u " //// "c_i: %u " //// "tid: %u\n", //// query_id, //// tmp_count, //// chunk_size, //// c_i, //// tid); //// } //// } // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // ////// // Merge. Merge all queues in parallel. //// { //// time_merge_ -= WallTimer::get_time_mark(); //// if (num_threads_ > 1) { ////// idi r = merge_all_queues_queue_base( ////// set_L, ////// local_queues_ends, ////// queue_base, ////// real_threads, ////// local_queue_length, ////// L); //// idi r = merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); //// if (r < nk) { //// nk = r; //// } //// } //// time_merge_ += WallTimer::get_time_mark(); //// } // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // //// // Merge only once after Master Thread stops. 
//// { //// time_merge_ -= WallTimer::get_time_mark(); //// if (num_threads_ > 1) { ////// idi r = merge_all_queues_queue_base( ////// set_L, ////// local_queues_ends, ////// queue_base, ////// real_threads, ////// local_queue_length, ////// L); //// merge_all_queues_para_array( //// set_L, //// local_queues_ends, //// local_queue_length, //// L); //// } //// time_merge_ += WallTimer::get_time_mark(); //// } // } // // //#pragma omp parallel for // for (idi k_i = 0; k_i < K; ++k_i) { // set_K[k_i] = set_L[k_i + base_set_L].id_; //// set_K[k_i] = set_L[k_i].id_; // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// printf("tmp_count: %u\n", tmp_count); //// if (3 == query_id) { //// exit(1); //// } //// } //} ///* // * 6/8/2020-16:39 // * Selecting rather than merging // */ //inline void Searching::para_search_with_top_m_merge_queues_selecting( // const idi value_M_middle, // const idi value_M_max, // const idi query_id, // const idi K, // const idi L, // std::vector<Candidate> &set_L, // const std::vector<idi> &init_ids, // std::vector<idi> &set_K, // const idi local_queue_length, // Maximum size of local queue // const idi base_set_L, // base_set_L = (num_threads_ - 1) * local_queue_length; // std::vector<idi> &local_queues_ends, // Sizes of local queue //// std::vector<Candidate> &top_m_candidates, // std::vector<idi> &top_m_candidates, //// std::vector<uint8_t> &is_visited) // boost::dynamic_bitset<> &is_visited) //{ // { //#pragma omp parallel for // for (idi c_i = 0; c_i < L; ++c_i) { // is_visited[init_ids[c_i]] = 1; //// is_visited.atomic_set_bit(init_ids[c_i]); // } // } // // const dataf *query_data = queries_load_ + query_id * dimension_; //#pragma omp parallel for // for (idi v_i = 0; v_i < L; ++v_i) { // idi v_id = init_ids[v_i]; // _mm_prefetch(opt_nsg_graph_ + v_id * vertex_bytes_, _MM_HINT_T0); // } // uint64_t tmp_count_computation = 0; // // Get the distances of all candidates, store in the set set_L. ////#pragma omp parallel for //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (unsigned i = 0; i < L; i++) { // unsigned v_id = init_ids[i]; // auto *v_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + v_id * vertex_bytes_); // dataf norm = *v_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(v_data, query_data, norm); // set_L[i + base_set_L] = Candidate(v_id, dist, false); // False means not checked. //// set_L[i] = Candidate(v_id, dist, false); // False means not checked. // } // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; //// std::sort(set_L.begin(), set_L.begin() + L); // std::sort( // set_L.begin() + base_set_L, // set_L.begin() + base_set_L + L); //// boost::sort::block_indirect_sort( //// set_L.begin() + base_set_L, //// set_L.begin() + base_set_L + L, //// num_threads_); // local_queues_ends[num_threads_ - 1] = L; // //// std::vector<idi> top_m_candidates(M); // idi top_m_candidates_end = 0; // idi k = 0; // Index of first unchecked candidate. 
// idi tmp_count = 0; // for debug // idi M = 1; // // // Single thread // { // while (k < L && M < value_M_middle) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } // //// int real_threads = std::min(static_cast<int>(M), num_threads_); //// idi queue_base = num_threads_ - real_threads; // // Select M candidates // idi last_k = L; //// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. // for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { // idi index_set_L = c_i + base_set_L; // if (set_L[index_set_L].is_checked_) { // continue; // } // last_k = c_i; // Record the location of the last candidate selected. // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // // idi nk = L; // // Push M candidates' neighbors into the queue. // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Thread 0 maintains the "global" queue // idi r = add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); // if (r < nk) { // nk = r; // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // // if (nk <= last_k) { // k = nk; // } else { // k = last_k + 1; // } // // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // // Multiple Threads // { //// while (k < L/num_threads_/2) { //// while (k < L) { // while (true) { // ++tmp_count; //// {//test //// printf("tmp_count: %d\n", tmp_count); //// } //// // Select M candidates //// idi last_k = L; ////// Cannot use OpenMP here because this for-loop needs early break by the 2nd condition. //// for (idi c_i = k; c_i < L && top_m_candidates_end < M; ++c_i) { //// idi index_set_L = c_i + base_set_L; //// if (set_L[index_set_L].is_checked_) { //// continue; //// } //// last_k = c_i; // Record the location of the last candidate selected. //// set_L[index_set_L].is_checked_ = true; //// top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; //// } // // // Select M candidates // { // idi traverse_count = 0; // idi bound_sub = L; // This is not always true! 
// for (idi sub = 0; sub < bound_sub && top_m_candidates_end < M && traverse_count < L; ++sub) { // for (int tid = 0; tid < num_threads_ && top_m_candidates_end < M && traverse_count < L; ++tid) { // if (sub >= local_queues_ends[tid]) { // continue; // } // idi index_set_L = tid * local_queue_length + sub; // if (set_L[index_set_L].is_checked_) { // continue; // } // set_L[index_set_L].is_checked_ = true; // top_m_candidates[top_m_candidates_end++] = set_L[index_set_L].id_; // } // } // // if (0 == top_m_candidates_end) { // break; // } // } // //// idi nk = L; // // Push M candidates' neighbors into the queue. ////#pragma omp parallel for reduction(+ : tmp_count_computation) num_threads(real_threads) //#pragma omp parallel for reduction(+ : tmp_count_computation) // for (idi c_i = 0; c_i < top_m_candidates_end; ++c_i) { // int tid = omp_get_thread_num(); // idi cand_id = top_m_candidates[c_i]; // _mm_prefetch(opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_, _MM_HINT_T0); // idi *out_edges = (idi *) (opt_nsg_graph_ + cand_id * vertex_bytes_ + data_bytes_); // idi out_degree = *out_edges++; // for (idi n_i = 0; n_i < out_degree; ++n_i) { // _mm_prefetch(opt_nsg_graph_ + out_edges[n_i] * vertex_bytes_, _MM_HINT_T0); // } // for (idi e_i = 0; e_i < out_degree; ++e_i) { // idi nb_id = out_edges[e_i]; // { // Sequential edition // if (is_visited[nb_id]) { // continue; // } // is_visited[nb_id] = 1; // } // // auto *nb_data = reinterpret_cast<dataf *>(opt_nsg_graph_ + nb_id * vertex_bytes_); // dataf norm = *nb_data++; //// ++count_distance_computation_; // ++tmp_count_computation; // distf dist = compute_distance_with_norm(nb_data, query_data, norm); // if (dist > set_L[L - 1 + base_set_L].distance_) { // continue; // } // // Candidate cand(nb_id, dist, false); // // Add to the local queue. // if (0 != tid) { // // Non-Master threads using local queues // add_into_queue( // set_L, // (tid - 1) * local_queue_length, // local_queues_ends[tid - 1], // local_queue_length, // cand); // } else { // // Thread 0 maintains the "global" queue //// idi r = // add_into_queue( // set_L, // base_set_L, // local_queues_ends[num_threads_ - 1], // L, // cand); //// if (r < nk) { //// nk = r; //// } // } // } // } // top_m_candidates_end = 0; // Clear top_m_candidates // count_distance_computation_ += tmp_count_computation; // tmp_count_computation = 0; // //// // Merge. Merge all queues in parallel. 
// { // time_merge_ -= WallTimer::get_time_mark(); // if (num_threads_ > 1) { //// idi r = merge_all_queues_queue_base( //// set_L, //// local_queues_ends, //// queue_base, //// real_threads, //// local_queue_length, //// L); //// idi r = // merge_all_queues_para_array( // set_L, // local_queues_ends, // local_queue_length, // L); //// if (r < nk) { //// nk = r; //// } // } // time_merge_ += WallTimer::get_time_mark(); // } //// if (nk <= last_k) { //// k = nk; //// } else { //// k = last_k + 1; //// } // {// Scale M // if (M < value_M_max) { // M <<= 1; // } else { // M = value_M_max; // } // } // } // } // // ////#pragma omp parallel for //// for (idi k_i = 0; k_i < K; ++k_i) { //// set_K[k_i] = set_L[k_i + base_set_L].id_; ////// set_K[k_i] = set_L[k_i].id_; //// } // // { // idi k_i = 0; // idi bound_sub = K / num_threads_; // for (idi sub = 0; sub < bound_sub; ++sub) { // for (int tid = 0; tid < num_threads_; ++tid) { // idi index_set_L = tid * local_queue_length + sub; // set_K[k_i++] = set_L[index_set_L].id_; // } // } // idi remain = K - k_i; // if (remain) { // for (int tid = 0; tid < remain; ++tid) { // idi index_set_L = tid * local_queue_length + bound_sub; // set_K[k_i++] = set_L[index_set_L].id_; // } // } // } // // {// Reset //// std::fill(is_visited.begin(), is_visited.end(), 0); // is_visited.reset(); //// is_visited.clear_all(); // std::fill(local_queues_ends.begin(), local_queues_ends.end(), 0); // } // //// {//test //// printf("tmp_count: %u\n", tmp_count); //// if (3 == query_id) { //// exit(1); //// } //// } //} } // namespace PANNS #endif //BATCH_SEARCHING_SEARCHING_H
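/* The commented-out "selecting" variant above replaces the parallel queue merge
 * with a round-robin scan over the per-thread local queues. Below is a minimal
 * C sketch of that selection step; the Cand struct, the contiguous queue layout,
 * and the name select_top_m are assumptions made only for this illustration and
 * are not part of the original PANNS code. */
typedef struct { unsigned id; float dist; int checked; } Cand;

/* queues: num_threads sorted local queues laid out contiguously; queue t
 * occupies [t * queue_cap, t * queue_cap + ends[t]).  Scan position by
 * position across all queues and collect the first M unchecked candidates. */
static unsigned select_top_m(Cand *queues, const unsigned *ends,
                             unsigned num_threads, unsigned queue_cap,
                             unsigned M, unsigned *out_ids)
{
    unsigned selected = 0;
    unsigned pos, t;
    for (pos = 0; pos < queue_cap && selected < M; ++pos) {
        for (t = 0; t < num_threads && selected < M; ++t) {
            if (pos >= ends[t]) continue;        /* queue t has no entry at this depth */
            Cand *c = &queues[t * queue_cap + pos];
            if (c->checked) continue;            /* already expanded in an earlier round */
            c->checked = 1;
            out_ids[selected++] = c->id;
        }
    }
    return selected;                             /* 0 means no unchecked candidates remain */
}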
indirectaccess1-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* This program is extracted from a real application at LLNL. Two pointers (xa1 and xa2) have a pair of values with a distance of 12. They are used as start base addresses for two 1-D arrays. Their index set has two indices with distance of 12: 999 +12 = 1011. So there is loop carried dependence. However, having loop carried dependence does not mean data races will always happen. The iterations with loop carried dependence must be scheduled to different threads in order for data races to happen. In this example, we use schedule(static,1) to increase the chance that the dependent loop iterations will be scheduled to different threads. Data race pair: xa1[idx]@128:5 vs. 
   xa2[idx]@129:5
*/

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

#define N 180

int indexSet[N] = {
     521,  523,  525,  527,  529,  531,
     547,  549,  551,  553,  555,  557,
     573,  575,  577,  579,  581,  583,
     599,  601,  603,  605,  607,  609,
     625,  627,  629,  631,  633,  635,
     651,  653,  655,  657,  659,  661,
     859,  861,  863,  865,  867,  869,
     885,  887,  889,  891,  893,  895,
     911,  913,  915,  917,  919,  923, // change original 921 to 923 = 911+12
     937,  939,  941,  943,  945,  947,
     963,  965,  967,  969,  971,  973,
     989,  991,  993,  995,  997,  999,
    1197, 1199, 1201, 1203, 1205, 1207,
    1223, 1225, 1227, 1229, 1231, 1233,
    1249, 1251, 1253, 1255, 1257, 1259,
    1275, 1277, 1279, 1281, 1283, 1285,
    1301, 1303, 1305, 1307, 1309, 1311,
    1327, 1329, 1331, 1333, 1335, 1337,
    1535, 1537, 1539, 1541, 1543, 1545,
    1561, 1563, 1565, 1567, 1569, 1571,
    1587, 1589, 1591, 1593, 1595, 1597,
    1613, 1615, 1617, 1619, 1621, 1623,
    1639, 1641, 1643, 1645, 1647, 1649,
    1665, 1667, 1669, 1671, 1673, 1675,
    1873, 1875, 1877, 1879, 1881, 1883,
    1899, 1901, 1903, 1905, 1907, 1909,
    1925, 1927, 1929, 1931, 1933, 1935,
    1951, 1953, 1955, 1957, 1959, 1961,
    1977, 1979, 1981, 1983, 1985, 1987,
    2003, 2005, 2007, 2009, 2011, 2013};

int main (int argc, char* argv[])
{
  // max index value is 2013. +12 to obtain a valid xa2[idx] after xa1+12.
  // +1 to ensure a reference like base[2015] is within the bound.
  double * base = (double*) malloc(sizeof(double)* (2013+12+1));
  if (base == 0) {
    printf ("Error in malloc(). Aborting ...\n");
    return 1;
  }

  double * xa1 = base;
  double * xa2 = xa1 + 12;
  int i;

  // initialize segments touched by indexSet
  for (i =521; i<= 2025; ++i) {
    base[i]=0.5*i;
  }

  // default static even scheduling may not trigger data race, using static,1 instead.
#pragma omp parallel for schedule(static,1)
  for (i =0; i< N; ++i) {
    int idx = indexSet[i];
    xa1[idx]+= 1.0 + i;
    xa2[idx]+= 3.0 + i;
  }

  printf("x1[999]=%f xa2[1285]=%f\n", xa1[999], xa2[1285]);
  free (base);
  return 0;
}
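/* Hedged illustration (not part of the DataRaceBench file above): the race exists
 * because xa1[idx] and xa2[idx] can alias the same base[] element whenever two
 * indices in indexSet differ by exactly 12 (911 and 923 in this index set).  One
 * way to make the update loop race-free, at the cost of serializing conflicting
 * updates, is to perform each addition atomically.  The name update_atomic is a
 * hypothetical helper introduced only for this sketch. */
void update_atomic(double *xa1, double *xa2)
{
  int i;
#pragma omp parallel for schedule(static,1)
  for (i = 0; i < N; ++i) {
    int idx = indexSet[i];
#pragma omp atomic
    xa1[idx] += 1.0 + i;
#pragma omp atomic
    xa2[idx] += 3.0 + i;
  }
}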
rose_v1_firstprivate.c
#include <omp.h>
#include <stdio.h>

int g;

void foo()
{
  int i;
  int x;
  int y = 1;
  int a[100];
  int b[100];

#pragma omp parallel for private (y,i) firstprivate (x)
  for (i = 0; i <= 99; i += 1) {
    y = x + 1 + g;
    b[i] = x + 1 + g;
    // x=...
    // ... =x
  }
  x = g;
}

int a[100];

void foo2()
{
  int i;
  int tmp;
  tmp = 10;
  // It would be wrong to parallelize the following loop,
  // since there is a true dependence between tmp in one iteration
  // and tmp in the following iteration.
  // Even firstprivate cannot help in this case.
  for (i = 0; i <= 99; i += 1) {
    a[i] = tmp;
    tmp = a[i] + i;
  }
  printf("a[0]=%d\n",a[0]);
  printf("a[40]=%d\n",a[40]);
  printf("a[99]=%d\n",a[99]);
}
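/* Hedged sketch (not part of the original file): the recurrence in foo2,
 * tmp_{i+1} = tmp_i + i with tmp_0 = 10, has the closed form
 * tmp_i = 10 + i*(i-1)/2, so the loop body can be rewritten without the
 * loop-carried dependence and then safely parallelized.  foo2_closed_form
 * is a hypothetical name introduced only for this illustration. */
void foo2_closed_form()
{
  int i;
#pragma omp parallel for
  for (i = 0; i <= 99; i += 1) {
    a[i] = 10 + i * (i - 1) / 2;
  }
  printf("a[0]=%d\n",a[0]);
  printf("a[40]=%d\n",a[40]);
  printf("a[99]=%d\n",a[99]);
}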
bfs_replicated_csc.c
/* Copyright (C) 2010 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ #define _GNU_SOURCE #include "common.h" #include "oned_csc.h" #include "onesided.h" #include <mpi.h> #include <stdint.h> #include <inttypes.h> #include <stdlib.h> #include <stddef.h> #include <string.h> #include <limits.h> #include <assert.h> char IMPLEMENTATION[] = "MPI BFS_REPLICATED_CSC"; static oned_csc_graph g; static unsigned long* g_in_queue; static unsigned long* g_in_queue_summary; static unsigned long* g_out_queue; static unsigned long* g_out_queue_summary; static unsigned long* g_visited; static void allocate_memory(void) { int64_t maxlocalverts = g.max_nlocalverts; int64_t local_queue_summary_size = (maxlocalverts + ULONG_BITS * ULONG_BITS - 1) / ULONG_BITS / ULONG_BITS; int64_t local_queue_size = local_queue_summary_size * ULONG_BITS; int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size); int64_t global_queue_size = MUL_SIZE(local_queue_size); g_in_queue = (unsigned long*)xmalloc(global_queue_size * sizeof(unsigned long)); g_in_queue_summary = (unsigned long*)xmalloc(global_queue_summary_size * sizeof(unsigned long)); g_out_queue = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long)); g_out_queue_summary = (unsigned long*)xmalloc(local_queue_summary_size * sizeof(unsigned long)); g_visited = (unsigned long*)xmalloc(local_queue_size * sizeof(unsigned long)); } static void deallocate_memory(void) { free(g_in_queue); g_in_queue = NULL; free(g_in_queue_summary); g_in_queue_summary = NULL; free(g_out_queue); g_out_queue = NULL; free(g_out_queue_summary); g_out_queue_summary = NULL; free(g_visited); g_visited = NULL; } void make_graph_data_structure(const tuple_graph* const tg) { convert_graph_to_oned_csc(tg, &g); allocate_memory(); /* Make sure all of the space is available */ deallocate_memory(); } void free_graph_data_structure(void) { free_oned_csc_graph(&g); /* deallocate_memory(); */ } int bfs_writes_depth_map(void) { return 0; } /* This version is the traditional level-synchronized BFS using two queues. A * bitmap is used to indicate which vertices have been visited. Messages are * sent and processed asynchronously throughout the code to hopefully overlap * communication with computation. */ void run_bfs(int64_t root, int64_t* pred) { allocate_memory(); const ptrdiff_t nlocalverts = g.nlocalverts; /* const int64_t nglobalverts = g.nglobalverts; */ const size_t* const restrict rowstarts = g.rowstarts; const int64_t* const restrict column = g.column; /* Set up the visited bitmap. 
*/ int lg_local_queue_size = g.lg_local_queue_size; int64_t local_queue_size = INT64_C(1) << lg_local_queue_size; int64_t local_queue_summary_size = local_queue_size / ULONG_BITS; int64_t global_queue_summary_size = MUL_SIZE(local_queue_summary_size); int64_t global_queue_size = MUL_SIZE(local_queue_size); #if 0 int64_t* restrict column_swizzled = (int64_t*)xmalloc(nlocaledges * sizeof(int64_t)); { size_t i; for (i = 0; i < nlocaledges; ++i) { int64_t c = column[i]; column_swizzled[i] = SWIZZLE_VERTEX(c); } } #endif unsigned long* restrict in_queue = g_in_queue; memset(in_queue, 0, global_queue_size * sizeof(unsigned long)); unsigned long* restrict in_queue_summary = g_in_queue_summary; memset(in_queue_summary, 0, global_queue_summary_size * sizeof(unsigned long)); unsigned long* restrict out_queue = g_out_queue; unsigned long* restrict out_queue_summary = g_out_queue_summary; unsigned long* restrict visited = g_visited; memset(visited, 0, local_queue_size * sizeof(unsigned long)); #define SET_IN(v) do {int64_t vs = SWIZZLE_VERTEX(v); size_t word_idx = vs / ULONG_BITS; int bit_idx = vs % ULONG_BITS; unsigned long mask = (1UL << bit_idx); in_queue_summary[word_idx / ULONG_BITS] |= (1UL << (word_idx % ULONG_BITS)); in_queue[word_idx] |= mask;} while (0) #define TEST_IN(vs) (((in_queue_summary[vs / ULONG_BITS / ULONG_BITS] & (1UL << ((vs / ULONG_BITS) % ULONG_BITS))) != 0) && ((in_queue[vs / ULONG_BITS] & (1UL << (vs % ULONG_BITS))) != 0)) #define TEST_VISITED_LOCAL(v) ((visited[(v) / ULONG_BITS] & (1UL << ((v) % ULONG_BITS))) != 0) #define TAS_VISITED_LOCAL(v) (((__sync_fetch_and_or(&visited[(v) / ULONG_BITS], (1UL << ((v) % ULONG_BITS))) & (1UL << ((v) % ULONG_BITS))) != 0) ? 1 : (__sync_fetch_and_or(&out_queue[(v) / ULONG_BITS], (1UL << ((v) % ULONG_BITS))), 0)) // #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ULONG_BITS; int bit_idx = (v) % ULONG_BITS; unsigned long mask = (1UL << bit_idx); __sync_fetch_and_or(&visited[word_idx], mask); __sync_fetch_and_or(&out_queue[word_idx], mask);} while (0) #define SET_VISITED_LOCAL(v) do {size_t word_idx = (v) / ULONG_BITS; int bit_idx = (v) % ULONG_BITS; unsigned long mask = (1UL << bit_idx); visited[word_idx] |= mask; out_queue[word_idx] |= mask;} while (0) SET_IN(root); {ptrdiff_t i; _Pragma("omp parallel for schedule(static)") for (i = 0; i < nlocalverts; ++i) pred[i] = -1;} if (VERTEX_OWNER(root) == rank) { pred[VERTEX_LOCAL(root)] = root; SET_VISITED_LOCAL(VERTEX_LOCAL(root)); } uint16_t cur_level = 0; while (1) { ++cur_level; #if 0 if (rank == 0) fprintf(stderr, "BFS level %" PRIu16 "\n", cur_level); #endif memset(out_queue, 0, local_queue_size * sizeof(unsigned long)); // memset(out_queue_summary, 0, local_queue_summary_size * sizeof(unsigned long)); ptrdiff_t i, ii_summary; #if 0 #pragma omp parallel for schedule(static) for (i = 0; i < global_queue_summary_size; ++i) { unsigned long val = 0UL; int j; unsigned long mask = 1UL; for (j = 0; j < ULONG_BITS; ++j, mask <<= 1) { if (in_queue[i * ULONG_BITS + j]) val |= mask; } in_queue_summary[i] = val; } #endif unsigned long not_done = 0; #pragma omp parallel for schedule(static) reduction(|:not_done) for (ii_summary = 0; ii_summary < global_queue_summary_size; ++ii_summary) { uint64_t val_summary = in_queue_summary[ii_summary]; if (val_summary == 0) continue; int ii_offset; ptrdiff_t ii; for (ii_offset = 0; ii_offset < ULONG_BITS; ++ii_offset) { if ((val_summary & (UINT64_C(1) << ii_offset)) == 0) continue; ii = ii_summary * ULONG_BITS + ii_offset; uint64_t val = in_queue[ii]; if 
(val == 0) continue; size_t i, i_end = rowstarts[ii + 1]; for (i = rowstarts[ii]; i < i_end; ++i) { int64_t c = column[i]; int64_t v0_local = c / ULONG_BITS; if ((val & (UINT64_C(1) << (c % ULONG_BITS))) != 0 /* TEST_IN(v1_swizzled) */ && !TAS_VISITED_LOCAL(v0_local)) { assert (pred[v0_local] == -1); int64_t v1_swizzled = (int64_t)ii * ULONG_BITS + c % ULONG_BITS; pred[v0_local] = UNSWIZZLE_VERTEX(v1_swizzled); not_done |= 1; } } } } #if 1 #pragma omp parallel for schedule(static) for (i = 0; i < local_queue_summary_size; ++i) { unsigned long val = 0UL; int j; unsigned long mask = 1UL; for (j = 0; j < ULONG_BITS; ++j, mask <<= 1) { unsigned long full_val = out_queue[i * ULONG_BITS + j]; visited[i * ULONG_BITS + j] |= full_val; if (full_val) val |= mask; } out_queue_summary[i] = val; // not_done |= val; } #endif MPI_Allreduce(MPI_IN_PLACE, &not_done, 1, MPI_UNSIGNED_LONG, MPI_BOR, MPI_COMM_WORLD); if (not_done == 0) break; MPI_Allgather(out_queue, local_queue_size, MPI_UNSIGNED_LONG, in_queue, local_queue_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); MPI_Allgather(out_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, in_queue_summary, local_queue_summary_size, MPI_UNSIGNED_LONG, MPI_COMM_WORLD); } deallocate_memory(); } void get_vertex_distribution_for_pred(size_t count, const int64_t* vertex_p, int* owner_p, size_t* local_p) { const int64_t* restrict vertex = vertex_p; int* restrict owner = owner_p; size_t* restrict local = local_p; ptrdiff_t i; #pragma omp parallel for for (i = 0; i < (ptrdiff_t)count; ++i) { owner[i] = VERTEX_OWNER(vertex[i]); local[i] = VERTEX_LOCAL(vertex[i]); } } int64_t vertex_to_global_for_pred(int v_rank, size_t v_local) { return VERTEX_TO_GLOBAL(v_rank, v_local); } size_t get_nlocalverts_for_pred(void) { return g.nlocalverts; }
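/* Hedged single-node sketch (not part of the Graph500 code above): the same
 * level-synchronized BFS pattern, stripped of MPI and OpenMP, using one bit per
 * vertex for the current frontier, the next frontier, and the visited set.
 * The CSR arrays rowstarts/column and the name bfs_levels_sketch are assumptions
 * made only for this illustration. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define WORD_BITS 64
#define TEST_BIT(bm, v) ((bm[(v) / WORD_BITS] >> ((v) % WORD_BITS)) & UINT64_C(1))
#define SET_BIT(bm, v)  (bm[(v) / WORD_BITS] |= (UINT64_C(1) << ((v) % WORD_BITS)))

static void bfs_levels_sketch(const size_t *rowstarts, const int64_t *column,
                              int64_t nverts, int64_t root, int64_t *pred)
{
  size_t nwords = (size_t)(nverts + WORD_BITS - 1) / WORD_BITS;
  uint64_t *in_queue  = calloc(nwords, sizeof *in_queue);   /* current frontier */
  uint64_t *out_queue = calloc(nwords, sizeof *out_queue);  /* next frontier */
  uint64_t *visited   = calloc(nwords, sizeof *visited);
  int64_t v;
  for (v = 0; v < nverts; ++v) pred[v] = -1;
  pred[root] = root;
  SET_BIT(visited, root);
  SET_BIT(in_queue, root);
  int not_done = 1;
  while (not_done) {
    not_done = 0;
    memset(out_queue, 0, nwords * sizeof *out_queue);
    for (v = 0; v < nverts; ++v) {                 /* scan the current frontier */
      if (!TEST_BIT(in_queue, v)) continue;
      size_t e;
      for (e = rowstarts[v]; e < rowstarts[v + 1]; ++e) {
        int64_t w = column[e];
        if (TEST_BIT(visited, w)) continue;        /* already discovered */
        SET_BIT(visited, w);
        SET_BIT(out_queue, w);
        pred[w] = v;
        not_done = 1;
      }
    }
    uint64_t *tmp = in_queue; in_queue = out_queue; out_queue = tmp; /* swap frontiers */
  }
  free(in_queue);
  free(out_queue);
  free(visited);
}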