avx512vnni_gemm.h
#pragma once

#include "intgemm/intgemm_config.h"

#ifdef INTGEMM_COMPILER_SUPPORTS_AVX512VNNI

#include "avx512_gemm.h"
#include "types.h"

namespace intgemm {
namespace AVX512VNNI {

// Workaround for an extra vmovdqa64: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=94663
INTGEMM_AVX512VNNI static inline void VNNI8(__m512i &c, __m512i a, __m512i b) {
#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER)
  asm ("vpdpbusds %2, %1, %0" : "+x"(c) : "x"(a), "mx"(b));
#else
  c = _mm512_dpbusds_epi32(c, a, b);
#endif
}

struct Kernels8 : public AVX512BW::Kernels8 {
  template <typename Callback>
  INTGEMM_AVX512VNNI static void Multiply(const int8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) {
    assert(width % sizeof(Register) == 0);
    assert(B_cols % 8 == 0);
    assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0);
    assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0);
    auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback);
    const Index simd_width = width / sizeof(Register);
    Register zeros = setzero_si<Register>();
    // Go over 8 columns of B at a time.
#pragma omp for
    for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) {
      const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width;
      // Process one row of A at a time.  Doesn't seem to be faster to do multiple rows of A at once.
      for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) {
        // Iterate over the shared (inner) dimension.
        const Register *A_live = reinterpret_cast<const Register *>(A + A_rowidx * width);
        const Register *A_end = A_live + simd_width;
        const Register *B_live = B0_col;
        // TODO: separate first step.
        Register sum0 = zeros, sum1 = zeros, sum2 = zeros, sum3 = zeros,
                 sum4 = zeros, sum5 = zeros, sum6 = zeros, sum7 = zeros;
        for (; A_live != A_end; ++A_live, B_live += 8) {
          Register a = *A_live;
          // Retrieve the conveniently consecutive values of B.
          Register b0 = *B_live;
          Register b1 = *(B_live + 1);
          Register b2 = *(B_live + 2);
          Register b3 = *(B_live + 3);
          Register b4 = *(B_live + 4);
          Register b5 = *(B_live + 5);
          Register b6 = *(B_live + 6);
          Register b7 = *(B_live + 7);
          // Get a mask where a is negative.
          __mmask64 neg_mask = _mm512_test_epi8_mask(a, _mm512_set1_epi8(-128));
          Register a_positive = _mm512_abs_epi8(a);
          // Negate by subtracting from zero with a mask.
          b0 = _mm512_mask_sub_epi8(b0, neg_mask, zeros, b0);
          b1 = _mm512_mask_sub_epi8(b1, neg_mask, zeros, b1);
          b2 = _mm512_mask_sub_epi8(b2, neg_mask, zeros, b2);
          b3 = _mm512_mask_sub_epi8(b3, neg_mask, zeros, b3);
          b4 = _mm512_mask_sub_epi8(b4, neg_mask, zeros, b4);
          b5 = _mm512_mask_sub_epi8(b5, neg_mask, zeros, b5);
          b6 = _mm512_mask_sub_epi8(b6, neg_mask, zeros, b6);
          b7 = _mm512_mask_sub_epi8(b7, neg_mask, zeros, b7);
          VNNI8(sum0, a_positive, b0);
          VNNI8(sum1, a_positive, b1);
          VNNI8(sum2, a_positive, b2);
          VNNI8(sum3, a_positive, b3);
          VNNI8(sum4, a_positive, b4);
          VNNI8(sum5, a_positive, b5);
          VNNI8(sum6, a_positive, b6);
          VNNI8(sum7, a_positive, b7);
        }
        Register pack0123 = Pack0123(sum0, sum1, sum2, sum3);
        Register pack4567 = Pack0123(sum4, sum5, sum6, sum7);
        auto total = PermuteSummer(pack0123, pack4567);
        callback_impl.Run(total, callbacks::OutputBufferInfo(A_rowidx, B0_colidx, A_rows, B_cols));
      }
    }
  }

  template <typename Callback>
  INTGEMM_AVX512VNNI static void Multiply8Shift(const uint8_t *A, const int8_t *B, Index A_rows, Index width, Index B_cols, Callback callback) {
    assert(width % sizeof(Register) == 0);
    assert(B_cols % 8 == 0);
    assert(reinterpret_cast<uintptr_t>(A) % sizeof(Register) == 0);
    assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0);
    auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback);
    const Index simd_width = width / sizeof(Register);
    Register zeros = setzero_si<Register>();
    // Go over 8 columns of B at a time.
#pragma omp for
    for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) {
      const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width;
      // Process one row of A at a time.  Doesn't seem to be faster to do multiple rows of A at once.
      for (Index A_rowidx = 0; A_rowidx < A_rows; ++A_rowidx) {
        // Iterate over the shared (inner) dimension.
        const Register *A_live = reinterpret_cast<const Register *>(A + A_rowidx * width);
        const Register *A_end = A_live + simd_width;
        const Register *B_live = B0_col;
        // TODO: separate first step.
        Register sum0 = zeros, sum1 = zeros, sum2 = zeros, sum3 = zeros,
                 sum4 = zeros, sum5 = zeros, sum6 = zeros, sum7 = zeros;
        for (; A_live != A_end; ++A_live, B_live += 8) {
          Register a = *A_live;
          // Multiply-add: A is already unsigned here, so no sign fixup is needed.
          VNNI8(sum0, a, *B_live);
          VNNI8(sum1, a, *(B_live + 1));
          VNNI8(sum2, a, *(B_live + 2));
          VNNI8(sum3, a, *(B_live + 3));
          VNNI8(sum4, a, *(B_live + 4));
          VNNI8(sum5, a, *(B_live + 5));
          VNNI8(sum6, a, *(B_live + 6));
          VNNI8(sum7, a, *(B_live + 7));
        }
        Register pack0123 = Pack0123(sum0, sum1, sum2, sum3);
        Register pack4567 = Pack0123(sum4, sum5, sum6, sum7);
        auto total = PermuteSummer(pack0123, pack4567);
        callback_impl.Run(total, callbacks::OutputBufferInfo(A_rowidx, B0_colidx, A_rows, B_cols));
      }
    }
  }

  template <typename Callback>
  INTGEMM_AVX512VNNI static void PrepareBias(const int8_t *B, Index width, Index B_cols, Callback callback) {
    assert(width % sizeof(Register) == 0);
    assert(B_cols % 8 == 0);
    assert(reinterpret_cast<uintptr_t>(B) % sizeof(Register) == 0);
    auto callback_impl = callbacks::CallbackImpl<CPUType::AVX2, Callback>(callback);
    Index simd_width = width / sizeof(Register);
    Register zeros = setzero_si<Register>();
    const Register a = set1_epi8<Register>(1);
    // Go over 8 columns of B at a time.
#pragma omp for
    for (Index B0_colidx = 0; B0_colidx < B_cols; B0_colidx += 8) {
      const Register *B0_col = reinterpret_cast<const Register*>(B) + B0_colidx * simd_width;
      // Keep the structure as close as possible to the Multiply functions above.
      const Register *B_live = B0_col;
      const Register *B_end = B_live + simd_width * 8;
      // TODO: separate first step.
      Register sum0 = zeros, sum1 = zeros, sum2 = zeros, sum3 = zeros,
               sum4 = zeros, sum5 = zeros, sum6 = zeros, sum7 = zeros;
      for (; B_live != B_end; B_live += 8) {
        // Retrieve the conveniently consecutive values of B.
        VNNI8(sum0, a, *B_live);
        VNNI8(sum1, a, *(B_live + 1));
        VNNI8(sum2, a, *(B_live + 2));
        VNNI8(sum3, a, *(B_live + 3));
        VNNI8(sum4, a, *(B_live + 4));
        VNNI8(sum5, a, *(B_live + 5));
        VNNI8(sum6, a, *(B_live + 6));
        VNNI8(sum7, a, *(B_live + 7));
      }
      Register pack0123 = Pack0123(sum0, sum1, sum2, sum3);
      Register pack4567 = Pack0123(sum4, sum5, sum6, sum7);
      auto total = PermuteSummer(pack0123, pack4567);
      callback_impl.Run(total, callbacks::OutputBufferInfo(0, B0_colidx, 1, B_cols));
    }
  }

  constexpr static const char *const kName = "8-bit AVX512VNNI";
  static const CPUType kUses = CPUType::AVX512VNNI;
};

} // namespace AVX512VNNI
} // namespace intgemm

#endif
346989_so4_itt.c
#define _POSIX_C_SOURCE 200809L
#include <stdlib.h>
#include <math.h>
#include <sys/time.h>
#include "ittnotify.h"
#include <xmmintrin.h>
#include <pmmintrin.h>
#include <stdio.h>
#include <omp.h>

#define min(a, b) (((a) < (b)) ? (a) : (b))
#define max(a, b) (((a) > (b)) ? (a) : (b))

struct dataobj
{
  void *restrict data;
  int *size;
  int *npsize;
  int *dsize;
  int *hsize;
  int *hofs;
  int *oofs;
};

/* Defined by the generating framework; only a pointer is passed through here. */
struct profiler;

int Kernel(struct dataobj *restrict block_sizes_vec, struct dataobj *restrict damp_vec, const float dt, const float h_x, const float h_y, const float h_z, struct dataobj *restrict nnz_sp_source_mask_vec, struct dataobj *restrict save_src_vec, struct dataobj *restrict source_id_vec, struct dataobj *restrict source_mask_vec, struct dataobj *restrict sp_source_mask_vec, struct dataobj *restrict usol_vec, struct dataobj *restrict vp_vec, const int sp_zi_m, const int time_M, const int time_m, struct profiler *timers, const int x_M, const int x_m, const int y_M, const int y_m, const int z_M, const int z_m, const int nthreads)
{
  int (*restrict block_sizes) __attribute__ ((aligned (64))) = (int (*)) block_sizes_vec->data;
  float (*restrict damp)[damp_vec->size[1]][damp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[damp_vec->size[1]][damp_vec->size[2]]) damp_vec->data;
  int (*restrict nnz_sp_source_mask)[nnz_sp_source_mask_vec->size[1]] __attribute__ ((aligned (64))) = (int (*)[nnz_sp_source_mask_vec->size[1]]) nnz_sp_source_mask_vec->data;
  float (*restrict save_src)[save_src_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[save_src_vec->size[1]]) save_src_vec->data;
  int (*restrict source_id)[source_id_vec->size[1]][source_id_vec->size[2]] __attribute__ ((aligned (64))) = (int (*)[source_id_vec->size[1]][source_id_vec->size[2]]) source_id_vec->data;
  float (*restrict source_mask)[source_mask_vec->size[1]][source_mask_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[source_mask_vec->size[1]][source_mask_vec->size[2]]) source_mask_vec->data;
  int (*restrict sp_source_mask)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]] __attribute__ ((aligned (64))) = (int (*)[sp_source_mask_vec->size[1]][sp_source_mask_vec->size[2]]) sp_source_mask_vec->data;
  float (*restrict usol)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]] __attribute__ ((aligned (64))) = (float (*)[usol_vec->size[1]][usol_vec->size[2]][usol_vec->size[3]]) usol_vec->data;
  float (*restrict vp)[vp_vec->size[1]][vp_vec->size[2]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]][vp_vec->size[2]]) vp_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  __itt_resume();

  int xb_size = block_sizes[0];
  int yb_size = block_sizes[1];
  int x0_blk0_size = block_sizes[2];
  int y0_blk0_size = block_sizes[3];

  printf(" Tiles: %d, %d ::: Blocks %d, %d \n", xb_size, yb_size, x0_blk0_size, y0_blk0_size);

  int sf = 2;
  int t_blk_size = 2 * sf * (time_M - time_m);

  /* For each block of time steps */
  for (int t_blk = time_m; t_blk < sf * (time_M - time_m); t_blk += sf * t_blk_size)
  {
    for (int xb = x_m; xb <= (x_M + sf * (time_M - time_m)); xb += xb_size + 1)
    {
      for (int yb = y_m; yb <= (y_M + sf * (time_M - time_m)); yb += yb_size + 1)
      {
        for (int time = t_blk, t0 = (time + 1) % 3, t1 = time % 3, t2 = (time + 2) % 3;
             time <= 1 + min(t_blk + t_blk_size - 1, sf * (time_M - time_m));
             time += sf,
             t0 = (((time / sf) % (time_M - time_m + 1)) + 1) % 3,
             t1 = ((time / sf) % (time_M - time_m + 1)) % 3,
             t2 = (((time / sf) % (time_M - time_m + 1)) + 2) % 3)
        {
          int tw = ((time / sf) % (time_M - time_m + 1));
#pragma omp parallel num_threads(nthreads)
          {
#pragma omp for collapse(2) schedule(dynamic, 1)
            for (int x0_blk0 = max((x_m + time), xb); x0_blk0 <= min((x_M + time), (xb + xb_size)); x0_blk0 += x0_blk0_size)
            {
              for (int y0_blk0 = max((y_m + time), yb); y0_blk0 <= min((y_M + time), (yb + yb_size)); y0_blk0 += y0_blk0_size)
              {
                for (int x = x0_blk0; x <= min(min((x_M + time), (xb + xb_size)), (x0_blk0 + x0_blk0_size - 1)); x++)
                {
                  for (int y = y0_blk0; y <= min(min((y_M + time), (yb + yb_size)), (y0_blk0 + y0_blk0_size - 1)); y++)
                  {
#pragma omp simd aligned(damp, usol, vp : 32)
                    for (int z = z_m; z <= z_M; z += 1)
                    {
                      float r14 = -2.5F * usol[t1][x - time + 4][y - time + 4][z + 4];
                      float r13 = 1.0 / dt;
                      float r12 = 1.0 / (dt * dt);
                      float r11 = 1.0 / (vp[x - time + 4][y - time + 4][z + 4] * vp[x - time + 4][y - time + 4][z + 4]);
                      usol[t0][x - time + 4][y - time + 4][z + 4] =
                          (r11 * (-r12 * (-2.0F * usol[t1][x - time + 4][y - time + 4][z + 4] + usol[t2][x - time + 4][y - time + 4][z + 4])) +
                           r13 * (damp[x - time + 1][y - time + 1][z + 1] * usol[t1][x - time + 4][y - time + 4][z + 4]) +
                           (r14 - 8.33333333e-2F * (usol[t1][x - time + 4][y - time + 4][z + 2] + usol[t1][x - time + 4][y - time + 4][z + 6]) +
                            1.33333333F * (usol[t1][x - time + 4][y - time + 4][z + 3] + usol[t1][x - time + 4][y - time + 4][z + 5])) / ((h_z * h_z)) +
                           (r14 - 8.33333333e-2F * (usol[t1][x - time + 4][y - time + 2][z + 4] + usol[t1][x - time + 4][y - time + 6][z + 4]) +
                            1.33333333F * (usol[t1][x - time + 4][y - time + 3][z + 4] + usol[t1][x - time + 4][y - time + 5][z + 4])) / ((h_y * h_y)) +
                           (r14 - 8.33333333e-2F * (usol[t1][x - time + 2][y - time + 4][z + 4] + usol[t1][x - time + 6][y - time + 4][z + 4]) +
                            1.33333333F * (usol[t1][x - time + 3][y - time + 4][z + 4] + usol[t1][x - time + 5][y - time + 4][z + 4])) / ((h_x * h_x))) /
                          (r11 * r12 + r13 * damp[x - time + 1][y - time + 1][z + 1]);
                    }
                    /* Inject the saved source values at the sparse source positions. */
#pragma omp simd aligned(damp, usol, vp : 32)
                    for (int sp_zi = sp_zi_m; sp_zi <= nnz_sp_source_mask[x - time][y - time] - 1; sp_zi += 1)
                    {
                      int zind = sp_source_mask[x - time][y - time][sp_zi];
                      float r0 = save_src[tw][source_id[x - time][y - time][zind]] * source_mask[x - time][y - time][zind];
                      usol[t0][x - time + 4][y - time + 4][zind + 4] += r0;
                    }
                  }
                }
              }
            }
          }
        }
      }
    }
  }
  /* End section */
  __itt_pause();
  return 0;
}
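The t0/t1/t2 arithmetic implements a three-slot circular time buffer: only three time levels of usol are kept, and each step rotates which slot is written, current, and previous via modulo 3. A standalone sketch of just the rotation (illustrative C++):

#include <cstdio>

// Three-slot rotation over logical time steps: slot (t+1)%3 is written,
// slot t%3 holds the current level, slot (t+2)%3 holds the previous one.
int main() {
  for (int t = 0; t < 5; ++t) {
    int t0 = (t + 1) % 3;  // next:     usol[t0] is written
    int t1 = t % 3;        // current:  usol[t1] is read
    int t2 = (t + 2) % 3;  // previous: usol[t2] is read
    std::printf("step %d: write slot %d, read slots %d (current) and %d (previous)\n",
                t, t0, t1, t2);
  }
  return 0;
}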
hoNDArray.h
/** \file hoNDArray.h
    \brief CPU-based N-dimensional array (data container)
*/

#pragma once

#include "NDArray.h"
#include "complext.h"
#include "vector_td.h"
#include <type_traits>
#include <boost/shared_ptr.hpp>
#include <stdexcept>
#include "TypeTraits.h"

namespace Gadgetron {

    namespace Indexing {
        class Slice {};
        constexpr auto slice = Slice{};
    }

    template<class... ARGS>
    struct ValidIndex : std::integral_constant<bool, Core::all_of_v<Core::is_convertible_v<ARGS, size_t>...>> {};

    template<>
    struct ValidIndex<> : std::true_type {};

    template<class... ARGS>
    struct ValidIndex<Indexing::Slice, ARGS...> : ValidIndex<ARGS...> {};

    namespace {
        namespace gadgetron_detail {

            template <size_t count, class... ARGS>
            struct count_slices {
                static constexpr size_t value = count;
            };

            template <size_t count, class... ARGS>
            struct count_slices<count, Indexing::Slice, ARGS...> : count_slices<count + 1, ARGS...> {};

            template <size_t count, class T, class... ARGS>
            struct count_slices<count, T, ARGS...> : count_slices<count, ARGS...> {};

            template <class... ARGS>
            struct is_contiguous_index {
                static constexpr bool value = true;
            };

            template<class T, class... ARGS>
            struct is_contiguous_index<T, ARGS...> {
                static constexpr bool value = !Core::any_of_v<Core::is_same_v<Indexing::Slice, ARGS>...>;
            };

            template<class... ARGS>
            struct is_contiguous_index<Indexing::Slice, ARGS...> {
                static constexpr bool value = is_contiguous_index<ARGS...>::value;
            };

            static_assert(is_contiguous_index<Indexing::Slice>::value);
            static_assert(is_contiguous_index<Indexing::Slice, size_t>::value);
            static_assert(is_contiguous_index<Indexing::Slice, long long>::value);
            static_assert(is_contiguous_index<Indexing::Slice, Indexing::Slice, size_t>::value);
            static_assert(is_contiguous_index<Indexing::Slice, Indexing::Slice, long long>::value);
            static_assert(!is_contiguous_index<size_t, Indexing::Slice, Indexing::Slice, size_t>::value);
            static_assert(!is_contiguous_index<int, Indexing::Slice, Indexing::Slice, size_t>::value);
            static_assert(!is_contiguous_index<long long, Indexing::Slice, Indexing::Slice, size_t>::value);
        }
    }

    template<class T> class hoNDArray;

    template<class T, size_t D, bool contigous = false>
    class hoNDArrayView {
    public:
        hoNDArrayView& operator=(const hoNDArrayView<T, D, !contigous>&);
        hoNDArrayView& operator=(const hoNDArrayView&);
        hoNDArrayView<T, D, contigous>& operator=(const hoNDArray<T>&);

        template<class... INDICES>
        std::enable_if_t<Core::all_of_v<Core::is_convertible_v<INDICES, size_t>...> && (sizeof...(INDICES) == D), T&>
        operator()(INDICES... indices);

        template<class... INDICES>
        std::enable_if_t<Core::all_of_v<Core::is_convertible_v<INDICES, size_t>...> && (sizeof...(INDICES) == D), const T&>
        operator()(INDICES... indices) const;

        template<typename Dummy1 = void, typename = std::enable_if_t<contigous, Dummy1>>
        operator hoNDArray<T>();
        operator const hoNDArray<T>() const;

    private:
        friend class hoNDArray<T>;
        friend class hoNDArrayView<T, D, !contigous>;
        hoNDArrayView(const std::array<size_t, D>& strides, const std::array<size_t, D>& dimensions, T*);
        vector_td<size_t, D> strides;
        vector_td<size_t, D> dimensions;
        T* data;
    };

    template <typename T> class hoNDArray : public NDArray<T> {
    public:
        typedef NDArray<T> BaseClass;
        typedef float coord_type;
        typedef T value_type;
        using iterator = T*;
        using const_iterator = const T*;

        hoNDArray();
        explicit hoNDArray(const std::vector<size_t>& dimensions);
        [[deprecated("Pass vector as reference instead")]]
        explicit hoNDArray(const std::vector<size_t>* dimensions);
        [[deprecated("Pass vector as reference instead")]]
        explicit hoNDArray(boost::shared_ptr<std::vector<size_t>> dimensions);
        [[deprecated("Pass vector as reference instead")]]
        hoNDArray(const std::vector<size_t>* dimensions, T* data, bool delete_data_on_destruct = false);
        hoNDArray(const std::vector<size_t>& dimensions, T* data, bool delete_data_on_destruct = false);
        [[deprecated("Pass vector as reference instead")]]
        hoNDArray(boost::shared_ptr<std::vector<size_t>> dimensions, T* data, bool delete_data_on_destruct = false);
        hoNDArray(std::initializer_list<size_t> dimensions);
        hoNDArray(std::initializer_list<size_t> dimensions, T* data, bool delete_data_on_destruct = false);

        explicit hoNDArray(size_t len);
        hoNDArray(size_t sx, size_t sy);
        hoNDArray(size_t sx, size_t sy, size_t sz);
        hoNDArray(size_t sx, size_t sy, size_t sz, size_t st);
        hoNDArray(size_t sx, size_t sy, size_t sz, size_t st, size_t sp);
        hoNDArray(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq);
        hoNDArray(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, size_t sr);
        hoNDArray(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, size_t sr, size_t ss);

        hoNDArray(size_t len, T* data, bool delete_data_on_destruct = false);
        hoNDArray(size_t sx, size_t sy, T* data, bool delete_data_on_destruct = false);
        hoNDArray(size_t sx, size_t sy, size_t sz, T* data, bool delete_data_on_destruct = false);
        hoNDArray(size_t sx, size_t sy, size_t sz, size_t st, T* data, bool delete_data_on_destruct = false);
        hoNDArray(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, T* data, bool delete_data_on_destruct = false);
        hoNDArray(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, T* data, bool delete_data_on_destruct = false);
        hoNDArray(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, size_t sr, T* data, bool delete_data_on_destruct = false);
        hoNDArray(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, size_t sr, size_t ss, T* data, bool delete_data_on_destruct = false);

        virtual ~hoNDArray();

        // Copy constructors
        hoNDArray(const hoNDArray<T>& a);
        template<class S> explicit hoNDArray(const hoNDArray<S>& other);
        [[deprecated]] explicit hoNDArray(const hoNDArray<T>* a);

        // Move constructors
        hoNDArray(hoNDArray<T>&& a) noexcept;
        hoNDArray& operator=(hoNDArray&& rhs) noexcept;

        // Assignment operator
        hoNDArray& operator=(const hoNDArray& rhs);

        template<unsigned int D, bool C>
        hoNDArray& operator=(const hoNDArrayView<T, D, C>& view);

        bool operator==(const hoNDArray& rhs) const;

        virtual void create(const std::vector<size_t>& dimensions);
        [[deprecated("Pass vector as reference instead")]]
        virtual void create(const std::vector<size_t>* dimensions);
        [[deprecated("Pass vector as reference instead")]]
        virtual void create(boost::shared_ptr<std::vector<size_t>> dimensions);
        virtual void create(std::initializer_list<size_t> dimensions);
        virtual void create(std::initializer_list<size_t> dimensions, T* data, bool delete_data_on_destruct = false);
        virtual void create(const std::vector<size_t>& dimensions, T* data, bool delete_data_on_destruct = false);
        [[deprecated("Pass vector as reference instead")]]
        virtual void create(const std::vector<size_t>* dimensions, T* data, bool delete_data_on_destruct = false);
        [[deprecated("Pass vector as reference instead")]]
        virtual void create(boost::shared_ptr<std::vector<size_t>> dimensions, T* data, bool delete_data_on_destruct = false);

        virtual void create(size_t len);
        virtual void create(size_t sx, size_t sy);
        virtual void create(size_t sx, size_t sy, size_t sz);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st, size_t sp);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, size_t sr);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, size_t sr, size_t ss);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, size_t sr, size_t ss, size_t su);

        virtual void create(size_t len, T* data, bool delete_data_on_destruct = false);
        virtual void create(size_t sx, size_t sy, T* data, bool delete_data_on_destruct = false);
        virtual void create(size_t sx, size_t sy, size_t sz, T* data, bool delete_data_on_destruct = false);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st, T* data, bool delete_data_on_destruct = false);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, T* data, bool delete_data_on_destruct = false);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, T* data, bool delete_data_on_destruct = false);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, size_t sr, T* data, bool delete_data_on_destruct = false);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, size_t sr, size_t ss, T* data, bool delete_data_on_destruct = false);
        virtual void create(size_t sx, size_t sy, size_t sz, size_t st, size_t sp, size_t sq, size_t sr, size_t ss, size_t su, T* data, bool delete_data_on_destruct = false);

        T& operator()( const std::vector<size_t>& ind );
        const T& operator()( const std::vector<size_t>& ind ) const;
        T& operator()( size_t x );
        const T& operator()( size_t x ) const;
        T& operator()( size_t x, size_t y );
        const T& operator()( size_t x, size_t y ) const;
        T& operator()( size_t x, size_t y, size_t z );
        const T& operator()( size_t x, size_t y, size_t z ) const;
        T& operator()( size_t x, size_t y, size_t z, size_t s );
        const T& operator()( size_t x, size_t y, size_t z, size_t s ) const;
        T& operator()( size_t x, size_t y, size_t z, size_t s, size_t p );
        const T& operator()( size_t x, size_t y, size_t z, size_t s, size_t p ) const;
        T& operator()( size_t x, size_t y, size_t z, size_t s, size_t p, size_t r );
        const T& operator()( size_t x, size_t y, size_t z, size_t s, size_t p, size_t r ) const;
        T& operator()( size_t x, size_t y, size_t z, size_t s, size_t p, size_t r, size_t a );
        const T& operator()( size_t x, size_t y, size_t z, size_t s, size_t p, size_t r, size_t a ) const;
        T& operator()( size_t x, size_t y, size_t z, size_t s, size_t p, size_t r, size_t a, size_t q );
        const T& operator()( size_t x, size_t y, size_t z, size_t s, size_t p, size_t r, size_t a, size_t q ) const;
        T& operator()( size_t x, size_t y, size_t z, size_t s, size_t p, size_t r, size_t a, size_t q, size_t u );
        const T& operator()( size_t x, size_t y, size_t z, size_t s, size_t p, size_t r, size_t a, size_t q, size_t u ) const;

        template<class... INDICES, class = std::enable_if_t<Core::any_of_v<Core::is_same_v<INDICES, Indexing::Slice>...>>>
        auto operator()(const INDICES&...);

        template<class... INDICES, class = std::enable_if_t<Core::any_of_v<Core::is_same_v<INDICES, Indexing::Slice>...>>>
        auto operator()(const INDICES&...) const
            -> const hoNDArrayView<T, gadgetron_detail::count_slices<0, INDICES...>::value,
                                   gadgetron_detail::is_contiguous_index<INDICES...>::value>;

        void fill(T value);

        T* begin();
        const T* begin() const;
        T* end();
        const T* end() const;

        T& at( size_t idx );
        const T& at( size_t idx ) const;
        T& operator[]( size_t idx );
        const T& operator[]( size_t idx ) const;

        //T& operator()( size_t idx );
        //const T& operator()( size_t idx ) const;
        //T& operator()( const std::vector<size_t>& ind );
        //const T& operator()( const std::vector<size_t>& ind ) const;

        template<typename T2>
        bool copyFrom(const hoNDArray<T2>& aArray) {
            try {
                if (!this->dimensions_equal(&aArray)) {
                    this->create(aArray.dimensions());
                }
                long long i;
#pragma omp parallel for default(none) private(i) shared(aArray)
                for (i = 0; i < (long long)elements_; i++) {
                    data_[i] = static_cast<T>(aArray(i));
                }
            }
            catch (...) {
                GERROR_STREAM("Exceptions happened in hoNDArray::copyFrom(...) ... ");
                return false;
            }
            return true;
        }

        void get_sub_array(const std::vector<size_t>& start, std::vector<size_t>& size, hoNDArray<T>& out) const;

        virtual void print(std::ostream& os) const;
        virtual void printContent(std::ostream& os) const;

        [[deprecated("Use IO::write instead")]]
        virtual bool serialize(char*& buf, size_t& len) const;
        [[deprecated("Use IO::read instead")]]
        virtual bool deserialize(char* buf, size_t& len);

    protected:
        using BaseClass::dimensions_;
        using BaseClass::offsetFactors_;
        using BaseClass::data_;
        using BaseClass::elements_;
        using BaseClass::delete_data_on_destruct_;

        virtual void allocate_memory();
        virtual void deallocate_memory();

        // Generic allocator / deallocator
        template<class X> void _allocate_memory( size_t size, X** data ) { *data = new X[size]; }
        template<class X> void _deallocate_memory( X* data ) { delete [] data; }
    };
}

#include "hoNDArray.hxx"
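A brief usage sketch against the declarations above (a hypothetical driver, assuming the definitions in hoNDArray.hxx behave as this interface suggests; dimensions and values are illustrative):

#include "hoNDArray.h"

using namespace Gadgetron;

int main() {
    // 3D array with dimensions 4 x 8 x 2, filled with a constant.
    hoNDArray<float> arr(4, 8, 2);
    arr.fill(1.0f);

    // Element access via operator() and operator[].
    arr(1, 2, 0) = 3.5f;
    float sum = 0.0f;
    for (size_t i = 0; i < 4 * 8 * 2; ++i) sum += arr[i];

    // Slice indexing: take all of the first two dimensions, fix the last.
    // count_slices gives D = 2 and is_contiguous_index is true for this
    // pattern, so the result is a contiguous hoNDArrayView<float, 2, true>.
    auto view = arr(Indexing::slice, Indexing::slice, size_t(0));
    view(0, 0) = 2.0f;

    return sum > 0.0f ? 0 : 1;
}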
GB_unop__sinh_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__sinh_fc32_fc32
// op(A') function:  GB_unop_tran__sinh_fc32_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = csinhf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = csinhf (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = csinhf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SINH || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_apply__sinh_fc32_fc32
(
    GxB_FC32_t *Cx,         // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;
        Cx [p] = csinhf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop_tran__sinh_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
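The generated kernel simply applies C99 csinhf to every stored entry. For reference, the same scalar operation in standalone C++ via std::complex (illustrative only; GraphBLAS itself uses the C complex type GxB_FC32_t):

#include <complex>
#include <cstdio>

int main() {
  // Equivalent of cij = csinhf(aij) for one entry.
  std::complex<float> aij(1.0f, 2.0f);
  std::complex<float> cij = std::sinh(aij);
  std::printf("sinh(1+2i) = %f + %fi\n", cij.real(), cij.imag());
  return 0;
}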
utils.h
#include <stdio.h>
#include <math.h>
#include <sys/time.h>

#define MIN(a,b) (((a) < (b)) ? (a) : (b))

static inline unsigned long long cycles()
{
    unsigned long long u;
    asm volatile ("rdtscp;shlq $32,%%rdx;orq %%rdx,%%rax;movq %%rax,%0"
                  : "=q"(u) :: "%rax", "%rdx", "%rcx");
    return u;
}

double myseconds()
{
    struct timeval tp;
    struct timezone tzp;
    gettimeofday(&tp, &tzp);
    return (double)tp.tv_sec + (double)tp.tv_usec * 1.e-6;
}

/* Initialize an m-row by n-column grid (row length n): boundary = 1, interior = 0. */
void init(double* p, int n, int m)
{
#pragma omp parallel for
    for (int j = 0; j < m; ++j) {
        for (int i = 0; i < n; i++) {
            if ((i == 0) || (j == 0) || (i == n - 1) || (j == m - 1))
                p[j * n + i] = 1.;
            else
                p[j * n + i] = 0.;
        }
    }
}

double maxNorm(double* v1, double* v2, int size)
{
    double mymax = 0.;
    /* Index the arrays directly: incrementing shared pointers inside an
       OpenMP parallel loop is a data race. */
#pragma omp parallel for reduction(max: mymax)
    for (int ii = 0; ii < size; ++ii) {
        double d = fabs(v1[ii] - v2[ii]);
        if (d > mymax)
            mymax = d;
    }
    return mymax;
}

double l2Norm(double* v1, double* v2, int size)
{
    double myl2 = 0.;
#pragma omp parallel for reduction(+: myl2)
    for (int ii = 0; ii < size; ++ii) {
        /* Accumulate the square of the difference; fabs(d) * d would go
           negative whenever d < 0. */
        double d = v1[ii] - v2[ii];
        myl2 += d * d;
    }
    return sqrt(myl2) / size;
}

/* Print the top-left (up to 15 x 15) corner of an n-row grid with row length m. */
void print(double* p, int m, int n)
{
    for (int i = 0; i < MIN(n, 15); ++i) {
        for (int j = MIN(m, 15); j > 0; --j) {
            printf("%e ", *p);
            ++p;
        }
        p += m - MIN(m, 15);
        printf("\n");
    }
}
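A small hypothetical driver showing how these helpers compose (the header ships no main): initialize two grids, perturb one interior point, and time a maxNorm comparison with myseconds(). Compiled as a single C++ translation unit that includes the header above.

#include <cstdio>
#include <cstdlib>
#include "utils.h"

int main() {
    const int n = 64, m = 64;   // grid: m rows of n columns
    double* a = (double*)std::malloc((size_t)n * m * sizeof(double));
    double* b = (double*)std::malloc((size_t)n * m * sizeof(double));
    init(a, n, m);
    init(b, n, m);
    b[n + 1] += 1e-3;           // perturb one interior point of b

    double t0 = myseconds();
    double diff = maxNorm(a, b, n * m);
    double t1 = myseconds();

    std::printf("maxNorm = %e (measured in %.6f s)\n", diff, t1 - t0);
    std::free(a);
    std::free(b);
    return 0;
}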
processing_img.c
#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
#include <mpi.h>
#include <string.h>

#ifdef OMP          /* if defined during compile, try to use OpenMP */
#ifdef _OPENMP
#include <omp.h>
#endif
#endif /* OMP */

#include "processing_img.h"
#include "send_wrappers.h"
#include "recv_wrappers.h"

#define filter_sum 1

//int filter[9] = { 1, 2, 1, 2, 4, 2, 1, 2, 1 };     // Gaussian blur (weights sum to 16)
//int filter[9] = { 0, 1, 0, 1, -4, 1, 0, 1, 0 };    // Edge detect (Laplacian)
int filter[9] = { 0, -1, 0, -1, 5, -1, 0, -1, 0 };   // Sharpen
//int filter[9] = { 0, 0, 0, -1, 1, 0, 0, 0, 0 };
//int filter[9] = { 0, 0, 0, 0, 1, 0, 0, 0, 0 };

// Forward declarations
int calculate_filtered_pixel(int pixel_idx, int* src_array, int width, int height, int* filter);
int compare_blocks(int* first_array, int* second_array, int block_width, int block_height);

int* create_random_array(int width, int height)
{
    int array_size = width * height;
    int* array = malloc(array_size * sizeof(int));
    int i;

    srand(time(NULL));
    for (i = 0; i < array_size; i++) {
        int shade = rand() % 255;
        array[i] = shade;
        // array[i] = i;
    }
    return array;
}

int* process_img(int* block, int block_width, int block_height, int rep_num, int cnv_option, int cnv_rounds)
{
    int i, rank, proc_num;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &proc_num);
    MPI_Request* requests_send, *requests_recv;

    int* tmp_block = malloc(block_width * block_height * sizeof(int));
    memset(tmp_block, '\0', block_width * block_height * sizeof(int));

    for (i = 1; i <= rep_num; i++) {
        // Send outer values
        requests_send = send_data(block, rank, proc_num, block_width, block_height);
        // Receive foreign values
        requests_recv = recv_data(block, rank, proc_num, block_width, block_height);

        // Process our (inner) block
        compute_inner_values(block, tmp_block, block_width, block_height, filter);

        // Wait for the foreign values
        wait_on_recv(requests_recv);

        // Process our (outer) block
        compute_outer_values(block, tmp_block, block_width, block_height, filter);

        // Check for convergence
        if (cnv_option && i % cnv_rounds == 0) {
            int convergence = compare_blocks(block, tmp_block, block_width, block_height);
            if (rank == 0) {
                int* cnv_buffer = malloc(proc_num * sizeof(int));
                MPI_Gather(&convergence, 1, MPI_INT, cnv_buffer, 1, MPI_INT, 0, MPI_COMM_WORLD);
                int cnv_sum = 0, j;
                for (j = 0; j < proc_num; j++)
                    cnv_sum += cnv_buffer[j];
                free(cnv_buffer);
                MPI_Bcast(&cnv_sum, 1, MPI_INT, 0, MPI_COMM_WORLD);
                // compare_blocks returns 1 when a block changed, so we have
                // converged when no rank reports a change.
                if (cnv_sum == 0)
                    break;
            } else {
                int cnv_sum = 0;
                // The receive buffer is ignored on non-root ranks.
                MPI_Gather(&convergence, 1, MPI_INT, NULL, 1, MPI_INT, 0, MPI_COMM_WORLD);
                MPI_Bcast(&cnv_sum, 1, MPI_INT, 0, MPI_COMM_WORLD);
                if (cnv_sum == 0)
                    break;
            }
        }

        // Wait on the sends of our outer values
        wait_on_send(requests_send);

        int* tmp = tmp_block;
        tmp_block = block;
        block = tmp;
    }
    free(tmp_block);
    return block;
}

void compute_inner_values(int* src_array, int* dest_array, int width, int height, int* filter)
{
    int i, row_number = 3;
    int array_size = width * height;

#ifdef OMP
#ifdef _OPENMP
#pragma omp parallel for num_threads(4)
#endif
#endif // OMP
    for (i = 2 * width + 2; i < array_size - 2 * width - 2; i++) {
        if (i % (row_number * width - 2) == 0) {
            // Skip elements of the first and last column
            i += 3;
            row_number++;
            continue;
        }
        dest_array[i] = calculate_filtered_pixel(i, src_array, width, height, filter);
    }
}

void compute_outer_values(int* src_array, int* dest_array, int width, int height, int* filter)
{
    int i;
    int array_size = width * height;

    // Compute outer lines
    for (i = width + 1; i < array_size - width - 1; i++) {
        // Go to last line
        if (i % (2 * width - 1) == 0) {
            i = array_size - 2 * width;
            continue;
        }
        dest_array[i] = calculate_filtered_pixel(i, src_array, width, height, filter);
    }

    // Compute outer columns
    for (i = 2 * width + 1; i < array_size - 3 * width + 3; i += width) {
        dest_array[i] = calculate_filtered_pixel(i, src_array, width, height, filter);
        int right_idx = i + width - 3;
        dest_array[right_idx] = calculate_filtered_pixel(right_idx, src_array, width, height, filter);
    }
}

int calculate_filtered_pixel(int pixel_idx, int* src_array, int width, int height, int* filter)
{
    int sum = 0;
    int j, z = 0;
    int coef = -1;

    for (j = pixel_idx + coef * width - 1; j <= pixel_idx + width + 1; j++) {
        if ((j % width) == (pixel_idx + 2) % width) {
            coef++;
            j = pixel_idx + coef * width - 2;
            continue;
        }
        sum += (src_array[j] * filter[z]) / filter_sum;
        z++;
    }

    if (sum > 255)
        return 255;
    else
        return sum;
}

void print_array(int* arr, int width, int height)
{
    int i;
    for (i = 0; i < width * height; i++) {
        if ((i != 0) && (i % width == 0))
            printf("\n");
        printf("%4d ", arr[i]);
    }
    putchar('\n');
}

// return 0 if blocks are equal
// return 1 if blocks are different
int compare_blocks(int* first_array, int* second_array, int block_width, int block_height)
{
    int i;
    for (i = block_width + 1; i < block_height * block_width - block_width - 1; i++) {
        if (first_array[i] != second_array[i])
            return 1;
    }
    return 0;
}
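calculate_filtered_pixel walks the 3x3 neighborhood with arithmetic on the flattened index, which is hard to verify by eye. A hypothetical direct reference for a non-border pixel, with the same per-term integer division by the filter sum and the same clamp to 255 (filtered_pixel_ref is not part of the original file):

// Reference 3x3 convolution at (row, col) of a width-column image.
// Assumes the whole neighborhood is in bounds (no border handling).
int filtered_pixel_ref(const int* src, int width, int row, int col,
                       const int* filter, int fsum) {
  int sum = 0, z = 0;
  for (int dr = -1; dr <= 1; ++dr)
    for (int dc = -1; dc <= 1; ++dc)
      sum += (src[(row + dr) * width + (col + dc)] * filter[z++]) / fsum;
  return sum > 255 ? 255 : sum;
}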
GB_binop__pow_fp64.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_08__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_02__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_04__pow_fp64)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__pow_fp64)
// A*D function (colscale):         GB ((none))
// D*A function (rowscale):         GB ((none))
// C+=B function (dense accum):     GB (_Cdense_accumB__pow_fp64)
// C+=b function (dense accum):     GB (_Cdense_accumb__pow_fp64)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__pow_fp64)
// C=scalar+B                       GB (_bind1st__pow_fp64)
// C=scalar+B'                      GB (_bind1st_tran__pow_fp64)
// C=A+scalar                       GB (_bind2nd__pow_fp64)
// C=A'+scalar                      GB (_bind2nd_tran__pow_fp64)

// C type:     double
// A type:     double
// A pattern?  0
// B type:     double
// B pattern?  0

// BinaryOp:   cij = GB_pow (aij, bij)

#define GB_ATYPE \
    double

#define GB_BTYPE \
    double

#define GB_CTYPE \
    double

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    double aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    double bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    double t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = GB_pow (x, y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    1

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_POW || GxB_NO_FP64 || GxB_NO_POW_FP64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing,
    const int B_ntasks,
    const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__pow_fp64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type double
        double bwork = (*((double *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

#if 0

GrB_Info GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *restrict Cx = (double *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__pow_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    double alpha_scalar ;
    double beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((double *) alpha_scalar_in)) ;
        beta_scalar  = (*((double *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__pow_fp64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing,
    const int A_ntasks,
    const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__pow_fp64)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing,
    const int M_ntasks,
    const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__pow_fp64)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double *Cx = (double *) Cx_output ;
    double   x = (*((double *) x_input)) ;
    double *Bx = (double *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        double bij = GBX (Bx, p, false) ;
        Cx [p] = GB_pow (x, bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__pow_fp64)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    double *Cx = (double *) Cx_output ;
    double *Ax = (double *) Ax_input ;
    double   y = (*((double *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        double aij = GBX (Ax, p, false) ;
        Cx [p] = GB_pow (aij, y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow (x, aij) ; \
}

GrB_Info GB (_bind1st_tran__pow_fp64)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double x = (*((const double *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    #undef  GB_ATYPE
    #define GB_ATYPE \
        double
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
    double aij = GBX (Ax, pA, false) ; \
    Cx [pC] = GB_pow (aij, y) ; \
}

GrB_Info GB (_bind2nd_tran__pow_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    double y = (*((const double *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__land_int64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_int64) // A.*B function (eWiseMult): GB (_AemultB_01__land_int64) // A.*B function (eWiseMult): GB (_AemultB_02__land_int64) // A.*B function (eWiseMult): GB (_AemultB_03__land_int64) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_int64) // A*D function (colscale): GB (_AxD__land_int64) // D*A function (rowscale): GB (_DxB__land_int64) // C+=B function (dense accum): GB (_Cdense_accumB__land_int64) // C+=b function (dense accum): GB (_Cdense_accumb__land_int64) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_int64) // C=scalar+B GB (_bind1st__land_int64) // C=scalar+B' GB (_bind1st_tran__land_int64) // C=A+scalar GB (_bind2nd__land_int64) // C=A'+scalar GB (_bind2nd_tran__land_int64) // C type: int64_t // A type: int64_t // B,b type: int64_t // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ int64_t #define GB_BTYPE \ int64_t #define GB_CTYPE \ int64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ int64_t aij = GBX (Ax, pA, A_iso) // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ int64_t bij = GBX (Bx, pB, B_iso) // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ int64_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) && (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_INT64 || GxB_NO_LAND_INT64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__land_int64) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__land_int64) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__land_int64) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type int64_t int64_t bwork = (*((int64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__land_int64) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__land_int64) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *restrict Cx = (int64_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__land_int64) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; #include "GB_add_template.c" GB_FREE_WORK ; return (GrB_SUCCESS) ; 
#endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_01__land_int64) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_01_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__land_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_03__land_int64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_03_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__land_int64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__land_int64) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else int64_t *Cx = (int64_t *) Cx_output ; int64_t x = (*((int64_t *) x_input)) ; int64_t *Bx = (int64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; int64_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) && (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__land_int64) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; int64_t *Cx = (int64_t *) Cx_output ; int64_t *Ax = (int64_t *) Ax_input ; int64_t y = (*((int64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; int64_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) && (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__land_int64) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ int64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t x = (*((const int64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ int64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ int64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__land_int64) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t y = (*((const int64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
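/* Illustrative sketch, not part of the generated file above: the
   _bind1st/_bind2nd kernels for LAND_int64 reduce to a single elementwise
   loop in which every output entry is the logical AND of a bound scalar and
   a matrix entry, normalized to 0/1 in int64_t. A self-contained equivalent
   of the dense case (Bb == NULL, every entry present); the function name is
   hypothetical: */

#include <stdint.h>

static void land_int64_bind1st_dense(int64_t *Cx, int64_t x,
                                     const int64_t *Bx, int64_t bnz)
{
    for (int64_t p = 0 ; p < bnz ; p++)
    {
        // same operator body as the generated kernel:
        // Cx [p] = ((x != 0) && (bij != 0))
        Cx [p] = ((x != 0) && (Bx [p] != 0)) ;
    }
}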
GB_binop__min_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__min_uint32 // A.*B function (eWiseMult): GB_AemultB__min_uint32 // A*D function (colscale): GB_AxD__min_uint32 // D*A function (rowscale): GB_DxB__min_uint32 // C+=B function (dense accum): GB_Cdense_accumB__min_uint32 // C+=b function (dense accum): GB_Cdense_accumb__min_uint32 // C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__min_uint32 // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__min_uint32 // C=scalar+B GB_bind1st__min_uint32 // C=scalar+B' GB_bind1st_tran__min_uint32 // C=A+scalar GB_bind2nd__min_uint32 // C=A'+scalar GB_bind2nd_tran__min_uint32 // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = GB_IMIN (aij, bij) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_IMIN (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MIN || GxB_NO_UINT32 || GxB_NO_MIN_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB_Cdense_ewise3_accum__min_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__min_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__min_uint32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__min_uint32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__min_uint32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__min_uint32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__min_uint32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const 
int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__min_uint32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__min_uint32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = Bx [p] ; Cx [p] = GB_IMIN (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__min_uint32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = Ax [p] ; Cx [p] = GB_IMIN (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_IMIN (x, aij) ; \ } GrB_Info 
GB_bind1st_tran__min_uint32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = Ax [pA] ; \ Cx [pC] = GB_IMIN (aij, y) ; \ } GrB_Info GB_bind2nd_tran__min_uint32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
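/* Illustrative sketch, not part of the generated file above: every kernel in
   this file is stamped out from the single macro GB_BINOP(z,x,y,i,j), which
   here expands to z = GB_IMIN (x, y). Assuming GB_IMIN is the conventional
   integer-minimum macro (an assumption, not copied from GB.h), the colscale
   kernel C = A*D degenerates, for one column j of A, to min(Ax[p], D(j,j)): */

#include <stdint.h>

#define IMIN_SKETCH(x,y) (((x) < (y)) ? (x) : (y))  /* assumed shape of GB_IMIN */

/* scale one column of A (entries Ax[0..anz-1]) by the diagonal scalar dj */
static void colscale_min_uint32_sketch(uint32_t *Cx, const uint32_t *Ax,
                                       uint32_t dj, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint32_t z ;
        z = IMIN_SKETCH (Ax [p], dj) ;   /* GB_BINOP (z, aij, dj, i, j) */
        Cx [p] = z ;
    }
}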
add.c
//-------------------------------------------------------------------------//
//                                                                         //
//  This benchmark is an OpenMP C version of the NPB BT code. This OpenMP  //
//  C version is developed by the Center for Manycore Programming at Seoul //
//  National University and derived from the OpenMP Fortran versions in    //
//  "NPB3.3-OMP" developed by NAS.                                         //
//                                                                         //
//  Permission to use, copy, distribute and modify this software for any   //
//  purpose with or without fee is hereby granted. This software is        //
//  provided "as is" without express or implied warranty.                  //
//                                                                         //
//  Information on NPB 3.3, including the technical report, the original   //
//  specifications, source code, results and information on how to submit  //
//  new results, is available at:                                          //
//                                                                         //
//           http://www.nas.nasa.gov/Software/NPB/                         //
//                                                                         //
//  Send comments or suggestions for this OpenMP C version to              //
//  [email protected]                                                      //
//                                                                         //
//          Center for Manycore Programming                                //
//          School of Computer Science and Engineering                     //
//          Seoul National University                                      //
//          Seoul 151-744, Korea                                           //
//                                                                         //
//          E-mail:  [email protected]                                     //
//                                                                         //
//-------------------------------------------------------------------------//

//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo,    //
//          and Jaejin Lee                                                 //
//-------------------------------------------------------------------------//

#include "header.h"
#include "timers.h"

//---------------------------------------------------------------------
// addition of update to the vector u
//---------------------------------------------------------------------
void add()
{
  int i, j, k, m;
  //kai
  //int k15;
  // consistent_data(&k15, "int", 1);

  if (timeron) timer_start(t_add);
  #pragma omp parallel for default(shared) private(i,j,k,m)
  for (k = 1; k <= grid_points[2]-2; k++) {
    for (j = 1; j <= grid_points[1]-2; j++) {
      for (i = 1; i <= grid_points[0]-2; i++) {
        for (m = 0; m < 5; m++) {
          u[k][j][i][m] = u[k][j][i][m] + rhs[k][j][i][m];
        }
      }
    }
    //kai: k15 is declared only in the commented-out block above, so this
    // assignment must stay disabled as well or the file does not compile.
    //k15 = k;
    // printf("k15=%p\n",&k15);
  }
  if (timeron) timer_stop(t_add);
}
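/* Illustrative sketch, not part of add.c above: add() is a pointwise update
   with no loop-carried dependences, which is why the outer k loop can be
   parallelized with a plain "omp parallel for". The same pattern on a
   flattened array (names here are hypothetical): */

#include <omp.h>

static void add_flat(double *u, const double *rhs, long n)
{
    long p;
    #pragma omp parallel for default(shared) private(p) schedule(static)
    for (p = 0; p < n; p++) {
        u[p] = u[p] + rhs[p];   /* same update as u[k][j][i][m] += rhs[k][j][i][m] */
    }
}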
SirFAST.c
/* * Copyright (c) <2008 - 2020>, University of Washington, Simon Fraser University, Bilkent University * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, this list * of conditions and the following disclaimer. * - Redistributions in binary form must reproduce the above copyright notice, this * list of conditions and the following disclaimer in the documentation and/or other * materials provided with the distribution. * - Neither the names of the University of Washington, Simon Fraser University, * nor the names of its contributors may be * used to endorse or promote products derived from this software without specific * prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /* Authors: Farhad Hormozdiari Faraz Hach Can Alkan Emails: farhadh AT uw DOT edu fhach AT cs DOT sfu DOT ca calkan AT cs DOT bilkent DOT edu DOT tr */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <dirent.h> #include <xmmintrin.h> #include <emmintrin.h> #include <mmintrin.h> #include <omp.h> #include "Common.h" #include "Reads.h" #include "HashTable.h" #include "Output.h" #include "SirFAST.h" #include "RefGenome.h" #define min(a,b) ((a)>(b)?(b):(a)) #define min3(a,b,c) ((a)>(b)?(b>c?c:b):(a>c?c:a)) #define CHARCODE(a) (a=='A' ? 0 : (a=='C' ? 1 : (a=='G' ? 2 : (a=='T' ? 
3 : 4))))

#define MAX_REF_SIZE 18

#define KEY_LENGTH 10
#define KEY_LENGTH0 5
#define INDEL_GAP 3
#define EXPECTED_GAP 2
#define INITIAL_GAP01 -1
#define INITIAL_GAP12 0
#define INITIAL_GAP23 5
#define DEBUG 2
#define TEST_KEY_NUM 3

char *versionNumberF = "0.4";

long long verificationCnt = 0;
long long mappingCnt = 0;
long long mappedSeqCnt = 0;
long long completedSeqCnt = 0;

char *mappingOutput;

char *_msf_refGen = NULL;
int _msf_refGenLength = 0;
int _msf_refGenOffset = 0;
char *_msf_refGenName = NULL;

int _msf_refGenBeg;
int _msf_refGenEnd;

IHashTable *_msf_hashTable = NULL;

int *_msf_samplingLocsEnds;

Read *_msf_seqList;
int _msf_seqListSize;
int _msf_totalSeqListSize;

Pair *_msf_sort_seqList = NULL;
int *_msf_map_sort_seqList;

ReadIndexTable *_msf_rIndex = NULL;
int _msf_rIndexSize;
int _msf_rIndexMax;

int **_msf_verifiedLocs = NULL;

int *_msf_seqHits;
int _msf_openFiles = 0;
int _msf_maxLSize = 0;
int _msf_maxRSize = 0;

MappingInfo *_msf_mappingInfo;
BestFullMappingInfo *bestHitMappingInfo;

int _msf_maxFile = 0;
char _msf_fileName[4000][200][2][FILE_NAME_LENGTH];
int _msf_fileCount[4000];

char *_msf_readHasConcordantMapping;
int *_msf_oeaMapping;
int *_msf_discordantMapping;

/************************************************************************************************************/

int compare(const void *a, const void *b) {
  return ((Pair *) a)->hv - ((Pair *) b)->hv;
}

float str2int(char *str, int index1, int index2) {
  char tmp[SEQ_MAX_LENGTH];
  strncpy(tmp, &str[index1], index2 - index1);
  tmp[index2 - index1] = '\0';
  return atol(tmp);
}

void initBestMapping(int totalReadNumber) {
  int i = 0;
  bestHitMappingInfo = getMem(totalReadNumber * sizeof(BestFullMappingInfo),
      "bestHitMappingInfo @initBestMapping()");
  for (i = 0; i < totalReadNumber; i++) {
    bestHitMappingInfo[i].loc = -1;
    bestHitMappingInfo[i].tprob = 0.0;
  }
}

void finalizeBestConcordantDiscordant() {
  int i = 0;
  for (i = 0; i < _msf_seqListSize / 2; i++) {
    outputPairFullMappingInfo(i);
  }
  freeMem(bestHitMappingInfo, _msf_seqListSize * sizeof(BestFullMappingInfo),
      "bestHitMappingInfo @finalizeBestConcordantDiscordant()");
}

void finalizeBestSingleMapping() {
  int i = 0;
  char *_tmpQual, *_tmpSeq;
  char rqual[SEQ_LENGTH + 1];
  SAM _msf_output;
  OPT_FIELDS _msf_optionalFields[2];
  rqual[SEQ_LENGTH] = '\0';
  for (i = 0; i < _msf_seqListSize; i++) {
    if (_msf_seqList[i].hits[0] != 0) {
      if (bestHitMappingInfo[i].dir) {
        reverse(_msf_seqList[i].qual, rqual, SEQ_LENGTH);
        _tmpQual = rqual;
        _tmpSeq = _msf_seqList[i].rseq;
      } else {
        _tmpQual = _msf_seqList[i].qual;
        _tmpSeq = _msf_seqList[i].seq;
      }
      _msf_output.QNAME = _msf_seqList[i].name;
      _msf_output.FLAG = 16 * bestHitMappingInfo[i].dir;
      _msf_output.RNAME = bestHitMappingInfo[i].chr;
      _msf_output.POS = bestHitMappingInfo[i].loc;
      _msf_output.MAPQ = mapQ(i);
      _msf_output.CIGAR = bestHitMappingInfo[i].cigar;
      _msf_output.MRNAME = "*";
      _msf_output.MPOS = 0;
      _msf_output.ISIZE = 0;
      _msf_output.SEQ = _tmpSeq;
      _msf_output.QUAL = _tmpQual;
      _msf_output.optSize = 2;
      _msf_output.optFields = _msf_optionalFields;
      _msf_optionalFields[0].tag = "NM";
      _msf_optionalFields[0].type = 'i';
      _msf_optionalFields[0].iVal = bestHitMappingInfo[i].err;
      _msf_optionalFields[1].tag = "MD";
      _msf_optionalFields[1].type = 'Z';
      _msf_optionalFields[1].sVal = bestHitMappingInfo[i].md;
      output(_msf_output, 0);
    }
  }
  // the buffer was allocated with sizeof(BestFullMappingInfo) in
  // initBestMapping(), so it must be released with the same element size
  freeMem(bestHitMappingInfo, _msf_seqListSize * sizeof(BestFullMappingInfo),
      "bestHitMappingInfo @finalizeBestSingleMapping()");
}

void preProcessReads() {
  int i = 0;
  _msf_sort_seqList = getMem(_msf_seqListSize * sizeof(Pair),
      "_msf_sort_seqList @preProcessReads()");
  for (i = 0; i < _msf_seqListSize; i++) {
    _msf_sort_seqList[i].hv = hashVal(_msf_seqList[i].seq);
    _msf_sort_seqList[i].readNumber = i;
  }
  qsort(_msf_sort_seqList, _msf_seqListSize, sizeof(Pair), compare);
  _msf_map_sort_seqList = getMem(_msf_seqListSize * sizeof(int),
      "_msf_map_sort_seqList @preProcessReads()");
  for (i = 0; i < _msf_seqListSize; i++)
    _msf_map_sort_seqList[_msf_seqList[i].readNumber] = i;
}

void resetFAST(unsigned int seqListSize) {
  freeMem(_msf_samplingLocsEnds, 1, "_msf_samplingLocsEnds @resetFAST()");
  _msf_samplingLocsEnds = NULL;
  freeMem(_msf_oeaMapping, _msf_seqListSize * sizeof(int),
      "_msf_oeaMapping @resetFAST()");
  freeMem(_msf_discordantMapping, _msf_seqListSize * sizeof(int),
      "_msf_discordantMapping @resetFAST()");
  freeMem(_msf_sort_seqList, _msf_seqListSize * sizeof(Pair),
      "_msf_sort_seqList @resetFAST()");
  freeMem(_msf_map_sort_seqList, _msf_seqListSize * sizeof(int),
      "_msf_map_sort_seqList @resetFAST()");
  if (pairedEndMode) {
    freeMem(_msf_mappingInfo, seqListSize * sizeof (MappingInfo),
        "_msf_mappingInfo @resetFAST()");
    freeMem(_msf_seqHits, _msf_seqListSize * sizeof(int),
        "_msf_seqHits @resetFAST()");
    _msf_seqHits = NULL;
    freeMem(_msf_readHasConcordantMapping, _msf_seqListSize / 2 * sizeof(char),
        "_msf_readHasConcordantMapping @resetFAST()");
    _msf_refGenOffset = 0;
  }
}

void finalizeFAST() {
  freeMem(_msf_seqHits, (_msf_seqListSize) * sizeof(int),
      "_msf_seqHits @finalizeFAST()");
  freeMem(_msf_refGenName, 4 * SEQ_LENGTH, "_msf_refGenName @finalizeFAST()");
  // _msf_map_sort_seqList holds ints and _msf_sort_seqList holds Pairs;
  // the two element sizes were swapped here
  freeMem(_msf_map_sort_seqList, sizeof(int) * _msf_seqListSize,
      "_msf_map_sort_seqList @finalizeFAST()");
  freeMem(_msf_sort_seqList, sizeof(Pair) * _msf_seqListSize,
      "_msf_sort_seqList @finalizeFAST()");
}

int addCigarSize(int cnt) {
  if (cnt < 10)
    return 1;
  else if (cnt < 100)
    return 2;
  return 3;
}

/* Generate Cigar from the back tracking matrix */
/*
void generateCigar(char *matrix, int matrixLength, char *cigar) {
  int i = 0;
  int counterM = 0;
  int counterI = 0;
  int counterD = 0;
  int cigarSize = 0;
  cigar[0] = '\0';
  while (i < matrixLength) {
    if (matrix[i] == 'M') {
      counterM++;
      if (counterI != 0) {
        sprintf(cigar, "%s%dI", cigar, counterI);
        cigarSize += addCigarSize(counterI) + 1;
        cigar[cigarSize] = '\0';
        counterI = 0;
      } else if (counterD != 0) {
        sprintf(cigar, "%s%dD", cigar, counterD);
        cigarSize += addCigarSize(counterD) + 1;
        cigar[cigarSize] = '\0';
        counterD = 0;
      }
    } else if (matrix[i] == 'I') {
      if (counterM != 0) {
        sprintf(cigar, "%s%dM", cigar, counterM);
        cigarSize += addCigarSize(counterM) + 1;
        cigar[cigarSize] = '\0';
        counterM = 0;
      } else if (counterD != 0) {
        sprintf(cigar, "%s%dD", cigar, counterD);
        cigarSize += addCigarSize(counterD) + 1;
        cigar[cigarSize] = '\0';
        counterD = 0;
      }
      counterI++;
      i++;
    } else if (matrix[i] == 'D') {
      if (counterM != 0) {
        sprintf(cigar, "%s%dM", cigar, counterM);
        cigarSize += addCigarSize(counterM) + 1;
        cigar[cigarSize] = '\0';
        counterM = 0;
      } else if (counterI != 0) {
        sprintf(cigar, "%s%dI", cigar, counterI);
        cigarSize += addCigarSize(counterI) + 1;
        cigar[cigarSize] = '\0';
        counterI = 0;
      }
      counterD++;
      i++;
    } else {
      counterM++;
      if (counterI != 0) {
        sprintf(cigar, "%s%dI", cigar, counterI);
        cigarSize += addCigarSize(counterI) + 1;
        cigar[cigarSize] = '\0';
        counterI = 0;
      } else if (counterD != 0) {
        sprintf(cigar, "%s%dD", cigar, counterD);
        cigarSize += addCigarSize(counterD) + 1;
        cigar[cigarSize] = '\0';
        counterD = 0;
      }
    }
    i++;
  }
  if (counterM != 0) {
    sprintf(cigar, "%s%dM", cigar, counterM);
    cigarSize += addCigarSize(counterM) + 1;
cigar[cigarSize] = '\0'; counterM = 0; } else if (counterI != 0) { sprintf(cigar, "%s%dI", cigar, counterI); cigarSize += addCigarSize(counterI) + 1; cigar[cigarSize] = '\0'; counterI = 0; } else if (counterD != 0) { sprintf(cigar, "%s%dD", cigar, counterD); cigarSize += addCigarSize(counterD) + 1; cigar[cigarSize] = '\0'; counterD = 0; } cigar[cigarSize] = '\0'; } */ /* Creates the Cigar output from the mismatching positions format [0-9]+(([ACTGN]|\^[ACTGN]+)[0-9]+)* */ /* void generateCigarFromMD(char *mismatch, int mismatchLength, char *cigar) { int i = 0; int j = 0; int start = 0; int cigarSize = 0; cigar[0] = '\0'; while (i < mismatchLength) { if (mismatch[i] >= '0' && mismatch[i] <= '9') { start = i; while (mismatch[i] >= '0' && mismatch[i] <= '9' && i < mismatchLength) i++; int value = atoi(mismatch + start); for (j = 0; j < value - 1; j++) { cigar[cigarSize] = 'M'; cigarSize++; } cigar[cigarSize] = 'M'; } else if (mismatch[i] == '^') { cigar[cigarSize] = 'I'; i++; } else if (mismatch[i] == '\'') { cigar[cigarSize] = 'D'; i++; } else { cigar[cigarSize] = 'M'; cigarSize++; } cigarSize++; i++; } cigar[cigarSize] = '\0'; } */ void generateSNPSAM(char *matrix, int matrixLength, char *outputSNP) { int i = 0; int counterM = 0; int counterD = 0; char delete[100]; int snpSize = 0; outputSNP[0] = '\0'; delete[0] = '\0'; while (i < matrixLength) { if (matrix[i] == 'M') { counterM++; if (counterD != 0) { delete[counterD] = '\0'; counterD = 0; sprintf(outputSNP, "%s^%s", outputSNP, delete); snpSize += strlen(delete) + 1; outputSNP[snpSize] = '\0'; delete[0] = '\0'; } } else if (matrix[i] == 'D') { if (counterM != 0) { sprintf(outputSNP, "%s%d", outputSNP, counterM); snpSize += addCigarSize(counterM); outputSNP[snpSize] = '\0'; counterM = 0; delete[counterD] = matrix[i + 1]; i++; counterD++; } else if (counterD != 0) { delete[counterD] = matrix[i + 1]; counterD++; i++; } else { delete[counterD] = matrix[i + 1]; counterD++; i++; } } else if (matrix[i] == 'I') { if (counterM != 0) { // sprintf(outputSNP, "%s%d\0", outputSNP, counterM); //counterM++; } else if (counterD != 0) { delete[counterD] = '\0'; sprintf(outputSNP, "%s^%s", outputSNP, delete); snpSize += strlen(delete) + 1; outputSNP[snpSize] = '\0'; counterD = 0; delete[0] = '\0'; } i++; } else { if (counterM != 0) { sprintf(outputSNP, "%s%d", outputSNP, counterM); snpSize += addCigarSize(counterM); outputSNP[snpSize] = '\0'; counterM = 0; } if (counterD != 0) { delete[counterD] = '\0'; counterD = 0; sprintf(outputSNP, "%s^%s", outputSNP, delete); snpSize += strlen(delete) + 1; outputSNP[snpSize] = '\0'; delete[0] = '\0'; } sprintf(outputSNP, "%s%c", outputSNP, matrix[i]); snpSize += 1; outputSNP[snpSize] = '\0'; } i++; } if (counterM != 0) { sprintf(outputSNP, "%s%d", outputSNP, counterM); snpSize += addCigarSize(counterM); outputSNP[snpSize] = '\0'; counterM = 0; } else if (counterD != 0) { delete[counterD] = '\0'; sprintf(outputSNP, "%s^%s", outputSNP, delete); snpSize += strlen(delete) + 1; outputSNP[snpSize] = '\0'; counterD = 0; } outputSNP[snpSize] = '\0'; } int compareOut(const void *a, const void *b) { FullMappingInfo *aInfo = (FullMappingInfo *) a; FullMappingInfo *bInfo = (FullMappingInfo *) b; return aInfo->loc - bInfo->loc; } /************************************************/ /* direction = 0 forward */ /* 1 backward */ /************************************************/ void outputPairFullMappingInfo(int readNumber) { char *seq1, *seq2, *rseq1, *rseq2, *qual1, *qual2; char rqual1[SEQ_LENGTH + 1], rqual2[SEQ_LENGTH + 1]; SAM 
_msf_output;
  OPT_FIELDS _msf_optionalFields[8];
  rqual1[SEQ_LENGTH] = rqual2[SEQ_LENGTH] = '\0';
  seq1 = _msf_seqList[readNumber * 2].seq;
  rseq1 = _msf_seqList[readNumber * 2].rseq;
  qual1 = _msf_seqList[readNumber * 2].qual;
  reverse(_msf_seqList[readNumber * 2].qual, rqual1, SEQ_LENGTH);
  seq2 = _msf_seqList[readNumber * 2 + 1].seq;
  rseq2 = _msf_seqList[readNumber * 2 + 1].rseq;
  qual2 = _msf_seqList[readNumber * 2 + 1].qual;
  reverse(_msf_seqList[readNumber * 2 + 1].qual, rqual2, SEQ_LENGTH);
  if (bestHitMappingInfo[readNumber * 2].loc == -1
      && bestHitMappingInfo[readNumber * 2 + 1].loc == -1) {
    return;
  } else {
    char *seq;
    char *qual;
    char d1;
    char d2;
    int isize;
    int proper = 0;
    // ISIZE CALCULATION
    // The distance between outer edges
    isize = abs(bestHitMappingInfo[readNumber * 2].loc
        - bestHitMappingInfo[readNumber * 2 + 1].loc) + SEQ_LENGTH - 2;
    if (bestHitMappingInfo[readNumber * 2].loc
        - bestHitMappingInfo[readNumber * 2 + 1].loc > 0) {
      isize *= -1;
    }
    d1 = (bestHitMappingInfo[readNumber * 2].dir == -1) ? 1 : 0;
    d2 = (bestHitMappingInfo[readNumber * 2 + 1].dir == -1) ? 1 : 0;
    if (d1) {
      seq = rseq1;
      qual = rqual1;
    } else {
      seq = seq1;
      qual = qual1;
    }
    //TODO for CG like SOLID
    if ((d1 && d2) || (!d1 && !d2)) {
      proper = 2;
    } else {
      proper = 0;
    }
    _msf_output.POS = bestHitMappingInfo[readNumber * 2].loc;
    _msf_output.MPOS = bestHitMappingInfo[readNumber * 2 + 1].loc;
    _msf_output.FLAG = 1 + proper + 16 * d1 + 32 * d2 + 64;
    _msf_output.ISIZE = isize;
    _msf_output.SEQ = seq;
    _msf_output.QUAL = qual;
    _msf_output.QNAME = _msf_seqList[readNumber * 2].name;
    _msf_output.RNAME = bestHitMappingInfo[readNumber * 2].chr;
    _msf_output.MAPQ = mapQ(readNumber * 2) + mapQ(readNumber * 2 + 1);
    _msf_output.CIGAR = bestHitMappingInfo[readNumber * 2].cigar;
    _msf_output.MRNAME = "=";
    _msf_output.optSize = 2;
    _msf_output.optFields = _msf_optionalFields;
    _msf_optionalFields[0].tag = "NM";
    _msf_optionalFields[0].type = 'i';
    _msf_optionalFields[0].iVal = bestHitMappingInfo[readNumber * 2].err;
    _msf_optionalFields[1].tag = "MD";
    _msf_optionalFields[1].type = 'Z';
    _msf_optionalFields[1].sVal = bestHitMappingInfo[readNumber * 2].md;
    output(_msf_output, 0);
    if (d2) {
      seq = rseq2;
      qual = rqual2;
    } else {
      seq = seq2;
      qual = qual2;
    }
    _msf_output.POS = bestHitMappingInfo[readNumber * 2 + 1].loc;
    _msf_output.MPOS = bestHitMappingInfo[readNumber * 2].loc;
    _msf_output.FLAG = 1 + proper + 16 * d2 + 32 * d1 + 128;
    _msf_output.ISIZE = -isize;
    _msf_output.SEQ = seq;
    _msf_output.QUAL = qual;
    _msf_output.QNAME = _msf_seqList[readNumber * 2].name;
    _msf_output.RNAME = bestHitMappingInfo[readNumber * 2].chr;
    _msf_output.MAPQ = mapQ(readNumber * 2) + mapQ(readNumber * 2 + 1);
    _msf_output.CIGAR = bestHitMappingInfo[readNumber * 2 + 1].cigar;
    _msf_output.MRNAME = "=";
    _msf_output.optSize = 2;
    _msf_output.optFields = _msf_optionalFields;
    _msf_optionalFields[0].tag = "NM";
    _msf_optionalFields[0].type = 'i';
    _msf_optionalFields[0].iVal = bestHitMappingInfo[readNumber * 2 + 1].err;
    _msf_optionalFields[1].tag = "MD";
    _msf_optionalFields[1].type = 'Z';
    _msf_optionalFields[1].sVal = bestHitMappingInfo[readNumber * 2 + 1].md;
    output(_msf_output, 0);
  }
}

/*
  Find the closest one to c
  @return 0: if x1 is closer to c
          1: if x2 is closer to c
          2: if both distances are equal
          -1: on error
*/
int findNearest(int x1, int x2, int c) {
  if (abs(x1 - c) < abs(x2 - c))
    return 0;
  else if (abs(x1 - c) > abs(x2 - c))
    return 1;
  else if (abs(x1 - c) == abs(x2 - c))
    return 2;
  else
    return -1;
}

double mapProb(int readNumber, char *md, int dir, int err) {
  int i = 0;
int mdlen = strlen(md); char buf[MAX_CIGAR_SIZE]; int j = 0; double phred = 0.0; int errloc = 0; int errcnt = 0; //since I cannot calculate deletion base quality buf[0] = 0; if (err == 0) return 1.0; while (i<mdlen){ if (isdigit(md[i])) buf[j++]=md[i++]; else if (isalpha(md[i])){ /* mismatch */ errcnt++; buf[j] = '\0'; if (j != 0) errloc += atoi(buf); else if (i!=0) errloc++; j=0; buf[0]=0; if (dir) phred += (double) (_msf_seqList[readNumber].qual[SEQ_LENGTH-errloc-1] - 33); else phred += (double) (_msf_seqList[readNumber].qual[errloc] - 33); i++; } else if (md[i]=='^'){ /* insertion to the read / deletion from reference */ if (j!=0){ buf[j]=0; errloc += atoi(buf); buf[0] = 0; } j=0; i++; /* pass ^ */ while (isalpha(md[i++])) j++; errloc += j; j = 0; } } double indel_prob = 1; if (errcnt != err) indel_prob = 0.0002 * (err - errcnt); return pow(10, -1 * (phred / 10)) * indel_prob; } int mapQ(int readNumber) { int mapqual; double mapprob; mapprob = mapProb(readNumber, bestHitMappingInfo[readNumber].md, bestHitMappingInfo[readNumber].dir, bestHitMappingInfo[readNumber].err); if (mapprob == bestHitMappingInfo[readNumber].tprob) mapqual = 40; else mapqual = (int) (round(-10.0 * log10(1 - (mapprob / bestHitMappingInfo[readNumber].tprob)))); if (mapqual > 40) mapqual = 40; return mapqual; } void setPairFullMappingInfo(int readNumber, FullMappingInfo mi1, FullMappingInfo mi2) { bestHitMappingInfo[readNumber * 2].loc = mi1.loc; bestHitMappingInfo[readNumber * 2].dir = mi1.dir; bestHitMappingInfo[readNumber * 2].err = mi1.err; bestHitMappingInfo[readNumber * 2].score = mi1.score; snprintf(bestHitMappingInfo[readNumber * 2].chr, MAX_REF_SIZE, "%s", _msf_refGenName); strncpy(bestHitMappingInfo[readNumber * 2].md, mi1.md, strlen(mi1.md) + 1); strncpy(bestHitMappingInfo[readNumber * 2].cigar, mi1.cigar, strlen(mi1.cigar) + 1); bestHitMappingInfo[readNumber * 2 + 1].loc = mi2.loc; bestHitMappingInfo[readNumber * 2 + 1].dir = mi2.dir; bestHitMappingInfo[readNumber * 2 + 1].err = mi2.err; bestHitMappingInfo[readNumber * 2 + 1].score = mi2.score; snprintf(bestHitMappingInfo[readNumber * 2 + 1].chr, MAX_REF_SIZE, "%s", _msf_refGenName); strncpy(bestHitMappingInfo[readNumber * 2 + 1].md, mi2.md, strlen(mi2.md) + 1); strncpy(bestHitMappingInfo[readNumber * 2 + 1].cigar, mi2.cigar, strlen(mi2.cigar) + 1); } int outputPairedEnd(int pre_unmappedCnt) { int i = 0; char cigar[MAX_CIGAR_SIZE]; int tmpOut; FILE* in1[_msf_openFiles]; FILE* in2[_msf_openFiles]; char fname1[_msf_openFiles][FILE_NAME_LENGTH]; char fname2[_msf_openFiles][FILE_NAME_LENGTH]; // discordant FILE *out = NULL, *out1 = NULL; char fname3[FILE_NAME_LENGTH]; char fname4[FILE_NAME_LENGTH]; int meanDistanceMapping = 0; char rqual1[SEQ_LENGTH + 1]; char rqual2[SEQ_LENGTH + 1]; int tmp = 0; SAM _msf_output; OPT_FIELDS _msf_optionalFields[8]; //TODO loadRefGenome(&_msf_refGen, &_msf_refGenName, &tmpOut); if (pairedEndDiscordantMode) { sprintf(fname3, "%s__%s__disc", mappingOutputPath, mappingOutput); sprintf(fname4, "%s__%s__oea", mappingOutputPath, mappingOutput); out = fileOpen(fname3, "a"); out1 = fileOpen(fname4, "a"); } FullMappingInfo *mi1 = getMem(sizeof(FullMappingInfo) * _msf_maxLSize, "mi1 @outputPairedEnd()"); FullMappingInfo *mi2 = getMem(sizeof(FullMappingInfo) * _msf_maxRSize, "mi2 @outputPairedEnd()"); _msf_fileCount[_msf_maxFile] = 0; for (i = 0; i < _msf_openFiles; i++) { sprintf(fname1[i], "%s__%s__%s__%d__1.tmp", mappingOutputPath, _msf_refGenName, mappingOutput, i); sprintf(_msf_fileName[_msf_maxFile][_msf_fileCount[_msf_maxFile]][0], 
"%s", fname1[i]); sprintf(fname2[i], "%s__%s__%s__%d__2.tmp", mappingOutputPath, _msf_refGenName, mappingOutput, i); sprintf(_msf_fileName[_msf_maxFile][_msf_fileCount[_msf_maxFile]][1], "%s", fname2[i]); in1[i] = fileOpen(fname1[i], "r"); in2[i] = fileOpen(fname2[i], "r"); _msf_fileCount[_msf_maxFile]++; } _msf_maxFile++; int size; int j, k; int size1, size2; meanDistanceMapping = (pairedEndDiscordantMode == 1) ? (minPairEndedDiscordantDistance + maxPairEndedDiscordantDistance) / 2 + SEQ_LENGTH : (minPairEndedDistance + maxPairEndedDistance) / 2 + SEQ_LENGTH; for (i = 0; i < _msf_seqListSize / 2; i++) { size1 = size2 = 0; for (j = 0; j < _msf_openFiles; j++) { tmpOut = fread(&size, sizeof(int), 1, in1[j]); if (size > 0) { for (k = 0; k < size; k++) { mi1[size1 + k].dir = 1; tmpOut = fread(&(mi1[size1 + k].loc), sizeof(int), 1, in1[j]); tmpOut = fread(&(mi1[size1 + k].err), sizeof(int), 1,in1[j]); tmpOut = fread(&(mi1[size1 + k].cigarSize), sizeof(int), 1, in1[j]); tmpOut = fread((mi1[size1 + k].cigar), sizeof(char), mi1[size1 + k].cigarSize, in1[j]); mi1[size1 + k].cigar[mi1[size1 + k].cigarSize] = '\0'; tmpOut = fread(&(mi1[size1 + k].mdSize), sizeof(int), 1, in1[j]); tmpOut = fread((mi1[size1 + k].md), sizeof(char), (mi1[size1 + k].mdSize), in1[j]); mi1[size1 + k].md[mi1[size1 + k].mdSize] = '\0'; if (mi1[size1 + k].loc < 1) { mi1[size1 + k].loc *= -1; mi1[size1 + k].dir = -1; } } qsort(mi1 + size1, size, sizeof(FullMappingInfo), compareOut); size1 += size; } } for (j = 0; j < _msf_openFiles; j++) { tmpOut = fread(&size, sizeof(int), 1, in2[j]); if (size > 0) { for (k = 0; k < size; k++) { mi2[size2 + k].dir = 1; tmpOut = fread(&(mi2[size2 + k].loc), sizeof(int), 1, in2[j]); tmpOut = fread(&(mi2[size2 + k].err), sizeof(int), 1, in2[j]); tmpOut = fread(&(mi2[size2 + k].cigarSize), sizeof(int), 1, in2[j]); tmpOut = fread((mi2[size2 + k].cigar), sizeof(char), mi2[size2 + k].cigarSize, in2[j]); mi2[size2 + k].cigar[mi2[size2 + k].cigarSize] = '\0'; tmpOut = fread(&(mi2[size2 + k].mdSize), sizeof(int), 1, in2[j]); tmpOut = fread((mi2[size2 + k].md), sizeof(char), mi2[size2 + k].mdSize, in2[j]); mi2[size2 + k].md[mi2[size2 + k].mdSize] = '\0'; if (mi2[size2 + k].loc < 1) { mi2[size2 + k].loc *= -1; mi2[size2 + k].dir = -1; } } qsort(mi2 + size2, size, sizeof(FullMappingInfo), compareOut); size2 += size; } } int lm, ll, rl, rm; int pos = 0; if (pairedEndDiscordantMode) { for (j = 0; j < size1; j++) { lm = mi1[j].loc - maxPairEndedDiscordantDistance + 1; ll = mi1[j].loc - minPairEndedDiscordantDistance + 1; rl = mi1[j].loc + minPairEndedDiscordantDistance - 1; rm = mi1[j].loc + maxPairEndedDiscordantDistance - 1; while (pos < size2 && mi2[pos].loc < lm) { pos++; } k = pos; while (k < size2 && mi2[k].loc <= rm) { if (mi2[k].loc <= ll || mi2[k].loc >= rl) { if ( (mi1[j].dir == 1 && mi2[k].dir == 1) || (mi1[j].dir == -1 && mi2[k].dir == -1)) { _msf_seqList[i * 2].hits[0] = 1; _msf_seqList[i * 2 + 1].hits[0] = 1; if (nosamMode != 0) { size1 = 0; size2 = 0; } break; } } k++; } } _msf_seqHits[i * 2] += size1; _msf_seqHits[i * 2 + 1] += size2; if (_msf_seqHits[i * 2 + 1] * _msf_seqHits[i * 2] > DISCORDANT_CUT_OFF && nosamMode != 0) { _msf_seqList[i * 2].hits[0] = 1; _msf_seqList[i * 2 + 1].hits[0] = 1; size1 = 0; size2 = 0; } int rNo = 0; int loc = 0; int err = 0; float sc = 0; char l = 0; //write the OEA data if (_msf_seqHits[i * 2] == 0){ for (k = 0; k < size2 && _msf_oeaMapping[i * 2 + 1] < maxOEAOutput; k++) { rNo = i * 2 + 1; loc = mi2[k].loc * mi2[k].dir; err = mi2[k].err; sc = mi2[k].score; l = 
strlen(_msf_refGenName); tmp = fwrite(&rNo, sizeof(int), 1, out1); tmp = fwrite(&l, sizeof(char), 1, out1); tmp = fwrite(_msf_refGenName, sizeof(char), l, out1); tmp = fwrite(&loc, sizeof(int), 1, out1); tmp = fwrite(&err, sizeof(int), 1, out1); tmp = fwrite(&sc, sizeof(float), 1, out1); if (mi2[k].cigarSize > SEQ_LENGTH || mi2[k].cigarSize <= 0) printf("ERROR CIGAR size=%d %s\n", mi2[k].cigarSize, _msf_seqList[i * 2 + 1].seq); tmp = fwrite(&(mi2[k].cigarSize), sizeof(int), 1, out1); tmp = fwrite((mi2[k].cigar), sizeof(char), mi2[k].cigarSize, out1); tmp = fwrite(&(mi2[k].mdSize), sizeof(int), 1, out1); tmp = fwrite((mi2[k].md), sizeof(char), mi2[k].mdSize, out1); _msf_oeaMapping[i * 2 + 1]++; } } if (_msf_seqHits[i * 2 + 1] == 0){ for (j = 0; j < size1 && _msf_oeaMapping[i * 2] < maxOEAOutput; j++) { rNo = i * 2; loc = mi1[j].loc * mi1[j].dir; err = mi1[j].err; sc = mi1[j].score; l = strlen(_msf_refGenName); tmp = fwrite(&rNo, sizeof(int), 1, out1); tmp = fwrite(&l, sizeof(char), 1, out1); tmp = fwrite(_msf_refGenName, sizeof(char), l, out1); tmp = fwrite(&loc, sizeof(int), 1, out1); tmp = fwrite(&err, sizeof(int), 1, out1); tmp = fwrite(&sc, sizeof(float), 1, out1); if (mi1[j].cigarSize > SEQ_LENGTH || mi1[j].cigarSize <= 0) printf("ERROR %d %s\n", mi1[j].cigarSize, _msf_seqList[i * 2 + 1].seq); tmp = fwrite(&(mi1[j].cigarSize), sizeof(int), 1, out1); tmp = fwrite((mi1[j].cigar), sizeof(char), mi1[j].cigarSize, out1); tmp = fwrite(&(mi1[j].mdSize), sizeof(int), 1, out1); tmp = fwrite((mi1[j].md), sizeof(char), mi1[j].mdSize, out1); _msf_oeaMapping[i * 2]++; } } } char *seq1, *seq2, *rseq1, *rseq2, *qual1, *qual2; rqual1[SEQ_LENGTH] = '\0'; rqual2[SEQ_LENGTH] = '\0'; rqual1[0] = '\0'; rqual2[0] = '\0'; seq1 = _msf_seqList[i * 2].seq; rseq1 = _msf_seqList[i * 2].rseq; qual1 = _msf_seqList[i * 2].qual; strncpy(rqual1, _msf_seqList[i * 2].qual, SEQ_LENGTH); seq2 = _msf_seqList[i * 2 + 1].seq; rseq2 = _msf_seqList[i * 2 + 1].rseq; qual2 = _msf_seqList[i * 2 + 1].qual; strncpy(rqual2, _msf_seqList[i * 2 + 1].qual, SEQ_LENGTH); if (pairedEndDiscordantMode) { for (k = 0; k < size1; k++) { mi1[k].score = calculateScore(mi1[k].loc, (mi1[k].dir == -1) ? rseq1 : seq1, (mi1[k].dir == -1) ? rqual1 : qual1, mi1[k].cigar); } for (k = 0; k < size2; k++) { mi2[k].score = calculateScore(mi2[k].loc, (mi2[k].dir == -1) ? rseq2 : seq2, (mi2[k].dir == -1) ? 
rqual2 : qual2, mi2[k].cigar); } } /* CALKAN MAPQ FOR PE */ for (j = 0; j < size1; j++) { if (mi1[j].err != 0){ bestHitMappingInfo[i*2].tprob += mapProb(i*2, mi1[j].md, mi1[j].dir, mi1[j].err); } } for (k = 0; k < size2; k++) { if (mi2[k].err != 0){ bestHitMappingInfo[i*2+1].tprob += mapProb((i*2+1), mi2[k].md, mi2[k].dir, mi2[k].err); } } if (pairedEndDiscordantMode) { for (j = 0; j < size1; j++) { for (k = 0; k < size2; k++) { int dir1 = mi1[j].dir; int dir2 = mi2[k].dir; int loc1 = mi1[j].loc; int loc2 = mi2[k].loc; int best_err1 = bestHitMappingInfo[i * 2].err; int best_err2 = bestHitMappingInfo[i * 2+1].err; int best_loc1 = bestHitMappingInfo[i * 2].loc; int best_loc2 = bestHitMappingInfo[i * 2+1].loc; if ( (( dir1 > 0 && dir2 > 0 ) || (dir1 < 0 && dir2 < 0)) && (( loc1 != -1 || loc2 != -1) && (abs(loc1 - loc2) > minPairEndedDiscordantDistance) && (abs(loc1 - loc2) < maxPairEndedDiscordantDistance)) ) { //POSSIBLE CONCORDANT if(_msf_readHasConcordantMapping[i] == 0) { setPairFullMappingInfo(i, mi1[j], mi2[k]); _msf_readHasConcordantMapping[i] = 1; _msf_seqList[i * 2].hits[0] = 1; _msf_seqList[i * 2 + 1].hits[0] = 1; } else { if (best_err1+best_err2 >= mi1[j].err + mi2[k].err) { if ( best_err1+best_err2 == mi1[j].err + mi2[k].err && findNearest( abs(best_loc1-best_loc2), abs(loc2-loc1), meanDistanceMapping) == 0) { continue; } setPairFullMappingInfo(i, mi1[j], mi2[k]); } } } //DISCORDANT TO TEMP FILE FOR POST PROCESSING else if (_msf_readHasConcordantMapping[i] == 0 && _msf_seqHits[i * 2] != 0 && _msf_seqHits[i * 2 + 1] != 0) { int rNo = i; int loc = mi1[j].loc * mi1[j].dir; int err = mi1[j].err; float sc = mi1[j].score; char l = strlen(_msf_refGenName); if (_msf_discordantMapping[i * 2] < maxDiscordantOutput) { tmp = fwrite(&rNo, sizeof(int), 1, out); tmp = fwrite(&l, sizeof(char), 1, out); tmp = fwrite(_msf_refGenName, sizeof(char), l, out); tmp = fwrite(&loc, sizeof(int), 1, out); tmp = fwrite(&err, sizeof(int), 1, out); tmp = fwrite(&sc, sizeof(float), 1, out); tmp = fwrite(&(mi1[j].cigarSize), sizeof(int), 1, out); tmp = fwrite((mi1[j].cigar), sizeof(char), mi1[j].cigarSize, out); tmp = fwrite(&(mi1[j].mdSize), sizeof(int), 1, out); tmp = fwrite((mi1[j].md), sizeof(char), mi1[j].mdSize, out); loc = mi2[k].loc * mi2[k].dir; err = mi2[k].err; sc = mi2[k].score; tmp = fwrite(&loc, sizeof(int), 1, out); tmp = fwrite(&err, sizeof(int), 1, out); tmp = fwrite(&sc, sizeof(float), 1, out); tmp = fwrite(&(mi2[k].cigarSize), sizeof(int), 1, out); tmp = fwrite((mi2[k].cigar), sizeof(char), mi2[k].cigarSize, out); tmp = fwrite(&(mi2[k].mdSize), sizeof(int), 1, out); tmp = fwrite((mi2[k].md), sizeof(char), mi2[k].mdSize, out); _msf_discordantMapping[i * 2]++; } //SET THE BEST DISCORDANT //BEGIN {Farhad Hormozdiari} if (best_loc1 == -1 && best_loc2 == -1 && _msf_readHasConcordantMapping[i] == 0) { setPairFullMappingInfo(i, mi1[j], mi2[k]); _msf_seqList[i * 2].hits[0] = 1; _msf_seqList[i * 2 + 1].hits[0] = 1; } else if (best_err1 + best_err2 >= mi1[j].err + mi2[k].err && _msf_readHasConcordantMapping[i] == 0) { if (best_err1 + best_err2 == mi1[j].err + mi2[k].err && findNearest( abs(best_loc2-best_loc1), abs(loc1 - loc2), meanDistanceMapping) == 0) { continue; } setPairFullMappingInfo(i, mi1[j], mi2[k]); } //END {Farhad Hormozdiari} } } } } else { for (j = 0; j < size1; j++) { for (k = 0; k < size2; k++) { int dir1 = mi1[j].dir; int dir2 = mi2[k].dir; int loc1 = mi1[j].loc; int loc2 = mi2[k].loc; int best_err1 = bestHitMappingInfo[i * 2].err; int best_err2 = bestHitMappingInfo[i * 2+1].err; int 
best_loc1 = bestHitMappingInfo[i * 2].loc; int best_loc2 = bestHitMappingInfo[i * 2+1].loc; if ( abs (mi2[k].loc - mi1[j].loc) >= minPairEndedDistance && (abs(mi2[k].loc - mi1[j].loc) <= maxPairEndedDistance) && ( (dir1>0 && dir2>0) || ( dir1< 0 && dir2 <0) ) ) { char *seq; char *qual; char d1; char d2; int isize; int proper = 0; // ISIZE CALCULATION // The distance between outer edges isize = abs(mi1[j].loc - mi2[k].loc) + SEQ_LENGTH - 2; if (mi1[j].loc - mi2[k].loc > 0) { isize *= -1; } d1 = (mi1[j].dir == -1) ? 1 : 0; d2 = (mi2[k].dir == -1) ? 1 : 0; //SET THE READ HAS CONCORDANT MAPPING _msf_readHasConcordantMapping[i] = 1; if (d1) { seq = rseq1; qual = rqual1; } else { seq = seq1; qual = qual1; } if ((d1 && d2) || (!d1 && !d2)) { proper = 2; } else { proper = 0; } _msf_output.POS = mi1[j].loc; _msf_output.MPOS = mi2[k].loc; _msf_output.FLAG = 1 + proper + 16 * d1 + 32 * d2 + 64; _msf_output.ISIZE = isize; _msf_output.SEQ = seq; _msf_output.QUAL = qual; _msf_output.QNAME = _msf_seqList[i * 2].name; _msf_output.RNAME = _msf_refGenName; _msf_output.MAPQ = 255; _msf_output.CIGAR = cigar; _msf_output.MRNAME = "="; _msf_output.optSize = 2; _msf_output.optFields = _msf_optionalFields; _msf_optionalFields[0].tag = "NM"; _msf_optionalFields[0].type = 'i'; _msf_optionalFields[0].iVal = mi1[j].err; _msf_optionalFields[1].tag = "MD"; _msf_optionalFields[1].type = 'Z'; _msf_optionalFields[1].sVal = mi1[j].md; if (!bestMode) output(_msf_output, 0); if (d2) { seq = rseq2; qual = rqual2; } else { seq = seq2; qual = qual2; } _msf_output.POS = mi2[k].loc; _msf_output.MPOS = mi1[j].loc; _msf_output.FLAG = 1 + proper + 16 * d2 + 32 * d1 + 128; _msf_output.ISIZE = -isize; _msf_output.SEQ = seq; _msf_output.QUAL = qual; _msf_output.QNAME = _msf_seqList[i * 2].name; _msf_output.RNAME = _msf_refGenName; _msf_output.MAPQ = 255; _msf_output.CIGAR = cigar; _msf_output.MRNAME = "="; _msf_output.optSize = 2; _msf_output.optFields = _msf_optionalFields; _msf_optionalFields[0].tag = "NM"; _msf_optionalFields[0].type = 'i'; _msf_optionalFields[0].iVal = mi2[k].err; _msf_optionalFields[1].tag = "MD"; _msf_optionalFields[1].type = 'Z'; _msf_optionalFields[1].sVal = mi2[k].md; if (!bestMode) output(_msf_output,0); //SET THE BEST CONCORDANT //BEGIN {Farhad Hormozdiari} if (best_loc1 == -1 && best_loc2 == -1) { setPairFullMappingInfo(i, mi1[j], mi2[k]); } else { if (best_err1 + best_err2 >= mi1[j].err + mi2[k].err) { if (best_err1+best_err2 == mi1[j].err + mi2[k].err && findNearest( abs(best_loc2 - best_loc1), abs(loc2 - loc1), meanDistanceMapping) == 0) { continue; } setPairFullMappingInfo(i, mi1[j], mi2[k]); } } //END {Farhad Hormozdiari} } } } } } if (pairedEndDiscordantMode) { fclose(out); fclose(out1); } for (i = 0; i < _msf_openFiles; i++) { fclose(in1[i]); fclose(in2[i]); unlink(fname1[i]); unlink(fname2[i]); } tmp++; freeMem(mi1, sizeof(FullMappingInfo) * _msf_maxLSize, "mi1 @outputPairedEnd()"); freeMem(mi2, sizeof(FullMappingInfo) * _msf_maxRSize, "mi2 @outputPairedEnd()"); _msf_openFiles = 0; /* calkan counter */ int unmappedCnt = 0; for (i = 0; i < _msf_seqListSize; i++) { if (_msf_seqHits[i] == 0) unmappedCnt++; } unmappedCnt = unmappedCnt + pre_unmappedCnt; mappedSeqCnt = _msf_totalSeqListSize - unmappedCnt; return unmappedCnt; } float calculateScore(int index, char *seq, char *qual, char *md) { int i = 0; int j; char *ref; char *ver; float score = 1; char tmp[2 * SEQ_MAX_LENGTH]; int value = 0; int end = 0; int index1 = 0; int index2 = 0; ref = _msf_refGen + index - 1; ver = seq; while (1) { if (i >= 
strlen(md)) break; index1 = i; while (md[i] >= '0' && md[i] <= '9') { i++; } index2 = i; value = str2int(md, index1, index2); if (md[i] == 'M') { for (j = 0; j < value; j++) { tmp[end] = 'M'; end++; } } else if (md[i] == 'I') { for (j = 0; j < value; j++) { tmp[end] = 'I'; end++; } } else if (md[i] == 'D') { for (j = 0; j < value; j++) { tmp[end] = 'D'; end++; } } i++; } tmp[end] = '\0'; j = 0; for (i = 0; i < end; i++) { if (tmp[i] == 'M') { if (*ref != *ver) { score *= 0.001 + 1 / pow(10, ((qual[j] - 33) / 10.0)); } ref++; ver++; j++; } else if (tmp[i] == 'I') { ver++; j++; score *= 0.0003; // 0.0001 + 0.0002; 0.0001: indel rate in normal human, 0.0002: indel error rate in Illumina } else if (tmp[i] == 'D') { ref++; score *= 0.0003; // 0.0001 + 0.0002 } } return score; } int matoi(char *str, int start, int end) { int i = 0; char tmp[SEQ_MAX_LENGTH]; for (i = 0; i < end - start; i++) tmp[i] = str[start + i]; tmp[i] = '\0'; return atoi(tmp); } void convertCigarToMatrix(char *cigar, int cigar_size, char * matrix) { int i = 0; int j = 0; int start = 0; int size = 0; matrix[0] = '\0'; while (i < cigar_size) { if (cigar[i] >= '0' && cigar[i] <= '9') { start = i; while (cigar[i] >= '0' && cigar[i] <= '9' && i < cigar_size) i++; int value = matoi(cigar, start, i); for (j = 0; j < value; j++) { if (cigar[i] == 'M') matrix[size] = 'M'; else if (cigar[i] == 'D') matrix[size] = 'D'; else if (cigar[i] == 'I') matrix[size] = 'I'; size++; } } i++; } matrix[size] = '\0'; } void convertMDToMatrix(char *md, int md_size, char * matrix) { int i = 0; int j = 0; int start = 0; int size = 0; matrix[0] = '\0'; while (i < md_size) { if (md[i] >= '0' && md[i] <= '9') { start = i; while (md[i] >= '0' && md[i] <= '9' && i < md_size) i++; int value = matoi(md, start, i); for (j = 0; j < value; j++) { matrix[size] = 'M'; size++; } i--; } else if (md[i] == '^') { matrix[size] = 'D'; size++; } else { matrix[size] = md[i]; size++; } i++; } matrix[size] = '\0'; } void convertMDCigarToMatrix(char *cigar, int cigar_size, char *md, int md_size, char *matrix) { int i = 0; int j = 0; int size = 0; char tmp1[SEQ_MAX_LENGTH]; char tmp2[SEQ_MAX_LENGTH]; convertMDToMatrix(md, md_size, tmp2); convertCigarToMatrix(cigar, cigar_size, tmp1); while (i < strlen(tmp1)) { if (tmp1[i] == 'M') { if (j < strlen(tmp2)) { if (tmp2[j] == 'M') { matrix[size] = 'M'; size++; } if (tmp2[j] != 'M') { matrix[size] = tmp2[j]; size++; } } else { matrix[size] = 'M'; size++; } } else if (tmp1[i] == 'D') { matrix[size] = 'D'; size++; j++; matrix[size] = tmp2[j]; size++; } else if (tmp1[i] == 'I') { matrix[size] = 'I'; size++; } i++; if (j < strlen(tmp2)) j++; } if (strlen(tmp1)) matrix[size] = '\0'; } void convertInsertion(char * in_matrix, char * seq, char *out_matrix) { int i = 0; int j = 0; int size = 0; while (i < strlen(in_matrix)) { if (in_matrix[i] == 'M') { out_matrix[size] = 'M'; size++; j++; } else if (in_matrix[i] == 'D') { out_matrix[size] = 'D'; size++; i++; j++; out_matrix[size] = seq[j]; j++; size++; } else if (in_matrix[i] == 'I') { out_matrix[size] = 'I'; size++; out_matrix[size] = seq[j]; size++; j++; } else { out_matrix[size] = in_matrix[i]; size++; j++; } i++; } out_matrix[size] = '\0'; } FILE * initPairedEndDiscPP() { char fname2[FILE_NAME_LENGTH]; FILE * out; sprintf(fname2, "%s%s_DIVET.vh", mappingOutputPath, mappingOutput); out = fileOpen(fname2, "w"); return out; } void finalizePairedEndDiscPP(FILE * out) { fclose(out); } void operatePairedEndDiscPP(FILE * out) { char tmp_matrix1[SEQ_MAX_LENGTH]; char tmp_matrix2[SEQ_MAX_LENGTH]; 
char matrix1[SEQ_MAX_LENGTH]; char matrix2[SEQ_MAX_LENGTH]; char cigar1[MAX_CIGAR_SIZE]; char editString1[2 * SEQ_MAX_LENGTH]; char cigar2[MAX_CIGAR_SIZE]; char editString2[2 * SEQ_MAX_LENGTH]; char seq1[SEQ_LENGTH + 1]; char seq2[SEQ_LENGTH + 1]; char genName[SEQ_LENGTH]; char fname1[FILE_NAME_LENGTH]; char l; int l_size; int loc1, loc2; int err1, err2; char dir1, dir2; float sc1, sc2, lsc = 0; int flag = 0; int rNo, lrNo = -1; int tmp; FILE *in; sprintf(fname1, "%s__%s__disc", mappingOutputPath, mappingOutput); in = fileOpen(fname1, "r"); if (in != NULL) { flag = fread(&rNo, sizeof(int), 1, in); } else { flag = 0; } seq1[SEQ_LENGTH] = '\0'; seq2[SEQ_LENGTH] = '\0'; while (flag) { tmp = fread(&l, sizeof(char), 1, in); tmp = fread(genName, sizeof(char), l, in); genName[(int) l] = '\0'; tmp = fread(&loc1, sizeof(int), 1, in); tmp = fread(&err1, sizeof(int), 1, in); tmp = fread(&sc1, sizeof(float), 1, in); tmp = fread(&l_size, sizeof(int), 1, in); tmp = fread(cigar1, sizeof(char), l_size, in); cigar1[(int) l_size] = '\0'; tmp = fread(&l_size, sizeof(int), 1, in); tmp = fread(editString1, sizeof(char), l_size, in); editString1[(int) l_size] = '\0'; tmp = fread(&loc2, sizeof(int), 1, in); tmp = fread(&err2, sizeof(int), 1, in); tmp = fread(&sc2, sizeof(float), 1, in); tmp = fread(&l_size, sizeof(int), 1, in); tmp = fread(cigar2, sizeof(char), l_size, in); cigar2[(int) l_size] = '\0'; tmp = fread(&l_size, sizeof(int), 1, in); tmp = fread(editString2, sizeof(char), l_size, in); editString2[(int) l_size] = '\0'; convertMDCigarToMatrix(cigar1, strlen(cigar1), editString1, strlen(editString1), tmp_matrix1); convertMDCigarToMatrix(cigar2, strlen(cigar2), editString2, strlen(editString2), tmp_matrix2); /* CHECK FOR SIFAST */ /* CALKAN: GO OVER THIS VERY CAREFULLY FOR PE vs MP */ if (_msf_readHasConcordantMapping[rNo] == 0 && _msf_discordantMapping[rNo * 2] < maxDiscordantOutput ) { dir1 = dir2 = 'F'; strncpy(seq1, _msf_seqList[rNo * 2].seq, SEQ_LENGTH); strncpy(seq2, _msf_seqList[rNo * 2 + 1].seq, SEQ_LENGTH); if (loc1 < 0) { dir1 = 'R'; loc1 = -loc1; strncpy(seq1, _msf_seqList[rNo * 2].rseq, SEQ_LENGTH); } if (loc2 < 0) { dir2 = 'R'; loc2 = -loc2; strncpy(seq2, _msf_seqList[rNo * 2 + 1].rseq, SEQ_LENGTH); } convertInsertion(tmp_matrix1, seq1, matrix1); convertInsertion(tmp_matrix2, seq2, matrix2); if (rNo != lrNo) { int j; for (j = 0; j < SEQ_LENGTH; j++) { lsc += _msf_seqList[rNo * 2].qual[j] + _msf_seqList[rNo * 2 + 1].qual[j]; } lsc /= 2 * SEQ_LENGTH; lsc -= 33; lrNo = rNo; } char event = '\0'; if (dir1 == dir2) { event = 'V'; } else { if (pairedEndModePE && loc1 < loc2 && dir1 == 'R' && dir2 == 'F') event = 'E'; else if (pairedEndModeMP && loc1 < loc2 && dir1 == 'F' && dir2 == 'R') event = 'E'; else if (pairedEndModePE && loc2 < loc1 && dir1 == 'F' && dir2 == 'R') event = 'E'; else if (pairedEndModeMP && loc2 < loc1 && dir1 == 'R' && dir2 == 'F') event = 'E'; else if (abs(loc2 - loc1) >= maxPairEndedDiscordantDistance) event = 'D'; else event = 'I'; } _msf_seqList[rNo * 2].hits[0] = 2; fprintf(out, "%s\t%s\t%d\t%d\t%c\t=\t%d\t%d\t%c\t%c\t%d\t%0.0f\t%e\n", _msf_seqList[rNo * 2].name, genName, loc1, (loc1 + SEQ_LENGTH - 1), dir1, loc2, (loc2 + SEQ_LENGTH - 1), dir2, event, (err1 + err2), lsc, sc1 * sc2); } flag = fread(&rNo, sizeof(int), 1, in); } tmp++; fclose(in); unlink(fname1); } FILE * initOEAReads(char *fileName) { FILE *fp_out1; char fname1[FILE_NAME_LENGTH]; sprintf(fname1, "%s%s_OEA.sam", mappingOutputPath, mappingOutput); fp_out1 = fileOpen(fname1, "w"); return fp_out1; } void 
finalizeOEAReads(FILE * fp_out1) {
  fclose(fp_out1);
  return;
}

void operateOEAReads(FILE * fp_out1) {
  FILE * in;
  char genName[SEQ_LENGTH];
  char fname2[FILE_NAME_LENGTH];
  char l = 0;
  int loc1 = 0;
  int err1;
  char d;
  float sc1 = 0;
  int flag = 0;
  int rNo = -1;
  int tmp = 0;
  int cigarSize = 0;
  int mdSize = 0;
  char cigar[SEQ_LENGTH + 1];
  char md[SEQ_LENGTH + 1];
  char *seq1, *seq2, *qual1, *qual2;
  char rqual1[SEQ_LENGTH + 1];
  SAM _msf_output;
  OPT_FIELDS _msf_optionalFields[8];
  seq1 = NULL;
  seq2 = NULL;
  qual1 = NULL;
  qual2 = NULL;
  rqual1[0] = '\0';
  SAMheaderTX(fp_out1, 0);
  in = NULL;
  if (pairedEndDiscordantMode) {
    sprintf(fname2, "%s__%s__oea", mappingOutputPath, mappingOutput);
    in = fileOpen(fname2, "r");
  }
  if (in != NULL) {
    flag = fread(&rNo, sizeof(int), 1, in);
  } else {
    flag = 0;
  }
  while (flag) {
    cigar[0] = '\0';
    md[0] = '\0';
    tmp = fread(&l, sizeof(char), 1, in);
    tmp = fread(genName, sizeof(char), l, in);
    genName[(int) l] = '\0';
    tmp = fread(&loc1, sizeof(int), 1, in);
    tmp = fread(&err1, sizeof(int), 1, in);
    tmp = fread(&sc1, sizeof(float), 1, in);
    tmp = fread(&cigarSize, sizeof(int), 1, in);
    tmp = fread(cigar, sizeof(char), cigarSize, in);
    cigar[cigarSize] = '\0';
    tmp = fread(&mdSize, sizeof(int), 1, in);
    tmp = fread(md, sizeof(char), mdSize, in);
    md[mdSize] = '\0';
    // d is used as a SAM strand bit below (16 * d / 32 * d), so it must be
    // 0 for forward and 1 for reverse, matching the d1/d2 convention used
    // elsewhere in this file; the previous 1/-1 encoding produced invalid
    // FLAG values.
    d = 0;
    if (loc1 < 0) {
      d = 1;
      loc1 *= -1;
      seq1 = _msf_seqList[rNo].rseq;
      reverse(_msf_seqList[rNo].qual, rqual1, SEQ_LENGTH);
      rqual1[SEQ_LENGTH] = '\0';
      qual1 = rqual1;
    } else {
      seq1 = _msf_seqList[rNo].seq;
      qual1 = _msf_seqList[rNo].qual;
    }
    if (rNo % 2 == 0) {
      seq2 = _msf_seqList[rNo + 1].seq;
      qual2 = _msf_seqList[rNo + 1].qual;
    } else {
      seq2 = _msf_seqList[rNo - 1].seq;
      qual2 = _msf_seqList[rNo - 1].qual;
    }
    if (_msf_seqHits[rNo] != 0 && _msf_seqHits[rNo] < maxOEAOutput
        && _msf_seqHits[(rNo % 2 == 0) ? rNo + 1 : rNo - 1] == 0) {
      _msf_output.POS = loc1;
      _msf_output.MPOS = 0;
      _msf_output.FLAG = (rNo % 2 == 0) ? 1 + 4 + 32 * d + 128 : 1 + 8 + 16 * d + 64;
      _msf_output.ISIZE = 0;
      _msf_output.SEQ = seq1;
      _msf_output.QUAL = qual1;
      _msf_output.QNAME = _msf_seqList[rNo].name;
      _msf_output.RNAME = genName;
      _msf_output.MAPQ = 255;
      _msf_output.CIGAR = cigar;
      _msf_output.MRNAME = "=";
      _msf_output.optSize = 4;
      _msf_output.optFields = _msf_optionalFields;
      _msf_optionalFields[0].tag = "NM";
      _msf_optionalFields[0].type = 'i';
      _msf_optionalFields[0].iVal = err1;
      _msf_optionalFields[1].tag = "MD";
      _msf_optionalFields[1].type = 'Z';
      _msf_optionalFields[1].sVal = md;
      //for the OEA reads
      _msf_optionalFields[2].tag = "NS";
      _msf_optionalFields[2].type = 'Z';
      _msf_optionalFields[2].sVal = seq2;
      _msf_optionalFields[3].tag = "NQ";
      _msf_optionalFields[3].type = 'Z';
      _msf_optionalFields[3].sVal = qual2;
      outputSAM(fp_out1, _msf_output);
      _msf_seqList[rNo].hits[0] = -1;
      _msf_seqList[(rNo % 2 == 0) ? rNo + 1 : rNo - 1].hits[0] = -1;
    } else if (_msf_seqHits[rNo] != 0
        && _msf_seqHits[(rNo % 2 == 0) ? rNo + 1 : rNo - 1] == 0) {
      _msf_seqList[rNo].hits[0] = -1;
      _msf_seqList[(rNo % 2 == 0) ?
rNo + 1 : rNo - 1].hits[0] = -1; } flag = fread(&rNo, sizeof(int), 1, in); } tmp++; fclose(in); unlink(fname2); } void outputAllTransChromosomal(int flag) { return; /* disabled until completed int i = 0; int j = 0; int k = 0; int l = 0; FILE *fp_out = NULL; char fname1[FILE_NAME_LENGTH]; if(flag) { sprintf(fname1, "%s%s_TRANSCHROMOSOMAL", mappingOutputPath, mappingOutput); fp_out = fileOpen(fname1, "w"); i = 0; for(j = i+1; j < _msf_maxFile; j++) { if(i != j) { for(k = 0; k < _msf_fileCount[i]; k++) { for(l = 0; l < _msf_fileCount[j]; l++) { outputTransChromosomal(_msf_fileName[i][k][0], _msf_fileName[j][l][1], fp_out); }// for l }// for k }// if }// for j } for(i = 0; i < _msf_maxFile; i++) { for(j = 0; j < _msf_fileCount[i]; j++) { unlink(_msf_fileName[i][j][0]); unlink(_msf_fileName[i][j][1]); } } if(flag) fclose(fp_out); */ } void initFASTCG(Read *seqList, int seqListSize, char *genFileName, int AccReads, int first_try) { // Optional Field Memory Allocation: _msf_optionalFields int i, j; if (_msf_samplingLocsEnds == NULL) { // DHL FIRE OK _msf_samplingLocsEnds = getMem(1, "_msf_samplingLocsEnds @initFASTCG()"); _msf_seqList = seqList; _msf_seqListSize = seqListSize; _msf_totalSeqListSize = AccReads; preProcessReads(); _msf_oeaMapping = getMem(_msf_seqListSize * sizeof(int), "_msf_oeaMapping @initFASTCG()"); for (i = 0; i < _msf_seqListSize; i++) _msf_oeaMapping[i] = 0; _msf_discordantMapping = getMem(_msf_seqListSize * sizeof(int), "_msf_discordantMapping @initFASTCG()"); for (i = 0; i < _msf_seqListSize; i++) _msf_discordantMapping[i] = 0; } // Reference Genome Name Update if (_msf_refGenName == NULL) { // DHL FIRE OK _msf_refGenName = getMem(4*SEQ_LENGTH, "_msf_refGenName @initFASTCG()"); } if (_msf_verifiedLocs != NULL) { for (i=0;i<number_of_threads;i++) if (_msf_verifiedLocs[i] != NULL) freeMem(_msf_verifiedLocs[i], sizeof(int) * (_msf_refGenLength+1), "_msf_verifiedLocs[i] @initFASTCG()"); freeMem(_msf_verifiedLocs, sizeof(int *) * number_of_threads, "_msf_verifiedLocs @initFASTCG()"); } _msf_refGen = getRefGenome(); // DHL VERIFY _msf_refGenLength = strlen(_msf_refGen); _msf_refGenOffset = getRefGenomeOffset(); snprintf(_msf_refGenName, 4*SEQ_LENGTH,"%s%c", getRefGenomeName(), '\0'); _msf_refGenName[strlen(getRefGenomeName())] = '\0'; _msf_verifiedLocs = (int **) getMem(sizeof(int *) * number_of_threads, "_msf_verifiedLocs @initFASTCG()"); for (i=0;i<number_of_threads;i++) _msf_verifiedLocs[i] = (int *) getMem(sizeof(int) * (_msf_refGenLength+1), "_msf_verifiedLocs[i] @initFASTCG()"); for (i=0;i<number_of_threads;i++){ for (j=0; j<=_msf_refGenLength; j++) { _msf_verifiedLocs[i][j] = _msf_seqListSize*10+1; } } if (pairedEndMode && _msf_seqHits == NULL) { _msf_mappingInfo = getMem(seqListSize * sizeof (MappingInfo), "_msf_mappingInfo @initFASTCG()"); for (i=0; i<seqListSize; i++) { _msf_mappingInfo[i].next = NULL; _msf_mappingInfo[i].size = 0; } _msf_seqHits = getMem((_msf_seqListSize) * sizeof(int), "_msf_seqHits @initFASTCG()"); for (i=0; i<_msf_seqListSize; i++) { _msf_seqHits[i] = 0; } _msf_readHasConcordantMapping = getMem(_msf_seqListSize / 2 * sizeof(char), "_msf_readHasConcordantMapping @initFASTCG()"); for(i = 0; i < _msf_seqListSize/2; i++) { _msf_readHasConcordantMapping[i] = 0; } if (first_try) initLoadingRefGenome(genFileName); } if (_msf_refGenOffset == 0) { // DHL FIRE _msf_refGenBeg = 1; } else { _msf_refGenBeg = CONTIG_OVERLAP - SEQ_LENGTH + 2; } _msf_refGenEnd = _msf_refGenLength - SEQ_LENGTH + 1; } void mapPairedEndSeqCG(Read *seqList, unsigned int seqListSize,
unsigned int AccReads) { _msf_totalSeqListSize = AccReads; _msf_seqListSize = seqListSize; _msf_seqList = seqList; int i = 0; int j = 0; int k = 0; int m = 0; int tid = 0; int key_hash[TEST_KEY_NUM]; unsigned int *locs = NULL; key_struct* key_input = getMem(TEST_KEY_NUM*sizeof(key_struct), "key_input @mapPairedEndSeqCG()"); // First sequence in Forward for(i = 0; i < TEST_KEY_NUM; i++) { key_input[i].key_entry = NULL; key_input[i].key_locs = NULL; key_input[i].key_number = 0; key_input[i].key_entry_size = 0; key_hash[i] = 0; } for(i = 0; i < _msf_seqListSize; i++) { k = _msf_sort_seqList[i].readNumber; for (m = 0; m < TEST_KEY_NUM; m++) { key_hash[m] = hashVal(_msf_seqList[k].seq + m*10 + 5); // forward 5-10-10-10 locs = getCandidates(key_hash[m]); key_input[m].key_number = m; key_input[m].key_entry = locs; key_input[m].key_locs = locs + 1; if (locs != NULL) key_input[m].key_entry_size = locs[0]; else key_input[m].key_entry_size = -1; } for (j = 0; j < TEST_KEY_NUM - 1; j++) { if (key_input[j].key_entry_size > 0) { mapPairEndSeqCG_forward( key_input[j].key_locs, // l1 key_input[j].key_entry_size,// s1 k, // readNumber key_input[j].key_number, // readSegment j, // index key_input, // key_input 0, 0, tid); } } } // First sequence in Forward for(i = 0; i < TEST_KEY_NUM; i++) { key_input[i].key_entry = NULL; key_input[i].key_locs = NULL; key_input[i].key_number = 0; key_input[i].key_entry_size = 0; key_hash[i] = 0; } for(i = 0; i < _msf_seqListSize; i++) { //printf("%d\n", i); k = _msf_sort_seqList[i].readNumber; for (m = 0; m < TEST_KEY_NUM; m++) { key_hash[m] = hashVal(_msf_seqList[k].rseq + m*10 + 5); // forward 5-10-10-10 locs = getCandidates(key_hash[m]); key_input[m].key_number = m; key_input[m].key_entry = locs; key_input[m].key_locs = locs + 1; if (locs != NULL) key_input[m].key_entry_size = locs[0]; else key_input[m].key_entry_size = -1; } for (j = 0; j < TEST_KEY_NUM - 1; j++) { if (key_input[j].key_entry_size > 0) { mapPairEndSeqCG_forward( key_input[j].key_locs, // l1 key_input[j].key_entry_size,// s1 k, // readNumber key_input[j].key_number, // readSegment j, // index key_input, // key_input 1, 0, tid); } } } // First read in Reverse for(i = 0; i < TEST_KEY_NUM; i++) { key_input[i].key_entry = NULL; key_input[i].key_locs = NULL; key_input[i].key_number = 0; key_input[i].key_entry_size = 0; key_hash[i] = 0; } for(i = 0; i < _msf_seqListSize; i++) { k = _msf_sort_seqList[i].readNumber; for (m = 0; m < TEST_KEY_NUM; m++) { key_hash[m] = hashVal(_msf_seqList[k].rseq + m*10); // reverse 10-10-10-5 locs = getCandidates(key_hash[m]); key_input[m].key_number = m; key_input[m].key_entry = locs; key_input[m].key_locs = locs + 1; if (locs != NULL) key_input[m].key_entry_size = locs[0]; else key_input[m].key_entry_size = -1; } for (j = 0; j < TEST_KEY_NUM - 1; j++) { if (key_input[j].key_entry_size > 0) { mapPairEndSeqCG_reverse( key_input[j].key_locs, // l1 key_input[j].key_entry_size,// s1 k, // readNumber key_input[j].key_number, // readSegment j, // index key_input, // key_input 1, 0, tid); } } } for(i = 0; i < TEST_KEY_NUM; i++) { key_input[i].key_entry = NULL; key_input[i].key_locs = NULL; key_input[i].key_number = 0; key_input[i].key_entry_size = 0; key_hash[i] = 0; } for(i = 0; i < _msf_seqListSize; i++) { k = _msf_sort_seqList[i].readNumber; for (m = 0; m < TEST_KEY_NUM; m++) { key_hash[m] = hashVal(_msf_seqList[k].seq + m*10); // reverse 10-10-10-5 locs = getCandidates(key_hash[m]); key_input[m].key_number = m; key_input[m].key_entry = locs; key_input[m].key_locs = locs + 1; if 
(locs != NULL) key_input[m].key_entry_size = locs[0]; else key_input[m].key_entry_size = -1; } for (j = 0; j < TEST_KEY_NUM - 1; j++) { if (key_input[j].key_entry_size > 0) { mapPairEndSeqCG_reverse( key_input[j].key_locs, // l1 key_input[j].key_entry_size,// s1 k, // readNumber key_input[j].key_number, // readSegment j, // index key_input, // key_input 0, 0, tid); } } } freeMem(key_input, TEST_KEY_NUM * sizeof(key_struct), "key_input @mapPairedEndSeqCG"); char fname1[FILE_NAME_LENGTH]; char fname2[FILE_NAME_LENGTH]; MappingLocations *cur; int tmpOut; int lmax = 0, rmax = 0; sprintf(fname1, "%s__%s__%s__%d__1.tmp", mappingOutputPath, _msf_refGenName, mappingOutput, _msf_openFiles); sprintf(fname2, "%s__%s__%s__%d__2.tmp", mappingOutputPath, _msf_refGenName, mappingOutput, _msf_openFiles); FILE* out; FILE* out1 = fileOpen(fname1, "w"); FILE* out2 = fileOpen(fname2, "w"); _msf_openFiles++; for (i = 0; i < _msf_seqListSize; i++) { if (i % 2 == 0) { out = out1; if (lmax < _msf_mappingInfo[i].size) { lmax = _msf_mappingInfo[i].size; } } else { out = out2; if (rmax < _msf_mappingInfo[i].size) { rmax = _msf_mappingInfo[i].size; } } tmpOut = fwrite(&(_msf_mappingInfo[i].size), sizeof(int), 1, out); if (_msf_mappingInfo[i].size > 0) { cur = _msf_mappingInfo[i].next; for (j = 0; j < _msf_mappingInfo[i].size; j++) { if (j > 0 && j % MAP_CHUNKS == 0) { cur = cur->next; } if(debugMode && (cur->cigarSize[j % MAP_CHUNKS] > SEQ_LENGTH || cur->mdSize[j % MAP_CHUNKS] > SEQ_LENGTH)) { printf("ERROR in %d read size exceeds cigar=%d md =%d cigar=%s md =%s\n", i, cur->cigarSize[j % MAP_CHUNKS], cur->mdSize[j % MAP_CHUNKS], cur->cigar[j % MAP_CHUNKS], cur->md[j % MAP_CHUNKS]); } tmpOut = fwrite(&(cur->loc[j % MAP_CHUNKS]), sizeof(int), 1, out); tmpOut = fwrite(&(cur->err[j % MAP_CHUNKS]), sizeof(int), 1, out); tmpOut = fwrite(&(cur->cigarSize[j % MAP_CHUNKS]), sizeof(int), 1, out); tmpOut = fwrite((cur->cigar[j % MAP_CHUNKS]), sizeof(char), (cur->cigarSize[j % MAP_CHUNKS]), out); tmpOut = fwrite(&(cur->mdSize[j % MAP_CHUNKS]), sizeof(int), 1, out); tmpOut = fwrite((cur->md[j % MAP_CHUNKS]), sizeof(char), (cur->mdSize[j % MAP_CHUNKS]), out); } //TODO: if freeAllMapping exist the next line should be comment //_msf_mappingInfo[i].size = 0; } } freeAllMapping(); _msf_maxLSize += lmax; _msf_maxRSize += rmax; tmpOut++; fclose(out1); fclose(out2); } int searchKeyCG(int target_coor, unsigned int* entry_coor, int entry_size, int range) { if (entry_size <= 0) return -1; int lower_bound = 1; int upper_bound = entry_size; int mid = lower_bound + entry_size / 2; while (lower_bound < upper_bound) { if (entry_coor[mid] == target_coor) return entry_coor[mid]; else if (entry_coor[mid] < target_coor) lower_bound = mid + 1; else upper_bound = mid - 1; mid = lower_bound + (upper_bound - lower_bound) / 2; if (entry_coor[upper_bound] == target_coor) { return entry_coor[upper_bound]; } if (entry_coor[lower_bound] == target_coor) { return entry_coor[lower_bound]; } if (entry_coor[mid] == target_coor) { return entry_coor[mid]; } } if (entry_coor[mid] <= (target_coor + range) && entry_coor[mid] >= target_coor) { return entry_coor[mid]; } else if (entry_coor[mid+1] <= (target_coor + range) && entry_coor[mid+1] >= target_coor) { return entry_coor[mid+1]; } else return -1; } int verifySingleEndCG1(int refIndex, char* seq1, int * tmp_offset, int variable, int length) { int i = 0; int err = 0; int errCnt = 0; char* ref = _msf_refGen + refIndex + variable; char* seq = seq1; for (i = 0; i < KEY_LENGTH; i++) { err = *ref != *seq; errCnt += err; 
seq++; ref++; } if (DEBUG == 1) { fprintf(stdout, "##### Hamming Distance Verification: Segment 1 LOC:%d #####\n", refIndex); for (i = 0; i < KEY_LENGTH; i++) fprintf(stdout, "(%c:%c)", *(ref-10+i), *(seq-10+i)); fprintf(stdout, " Error :%d", errCnt); fprintf(stdout, " OFFSET:%d\n", tmp_offset[1]); } return errCnt; } int verifySingleEndCG(int refIndex, int readIndex, char * seq1, int * offset, int variable, int length) { int i = 0; int j = 0; int err = 0; int errCnt = 0; int errMin = 10; char* ref; char* seq; for (i = 0; i < variable; i++) { errCnt = 0; ref = _msf_refGen + refIndex + i; seq = seq1 + readIndex; if (DEBUG == 1) { fprintf(stdout, "## Hamming: General LOC: %s SEQ: %s ##### ", ref, seq); } for (j = 0; j < length; j++) { err = *ref != *seq; errCnt += err; seq++; ref++; } if (errCnt < errMin) { errMin = errCnt; offset[0] = i; } if (DEBUG == 1) { for (j = 0; j < length; j++) fprintf(stdout, "(%c:%c)", *(_msf_refGen + refIndex + i + j), *(seq1 + readIndex + j)); fprintf(stdout, " ERROR : %d", errCnt); fprintf(stdout, " OFFSET: %d\n", i); } } if (DEBUG == 1) { fprintf(stdout, " ErrorMin: %d\n", errMin); } return errMin; } void mapAllSingleEndSeqCG() { int i = 0; int j = 0; int k = 0; int m = 0; int key_hash[TEST_KEY_NUM]; unsigned int *locs = NULL; key_struct key_input[TEST_KEY_NUM]; int tid = 0; omp_set_num_threads(number_of_threads); #pragma omp parallel { // Forward #pragma omp for private(k,m,j,key_hash,key_input,locs,tid) for(i = 0; i < _msf_seqListSize; i++) { k = _msf_sort_seqList[i].readNumber; tid = omp_get_thread_num(); for (m = 0; m < TEST_KEY_NUM; m++) { key_hash[m] = hashVal(_msf_seqList[k].seq + m*10 + 5); // forward 5-10-10-10 locs = getCandidates(key_hash[m]); key_input[m].key_number = m; key_input[m].key_entry = locs; key_input[m].key_locs = locs + 1; if (locs != NULL) { key_input[m].key_entry_size = locs[0]; } else { key_input[m].key_entry_size = -1; } } for (j = 0; j < TEST_KEY_NUM - 1; j++) { if (key_input[j].key_entry_size > 0) { mapSingleEndSeqCG_forward( key_input[j].key_locs, // l1 key_input[j].key_entry_size,// s1 k, // readNumber key_input[j].key_number, // readSegment j, // index key_input, // key_input 0, 0, tid); } } } #pragma omp for private(k,m,j,key_hash,key_input,locs,tid) for(i = 0; i < _msf_seqListSize; i++) { k = _msf_sort_seqList[i].readNumber; tid = omp_get_thread_num(); for (m = 0; m < TEST_KEY_NUM; m++) { key_hash[m] = hashVal(_msf_seqList[k].rseq + m*10 + 5); // forward 5-10-10-10 locs = getCandidates(key_hash[m]); key_input[m].key_number = m; key_input[m].key_entry = locs; key_input[m].key_locs = locs + 1; if (locs != NULL) key_input[m].key_entry_size = locs[0]; else key_input[m].key_entry_size = -1; } for (j = 0; j < TEST_KEY_NUM - 1; j++) { if (key_input[j].key_entry_size > 0) { mapSingleEndSeqCG_forward( key_input[j].key_locs, // l1 key_input[j].key_entry_size,// s1 k, // readNumber key_input[j].key_number, // readSegment j, // index key_input, // key_input 1, 0, tid); } } } #pragma omp for private(k,m,j,key_hash,key_input,locs,tid) for(i = 0; i < _msf_seqListSize; i++) { k = _msf_sort_seqList[i].readNumber; tid = omp_get_thread_num(); for (m = 0; m < TEST_KEY_NUM; m++) { key_hash[m] = hashVal(_msf_seqList[k].rseq + m*10); // forward 5-10-10-10 locs = getCandidates(key_hash[m]); key_input[m].key_number = m; key_input[m].key_entry = locs; key_input[m].key_locs = locs + 1; if (locs != NULL) key_input[m].key_entry_size = locs[0]; else key_input[m].key_entry_size = -1; } for (j = 0; j < TEST_KEY_NUM - 1; j++) { if (key_input[j].key_entry_size 
> 0) { mapSingleEndSeqCG_reverse( key_input[j].key_locs, // l1 key_input[j].key_entry_size,// s1 k, // readNumber key_input[j].key_number, // readSegment j, // index key_input, // key_input 1, 0, tid); } } } #pragma omp for private(k,m,j,key_hash,key_input,locs,tid) for(i = 0; i < _msf_seqListSize; i++) { k = _msf_sort_seqList[i].readNumber; tid = omp_get_thread_num(); for (m = 0; m < TEST_KEY_NUM; m++) { key_hash[m] = hashVal(_msf_seqList[k].seq + m*10); // forward 5-10-10-10 locs = getCandidates(key_hash[m]); key_input[m].key_number = m; key_input[m].key_entry = locs; key_input[m].key_locs = locs + 1; if (locs != NULL) key_input[m].key_entry_size = locs[0]; else key_input[m].key_entry_size = -1; } for (j = 0; j < TEST_KEY_NUM - 1; j++) { if (key_input[j].key_entry_size > 0) { mapSingleEndSeqCG_reverse( key_input[j].key_locs, // l1 key_input[j].key_entry_size,// s1 k, // readNumber key_input[j].key_number, // readSegment j, // index key_input, // key_input 0, 0, tid); } } } } return ; } void mapPairEndSeqCG_reverse(unsigned int *l1, int s1, int readNumber, int readSegment, int index, key_struct* key_input, int direction, int first_mate, int thread_id) { char matrix[200]; char editString[200]; char cigar[MAX_CIGAR_SIZE]; int r = readNumber; int d = (direction==1?-1:1); int readId = 2*readNumber + direction; char *_tmpSeq; char rqual[SEQ_LENGTH+1]; rqual[SEQ_LENGTH]='\0'; if (direction) { reverse(_msf_seqList[readNumber].qual, rqual, SEQ_LENGTH); _tmpSeq = _msf_seqList[readNumber].rseq; } else { _tmpSeq = _msf_seqList[readNumber].seq; } int i = 0; int j = 0; int genLoc = 0; int *locs = (int *) l1; for (j = 0; j < s1; j++) { genLoc = locs[j]; int af_pass[4]; int af_offset[4]; int err = -1; if (key_input[index].key_number == 0) { if (genLoc - 1 < _msf_refGenBeg || genLoc - 1 + 35 + 8 > _msf_refGenEnd || _msf_verifiedLocs[thread_id][genLoc] == readId ) { continue; } } else if (key_input[index].key_number == 1) { if (genLoc - 1 - 10 - 7 < _msf_refGenBeg || genLoc - 1 + 25 - 1 > _msf_refGenEnd || _msf_verifiedLocs[thread_id][genLoc - 10 - 6] == readId ) { continue; } } err = verifySingleEndSeqCG_backward(&genLoc, af_offset, _tmpSeq, af_pass, key_input, index); if (err <= errThreshold && err >= 0) { generateAlignmentMatrxCG_backward(genLoc, af_offset, _tmpSeq, af_pass, err, matrix); generateSNPSAM(matrix, strlen(matrix), editString); sprintf(cigar, "5M%dS10M%dN10M%dN10M", -af_offset[2], af_offset[1], af_offset[0]); } else { err = -1; } //##### mrfast code ##### if(err != -1 && !bestMode) { int offset_range = 3; for(i = -offset_range ; i <= offset_range ; i++) { if(genLoc + i >= _msf_refGenBeg && genLoc + i <= _msf_refGenEnd) { _msf_verifiedLocs[thread_id][genLoc + i] = readId; } } /* calkan counter */ mappingCnt++; MappingLocations *parent = NULL; MappingLocations *child = _msf_mappingInfo[r].next; for (i = 0; i < (_msf_mappingInfo[r].size / MAP_CHUNKS); i++) { parent = child; child = child->next; } if (child == NULL) { MappingLocations *tmp = getMem(sizeof(MappingLocations), "_msf_mappingInfo.next or tmp @mapPairEndSeqCG_reverse()"); tmp->next = NULL; tmp->loc[0] = (genLoc+_msf_refGenOffset) * d; tmp->err[0] = err; tmp->cigarSize[0] = strlen(cigar); sprintf(tmp->cigar[0], "%s", cigar); tmp->mdSize[0] = strlen(editString); sprintf(tmp->md[0], "%s", editString); if (parent == NULL) _msf_mappingInfo[r].next = tmp; else parent->next = tmp; } else { if (strlen(cigar) > SEQ_LENGTH || strlen(editString) > SEQ_LENGTH) { printf( "ERROR in %d read size(After mapping) exceeds cigar=%d md =%d cigar=%s md 
=%s\n", r, (int) strlen(cigar), (int) strlen(editString), cigar, editString); } child->loc[_msf_mappingInfo[r].size % MAP_CHUNKS] = (genLoc+_msf_refGenOffset) * d; child->err[_msf_mappingInfo[r].size % MAP_CHUNKS] = err; child->cigarSize[_msf_mappingInfo[r].size % MAP_CHUNKS] = strlen(cigar); sprintf(child->cigar[_msf_mappingInfo[r].size % MAP_CHUNKS], "%s", cigar); child->mdSize[_msf_mappingInfo[r].size % MAP_CHUNKS] = strlen(editString); sprintf(child->md[_msf_mappingInfo[r].size % MAP_CHUNKS], "%s", editString); } _msf_mappingInfo[r].size++; } } } void freeAllMapping() { int i = 0; int j = 0; MappingLocations *prev; MappingLocations *cur; for(i = 0; i < _msf_seqListSize; i++) { if (_msf_mappingInfo[i].size > 0) { cur = _msf_mappingInfo[i].next; for(j = 0; j < _msf_mappingInfo[i].size; j++) { if(j>0 && j % MAP_CHUNKS == 0) { prev = cur; cur = cur->next; freeMem(prev, sizeof(MappingLocations), "prev @freeAllMapping()"); } } if(cur != NULL) freeMem(cur, sizeof(MappingLocations), "cur @freeAllMapping()"); } _msf_mappingInfo[i].next = NULL; _msf_mappingInfo[i].size = 0; } } void mapPairEndSeqCG_forward(unsigned int *l1, int s1, int readNumber, int readSegment, int index, key_struct* key_input, int direction, int first_mate, int thread_id) { char matrix[MAX_CIGAR_SIZE]; char editString[MAX_CIGAR_SIZE]; char cigar[MAX_CIGAR_SIZE]; int r = readNumber; int d = (direction==1?-1:1); int readId = 2*readNumber+direction; char *_tmpSeq; char rqual[SEQ_LENGTH+1]; int i = 0; rqual[SEQ_LENGTH]='\0'; if (direction) { reverse(_msf_seqList[readNumber].qual, rqual, SEQ_LENGTH); _tmpSeq = _msf_seqList[readNumber].rseq; } else { _tmpSeq = _msf_seqList[readNumber].seq; } int j = 0; int genLoc = 0; int *locs = (int *) l1; for (j = 0; j < s1; j++) { genLoc = locs[j]; int af_pass[4]; int af_offset[4]; int err = -1; if (key_input[index].key_number == 0) { if (genLoc - 1 - 5 + 1 < _msf_refGenBeg || genLoc - 1 + 30 + 9 > _msf_refGenEnd || _msf_verifiedLocs[thread_id][genLoc - 5] == readId ) { continue; } } else if (key_input[index].key_number == 1) { if (genLoc - 1 - 15 - 1 < _msf_refGenBeg || genLoc - 1 + 20 + 7 > _msf_refGenEnd || _msf_verifiedLocs[thread_id][genLoc - 15] == readId ) { continue; } } err = verifySingleEndSeqCG_forward(&genLoc, af_offset, _tmpSeq, af_pass, key_input, index); if (err <= errThreshold && err >= 0) { generateAlignmentMatrxCG_forward(genLoc, af_offset, _tmpSeq, af_pass, err, matrix); generateSNPSAM(matrix, strlen(matrix), editString); sprintf(cigar, "5M%dS10M%dN10M%dN10M", -af_offset[0], af_offset[1], af_offset[2]); } else { err = -1; } //##### mrfast code ##### if(err != -1 && !bestMode) { int offset_range = 3; for(i = -offset_range ; i <= offset_range ; i++) { if(genLoc + i >= _msf_refGenBeg && genLoc + i <= _msf_refGenEnd) { _msf_verifiedLocs[thread_id][genLoc + i] = readId; } } /* calkan counter */ mappingCnt++; MappingLocations *parent = NULL; MappingLocations *child = _msf_mappingInfo[r].next; for (i = 0; i < (_msf_mappingInfo[r].size / MAP_CHUNKS); i++) { parent = child; child = child->next; } if (child == NULL) { MappingLocations *tmp = getMem(sizeof(MappingLocations), "MappingLocations @mapPairEndSeqCG_forward()"); tmp->next = NULL; tmp->loc[0] = (genLoc+_msf_refGenOffset) * d; tmp->err[0] = err; tmp->cigarSize[0] = strlen(cigar); sprintf(tmp->cigar[0], "%s", cigar); tmp->mdSize[0] = strlen(editString); sprintf(tmp->md[0], "%s", editString); if (parent == NULL) _msf_mappingInfo[r].next = tmp; else parent->next = tmp; } else { if (strlen(cigar) > SEQ_LENGTH || strlen(editString) 
> SEQ_LENGTH) { printf( "ERROR in %d read size(After mapping) exceeds cigar=%d md =%d cigar=%s md =%s\n", r, (int) strlen(cigar), (int) strlen(editString), cigar, editString); } child->loc[_msf_mappingInfo[r].size % MAP_CHUNKS] = (genLoc + _msf_refGenOffset) * d; child->err[_msf_mappingInfo[r].size % MAP_CHUNKS] = err; child->cigarSize[_msf_mappingInfo[r].size % MAP_CHUNKS] = strlen(cigar); sprintf(child->cigar[_msf_mappingInfo[r].size % MAP_CHUNKS], "%s", cigar); child->mdSize[_msf_mappingInfo[r].size % MAP_CHUNKS] = strlen(editString); sprintf(child->md[_msf_mappingInfo[r].size % MAP_CHUNKS], "%s", editString); } _msf_mappingInfo[r].size++; } } } void generateAlignmentMatrxCG_backward(int genLoc, int * af_offset, char * seq, int * af_pass, int error, char * matrix) { char * ref; int ix = 0; ref = _msf_refGen + genLoc; for(ix = 0; ix < 10; ix++) { if(ref[ix] == seq[ix]) matrix[ix] = 'M'; else matrix[ix] = ref[ix]; } for(ix = 10; ix < 20; ix++) { if(ref[ix+af_offset[0]] == seq[ix]) matrix[ix] = 'M'; else matrix[ix] = ref[ix+af_offset[0]]; } for(ix = 20; ix < 30; ix++) { if(ref[ix+af_offset[0]+af_offset[1]] == seq[ix]) matrix[ix] = 'M'; else matrix[ix] = ref[ix+af_offset[0]+af_offset[1]]; } for(ix = 30; ix < 35; ix++) { if(ref[ix+af_offset[0]+af_offset[1]+af_offset[2]] == seq[ix]) matrix[ix] = 'M'; else matrix[ix] = ref[ix+af_offset[0]+af_offset[1]+af_offset[2]]; } matrix[ix] = '\0'; } void generateAlignmentMatrxCG_forward(int genLoc, int * af_offset, char * seq, int * af_pass, int error, char * matrix) { char * ref; int ix = 0; ref = _msf_refGen + genLoc; for(ix = 0; ix < 5; ix++) { if(ref[ix] == seq[ix]) matrix[ix] = 'M'; else matrix[ix] = ref[ix]; } for(ix = 5; ix < 15; ix++) { if(ref[ix+af_offset[0]] == seq[ix]) matrix[ix] = 'M'; else matrix[ix] = ref[ix+af_offset[0]]; } for(ix = 15; ix < 25; ix++) { if(ref[ix+af_offset[0]+af_offset[1]] == seq[ix]) matrix[ix] = 'M'; else matrix[ix] = ref[ix+af_offset[0]+af_offset[1]]; } for(ix = 25; ix < 35; ix++) { if(ref[ix+af_offset[0]+af_offset[1]+af_offset[2]] == seq[ix]) matrix[ix] = 'M'; else matrix[ix] = ref[ix+af_offset[0]+af_offset[1]+af_offset[2]]; } matrix[ix] = '\0'; } int verifySingleEndSeqCG_forward(int * locs, int * af_offset, char * seq1, int * af_pass, key_struct* key_input, int index) { int err = -1; int test = 0; int target = 0; int offset[1] = {0}; int x = 0; int minErr = 0; int minErrLoc = 0; int tmp_offset[3] = {0, 0, 0}; int genLoc = *locs; if (key_input[index].key_number == 0) { target = genLoc + KEY_LENGTH; test = searchKeyCG(target, key_input[1].key_entry, key_input[1].key_entry_size, EXPECTED_GAP); if (test != -1) { af_offset[1] = test - target; err = verifySingleEndCG(genLoc + KEY_LENGTH*2 - 1 + af_offset[1] + INITIAL_GAP23, 25, seq1, offset, INDEL_GAP, 10); if (err > errThreshold) { return -1; } af_pass[0] = 1; af_pass[1] = 1; af_pass[2] = 0; af_offset[2] = INITIAL_GAP23 + offset[0]; err += verifySingleEndCG(genLoc - KEY_LENGTH0 - 1 - INITIAL_GAP01, 0, seq1, offset, INDEL_GAP, 5); af_offset[0] = -1 - offset[0]; genLoc = genLoc - KEY_LENGTH0 - 1 - af_offset[0]; } else { target = genLoc + KEY_LENGTH*2 + INITIAL_GAP23; test = searchKeyCG(target, key_input[2].key_entry, key_input[2].key_entry_size, EXPECTED_GAP*2); if (test != -1) { af_offset[2] = test - target + INITIAL_GAP23; if (af_offset[2] == 5) { err = verifySingleEndCG(genLoc + KEY_LENGTH - 1 + INITIAL_GAP12, 15, seq1, offset, INDEL_GAP - 2, 10); af_offset[2] = 5 + offset[0]; af_offset[1] = 5 - (5 + offset[0]); } else if (af_offset[2] == 6) { err = 
verifySingleEndCG(genLoc + KEY_LENGTH - 1 + INITIAL_GAP12, 15, seq1, offset, INDEL_GAP - 1, 10); af_offset[2] = 5 + offset[0]; af_offset[1] = 6 - (5 + offset[0]); } else if (af_offset[2] == 7) { err = verifySingleEndCG(genLoc + KEY_LENGTH - 1 + INITIAL_GAP12, 15, seq1, offset, INDEL_GAP, 10); af_offset[2] = 5 + offset[0]; af_offset[1] = 7 - (5 + offset[0]); } else if (af_offset[2] == 8) { err = verifySingleEndCG(genLoc + KEY_LENGTH - 1 + INITIAL_GAP12 + 1, 15, seq1, offset, INDEL_GAP - 1, 10); af_offset[2] = 6 + offset[0]; af_offset[1] = 8 - (6 + offset[0]); } else if (af_offset[2] == 9) { err = verifySingleEndCG(genLoc + KEY_LENGTH - 1 + INITIAL_GAP12 + 2, 15, seq1, offset, INDEL_GAP - 2, 10); af_offset[2] = 7 + offset[0]; af_offset[1] = 9 - (7 + offset[0]); } if (err > errThreshold) { return -1; } af_pass[0] = 1; af_pass[1] = 0; af_pass[2] = 1; err += verifySingleEndCG(genLoc - KEY_LENGTH0 - 1 - INITIAL_GAP01, 0, seq1, offset, INDEL_GAP, 5); af_offset[0] = INITIAL_GAP01 - offset[0]; genLoc = genLoc - KEY_LENGTH0 - 1 - af_offset[0]; } } } else if (key_input[index].key_number == 1) { target = genLoc + KEY_LENGTH + INITIAL_GAP23; test = searchKeyCG(target, key_input[2].key_entry, key_input[2].key_entry_size, EXPECTED_GAP); if (test != -1) { af_pass[0] = 0; af_pass[1] = 1; af_pass[2] = 1; af_offset[2] = test - target + INITIAL_GAP23; for (x = 0; x < INDEL_GAP; x++) { minErr = verifySingleEndCG1(genLoc - 13, seq1 + 5, offset, x, 10); // gap12 0 ~ 2 --> 2 tmp_offset[1] = 2 - x; if (minErr <= errThreshold) { minErr += verifySingleEndCG(genLoc - 15 - tmp_offset[1], 0, seq1, offset, INDEL_GAP, 5); tmp_offset[0] = -1 - offset[0]; } if (err > minErr || err < 0) { err = minErr; minErrLoc = genLoc - (KEY_LENGTH0 + KEY_LENGTH)- 1 - tmp_offset[0] - tmp_offset[1]; af_offset[0] = tmp_offset[0]; af_offset[1] = tmp_offset[1]; } } genLoc = minErrLoc; } } *locs = genLoc; return err; } int verifySingleEndSeqCG_backward(int * locs, int * af_offset, char * seq1, int * af_pass, key_struct* key_input, int index) { int err = -1; int test = 0; int target = 0; int x = 0; int minErr = 0; int tmp_offset[3] = {0, 0, 0}; int offset[1] = {0}; int genLoc = *locs; if (key_input[index].key_number == 0) { target = genLoc + KEY_LENGTH + INITIAL_GAP23; test = searchKeyCG(target, key_input[1].key_entry, key_input[1].key_entry_size, EXPECTED_GAP); if (test != -1) { af_pass[0] = 1; af_pass[1] = 1; af_pass[2] = 0; af_offset[0] = test - target + INITIAL_GAP23; for (x = 0; x <INDEL_GAP; x++) { minErr = verifySingleEndCG1(genLoc + 20 + af_offset[0] - 1, seq1 + 20, offset, x, 10); tmp_offset[1] = x; if (minErr <= errThreshold) { minErr += verifySingleEndCG(genLoc + 30 + af_offset[0] + tmp_offset[1] - 4, 30, seq1, offset, INDEL_GAP, 5); tmp_offset[2] = offset[0] - 3; } if (err > minErr || err < 0) { err = minErr; af_offset[1] = tmp_offset[1]; af_offset[2] = tmp_offset[2]; } } } else { target = genLoc + KEY_LENGTH*2 + INITIAL_GAP12 + INITIAL_GAP23; test = searchKeyCG(target, key_input[2].key_entry, key_input[2].key_entry_size, EXPECTED_GAP*2); if (test != -1) { af_offset[0] = test - target + INITIAL_GAP23; if (af_offset[0] == 5) { err = verifySingleEndCG(genLoc + KEY_LENGTH + 5 - 1, 10, seq1, offset, INDEL_GAP - 2, 10); af_offset[0] = 5 + offset[0]; af_offset[1] = 5 - (5 + offset[0]); } else if (af_offset[0] == 6) { err = verifySingleEndCG(genLoc + KEY_LENGTH + 5 - 1, 10, seq1, offset, INDEL_GAP - 1, 10); af_offset[0] = 5 + offset[0]; af_offset[1] = 6 - (5 + offset[0]); } else if (af_offset[0] == 7) { err = verifySingleEndCG(genLoc + 
KEY_LENGTH + 5 - 1, 10, seq1, offset, INDEL_GAP, 10); af_offset[0] = 5 + offset[0]; af_offset[1] = 7 - (5 + offset[0]); } else if (af_offset[0] == 8) { err = verifySingleEndCG(genLoc + KEY_LENGTH + 6 - 1, 10, seq1, offset, INDEL_GAP - 1, 10); af_offset[0] = 6 + offset[0]; af_offset[1] = 8 - (6 + offset[0]); } else if (af_offset[0] == 9) { err = verifySingleEndCG(genLoc + KEY_LENGTH + 7 - 1, 10, seq1, offset, INDEL_GAP - 2, 10); af_offset[0] = 7 + offset[0]; af_offset[1] = 9 - (7 + offset[0]); } if (err > errThreshold) { return -1; } af_pass[0] = 1; af_pass[1] = 0; af_pass[2] = 1; err += verifySingleEndCG(genLoc+30+af_offset[0]+af_offset[1] - 4, 30, seq1, offset, INDEL_GAP, 5); af_offset[2] = offset[0] - 3; } } genLoc = genLoc - 1 ; } else if (key_input[index].key_number == 1) { target = genLoc + KEY_LENGTH; test = searchKeyCG(target, key_input[2].key_entry, key_input[2].key_entry_size, EXPECTED_GAP); if (test != -1) { err = verifySingleEndCG(genLoc - 10 - 1 - 7, 0, seq1, offset, INDEL_GAP, 10); if (err > errThreshold) { return -1; } af_pass[0] = 0; af_pass[1] = 1; af_pass[2] = 1; af_offset[1] = test - target; af_offset[0] = 7 - offset[0]; err += verifySingleEndCG(genLoc + 20 + af_offset[1] - 4, 30, seq1, offset, INDEL_GAP, 5); af_offset[2] = offset[0] - 3; genLoc = genLoc - KEY_LENGTH - af_offset[0] - 1; } } *locs = genLoc; return err; } // sirfast: mapSingleEndSeqCG_forward // first_mate 0 or 1, 0 the read is the first part and 1 is the second part. void mapSingleEndSeqCG_forward(unsigned int *l1, int s1, int readNumber, int readSegment, int index, key_struct* key_input, int direction, int first_mate, int thread_id) { char matrix[MAX_CIGAR_SIZE]; char editString[MAX_CIGAR_SIZE]; char cigar[MAX_CIGAR_SIZE]; int readId = 2*readNumber+direction; char *_tmpSeq; char *_tmpQual; char rqual[SEQ_LENGTH+1]; SAM _msf_output; OPT_FIELDS _msf_optionalFields[2]; rqual[SEQ_LENGTH]='\0'; int i = 0; if (direction) { reverse(_msf_seqList[readNumber].qual, rqual, SEQ_LENGTH); _tmpQual = rqual; _tmpSeq = _msf_seqList[readNumber].rseq; } else { _tmpQual = _msf_seqList[readNumber].qual; _tmpSeq = _msf_seqList[readNumber].seq; } int j = 0; int genLoc = 0; int *locs = (int *) l1; for (j = 0; j < s1; j++) { genLoc = locs[j]; int af_pass[4]; int af_offset[4]; int err = -1; if (key_input[index].key_number == 0) { if (genLoc - 1 - 5 + 1 < _msf_refGenBeg || genLoc - 1 + 30 + 9 > _msf_refGenEnd || _msf_verifiedLocs[thread_id][genLoc - 5] == readId ) { continue; } } else if (key_input[index].key_number == 1) { if (genLoc - 1 - 15 - 1 < _msf_refGenBeg || genLoc - 1 + 20 + 7 > _msf_refGenEnd || _msf_verifiedLocs[thread_id][genLoc - 15] == readId ) { continue; } } err = verifySingleEndSeqCG_forward(&genLoc, af_offset, _tmpSeq, af_pass, key_input, index); if (err <= errThreshold && err >= 0) { generateAlignmentMatrxCG_forward(genLoc, af_offset, _tmpSeq, af_pass, err, matrix); generateSNPSAM(matrix, strlen(matrix), editString); sprintf(cigar, "5M%dS10M%dN10M%dN10M", -af_offset[0], af_offset[1], af_offset[2]); } else { err = -1; } if(err != -1 && !bestMode) { mappingCnt++; int offset_range = 3; for(i = -offset_range ; i <= offset_range ; i++) { if(genLoc + i >= _msf_refGenBeg && genLoc + i <= _msf_refGenEnd) { _msf_verifiedLocs[thread_id][genLoc + i] = readId; } } _msf_seqList[readNumber].hits[0]++; _msf_output.QNAME = _msf_seqList[readNumber].name; _msf_output.FLAG = 16 * direction; _msf_output.RNAME = _msf_refGenName; _msf_output.POS = genLoc + _msf_refGenOffset; _msf_output.MAPQ = 255; _msf_output.CIGAR = cigar; 
_msf_output.MRNAME = "*"; _msf_output.MPOS = 0; _msf_output.ISIZE = 0; _msf_output.SEQ = _tmpSeq; _msf_output.QUAL = _tmpQual; _msf_output.optSize = 2; _msf_output.optFields = _msf_optionalFields; _msf_optionalFields[0].tag = "NM"; _msf_optionalFields[0].type = 'i'; _msf_optionalFields[0].iVal = err; _msf_optionalFields[1].tag = "MD"; _msf_optionalFields[1].type = 'Z'; _msf_optionalFields[1].sVal = editString; output(_msf_output, thread_id); if (_msf_seqList[readNumber].hits[0] == 1) { mappedSeqCnt++; } if ( maxHits == 0 ) { _msf_seqList[readNumber].hits[0] = 2; } if ( maxHits!=0 && _msf_seqList[readNumber].hits[0] == maxHits) { completedSeqCnt++; break; } } } } // sirfast: mapSingleEndSeqCG_reverse // first_mate 0 or 1, 0 the read is the first part and 1 is the second part. void mapSingleEndSeqCG_reverse(unsigned int *l1, int s1, int readNumber, int readSegment, int index, key_struct* key_input, int direction, int first_mate, int thread_id) { char matrix[200]; char editString[200]; char cigar[MAX_CIGAR_SIZE]; int readId = 2 * readNumber + direction; char *_tmpSeq, *_tmpQual; char rqual[SEQ_LENGTH+1]; SAM _msf_output; OPT_FIELDS _msf_optionalFields[2]; rqual[SEQ_LENGTH]='\0'; int i = 0; if (direction) { reverse(_msf_seqList[readNumber].qual, rqual, SEQ_LENGTH); _tmpQual = rqual; _tmpSeq = _msf_seqList[readNumber].rseq; } else { _tmpQual = _msf_seqList[readNumber].qual; _tmpSeq = _msf_seqList[readNumber].seq; } int j = 0; int genLoc = 0; int *locs = (int *) l1; for (j = 0; j < s1; j++) { genLoc = locs[j]; int af_pass[4]; int af_offset[4]; int err = -1; if (key_input[index].key_number == 0) { if (genLoc - 1 < _msf_refGenBeg || genLoc - 1 + 35 + 8 > _msf_refGenEnd || _msf_verifiedLocs[thread_id][genLoc] == readId ) { continue; } } else if (key_input[index].key_number == 1) { if (genLoc - 1 - 10 - 7 < _msf_refGenBeg || genLoc - 1 + 25 - 1 > _msf_refGenEnd || _msf_verifiedLocs[thread_id][genLoc - 10 - 6] == readId ) { continue; } } err = verifySingleEndSeqCG_backward(&genLoc, af_offset, _tmpSeq, af_pass, key_input, index); if (err <= errThreshold && err >= 0) { generateAlignmentMatrxCG_backward(genLoc, af_offset, _tmpSeq, af_pass, err, matrix); generateSNPSAM(matrix, strlen(matrix), editString); sprintf(cigar, "10M%dN10M%dN10M%dS5M", af_offset[0], af_offset[1], -af_offset[2]); } else { err = -1; } if(err != -1 && !bestMode) { mappingCnt++; int offset_range = 3; for(i = -offset_range ; i <= offset_range ; i++) { if(genLoc + i >= _msf_refGenBeg && genLoc + i <= _msf_refGenEnd) { _msf_verifiedLocs[thread_id][genLoc + i] = readId; } } _msf_seqList[readNumber].hits[0]++; _msf_output.QNAME = _msf_seqList[readNumber].name; _msf_output.FLAG = 16 * direction; _msf_output.RNAME = _msf_refGenName; _msf_output.POS = genLoc + _msf_refGenOffset; _msf_output.MAPQ = 255; _msf_output.CIGAR = cigar; _msf_output.MRNAME = "*"; _msf_output.MPOS = 0; _msf_output.ISIZE = 0; _msf_output.SEQ = _tmpSeq; _msf_output.QUAL = _tmpQual; _msf_output.optSize = 2; _msf_output.optFields = _msf_optionalFields; _msf_optionalFields[0].tag = "NM"; _msf_optionalFields[0].type = 'i'; _msf_optionalFields[0].iVal = err; _msf_optionalFields[1].tag = "MD"; _msf_optionalFields[1].type = 'Z'; _msf_optionalFields[1].sVal = editString; output(_msf_output, thread_id); // single if (_msf_seqList[readNumber].hits[0] == 1) { mappedSeqCnt++; } if ( maxHits == 0 ) { _msf_seqList[readNumber].hits[0] = 2; } if ( maxHits!=0 && _msf_seqList[readNumber].hits[0] == maxHits) { completedSeqCnt++; break; } } } }
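/*
 * Illustrative sketch (not part of sirFAST): searchKeyCG() above scans a
 * candidate-location list whose element count is stored at index 0 and whose
 * sorted coordinates occupy indices 1..entry_size, accepting any coordinate
 * in [target, target + range]. The standalone version below shows the same
 * range-tolerant lookup with explicit bounds checks; the function name and
 * the tiny harness are hypothetical.
 */
#include <stdio.h>

/* Return the smallest coordinate in [target, target + range], or -1. */
static int search_key_range(int target, const unsigned int *entry_coor, int entry_size, int range) {
	int lo = 1, hi = entry_size; /* coordinates live at indices 1..entry_size */
	if (entry_size <= 0)
		return -1;
	while (lo < hi) { /* binary search for the leftmost coordinate >= target */
		int mid = lo + (hi - lo) / 2;
		if ((int) entry_coor[mid] < target)
			lo = mid + 1;
		else
			hi = mid;
	}
	if ((int) entry_coor[lo] >= target && (int) entry_coor[lo] <= target + range)
		return (int) entry_coor[lo];
	return -1;
}

int main(void) {
	/* entry[0] holds the count, as in the key_struct candidate lists */
	unsigned int entry[] = {5, 10, 20, 31, 47, 90};
	printf("%d\n", search_key_range(30, entry, (int) entry[0], 3)); /* 31 */
	printf("%d\n", search_key_range(48, entry, (int) entry[0], 3)); /* -1 */
	return 0;
}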
spmm.h
/*! * Copyright (c) 2020 by Contributors * \file array/cpu/spmm.h * \brief SPMM CPU kernel function header. */ #ifndef DGL_ARRAY_CPU_SPMM_H_ #define DGL_ARRAY_CPU_SPMM_H_ #include <dgl/array.h> #include <dgl/bcast.h> #include <limits> #include <algorithm> namespace dgl { namespace aten { namespace cpu { /*! * \brief CPU kernel of SpMM on Csr format. * \param bcast Broadcast information. * \param csr The Csr matrix. * \param ufeat The feature on source nodes. * \param efeat The feature on edges. * \param out The result feature on destination nodes. * \note it uses node parallel strategy, different threads are responsible * for the computation of different nodes. */ template <typename IdType, typename DType, typename Op> void SpMMSumCsr( const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out) { const bool has_idx = !IsNullArray(csr.data); const IdType* indptr = csr.indptr.Ptr<IdType>(); const IdType* indices = csr.indices.Ptr<IdType>(); const IdType* edges = csr.data.Ptr<IdType>(); const DType* X = ufeat.Ptr<DType>(); const DType* W = efeat.Ptr<DType>(); int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len; DType* O = out.Ptr<DType>(); #pragma omp parallel for for (IdType rid = 0; rid < csr.num_rows; ++rid) { const IdType row_start = indptr[rid], row_end = indptr[rid + 1]; DType* out_off = O + rid * dim; for (int64_t k = 0; k < dim; ++k) { DType accum = 0; for (IdType j = row_start; j < row_end; ++j) { const IdType cid = indices[j]; const IdType eid = has_idx? edges[j] : j; const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k; const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k; const DType* lhs_off = Op::use_lhs? X + cid * lhs_dim + lhs_add : nullptr; const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr; accum += Op::Call(lhs_off, rhs_off); } out_off[k] = accum; } } } /*! * \brief CPU kernel of SpMM on Coo format. * \param bcast Broadcast information. * \param coo The Coo matrix. * \param ufeat The feature on source nodes. * \param efeat The feature on edges. * \param out The result feature on destination nodes. * \note it uses node parallel strategy, different threads are responsible * for the computation of different nodes. To avoid possible data hazard, * we use atomic operators in the reduction phase. */ template <typename IdType, typename DType, typename Op> void SpMMSumCoo( const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out) { const bool has_idx = !IsNullArray(coo.data); const IdType* row = coo.row.Ptr<IdType>(); const IdType* col = coo.col.Ptr<IdType>(); const IdType* edges = coo.data.Ptr<IdType>(); const DType* X = ufeat.Ptr<DType>(); const DType* W = efeat.Ptr<DType>(); int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len; DType* O = out.Ptr<DType>(); const int64_t nnz = coo.row->shape[0]; // fill zero elements memset(O, 0, out.GetSize()); // spmm #pragma omp parallel for for (IdType i = 0; i < nnz; ++i) { const IdType rid = row[i]; const IdType cid = col[i]; const IdType eid = has_idx? edges[i] : i; DType* out_off = O + cid * dim; for (int64_t k = 0; k < dim; ++k) { const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k; const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k; const DType* lhs_off = Op::use_lhs? X + rid * lhs_dim + lhs_add : nullptr; const DType* rhs_off = Op::use_rhs? 
W + eid * rhs_dim + rhs_add : nullptr; const DType val = Op::Call(lhs_off, rhs_off); #pragma omp atomic out_off[k] += val; } } } /*! * \brief CPU kernel of SpMM-Min/Max on Csr format. * \param bcast Broadcast information. * \param csr The Csr matrix. * \param ufeat The feature on source nodes. * \param efeat The feature on edges. * \param out The result feature on destination nodes. * \param argu Arg-Min/Max on source nodes, which refers to the source node indices * corresponding to the minimum/maximum values of the reduction result on * destination nodes. It's useful in computing gradients of the Min/Max reducer. * \param arge Arg-Min/Max on edges, which refers to the edge indices * corresponding to the minimum/maximum values of the reduction result on * destination nodes. It's useful in computing gradients of the Min/Max reducer. * \note it uses node parallel strategy, different threads are responsible * for the computation of different nodes. */ template <typename IdType, typename DType, typename Op, typename Cmp> void SpMMCmpCsr( const BcastOff& bcast, const CSRMatrix& csr, NDArray ufeat, NDArray efeat, NDArray out, NDArray argu, NDArray arge) { const bool has_idx = !IsNullArray(csr.data); const IdType* indptr = static_cast<IdType*>(csr.indptr->data); const IdType* indices = static_cast<IdType*>(csr.indices->data); const IdType* edges = has_idx ? static_cast<IdType*>(csr.data->data) : nullptr; const DType* X = Op::use_lhs? static_cast<DType*>(ufeat->data) : nullptr; const DType* W = Op::use_rhs? static_cast<DType*>(efeat->data) : nullptr; const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len; DType* O = static_cast<DType*>(out->data); IdType* argX = Op::use_lhs? static_cast<IdType*>(argu->data) : nullptr; IdType* argW = Op::use_rhs? static_cast<IdType*>(arge->data) : nullptr; #pragma omp parallel for for (IdType rid = 0; rid < csr.num_rows; ++rid) { const IdType row_start = indptr[rid], row_end = indptr[rid + 1]; DType* out_off = O + rid * dim; // argX/argW may be nullptr when the corresponding operand is unused; avoid pointer arithmetic on nullptr IdType* argx_off = Op::use_lhs? argX + rid * dim : nullptr; IdType* argw_off = Op::use_rhs? argW + rid * dim : nullptr; for (int64_t k = 0; k < dim; ++k) { DType accum = Cmp::zero; IdType ax = 0, aw = 0; for (IdType j = row_start; j < row_end; ++j) { const IdType cid = indices[j]; const IdType eid = has_idx? edges[j] : j; const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k; const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k; const DType* lhs_off = Op::use_lhs? X + cid * lhs_dim + lhs_add : nullptr; const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr; const DType val = Op::Call(lhs_off, rhs_off); if (Cmp::Call(accum, val)) { accum = val; if (Op::use_lhs) ax = cid; if (Op::use_rhs) aw = eid; } } out_off[k] = accum; if (Op::use_lhs) argx_off[k] = ax; if (Op::use_rhs) argw_off[k] = aw; } } } /*! * \brief CPU kernel of SpMM-Min/Max on Coo format. * \param bcast Broadcast information. * \param coo The Coo matrix. * \param ufeat The feature on source nodes. * \param efeat The feature on edges. * \param out The result feature on destination nodes. * \param argu Arg-Min/Max on source nodes, which refers to the source node indices * corresponding to the minimum/maximum values of the reduction result on * destination nodes. It's useful in computing gradients of the Min/Max reducer. * \param arge Arg-Min/Max on edges, which refers to the edge indices * corresponding to the minimum/maximum values of the reduction result on * destination nodes. It's useful in computing gradients of the Min/Max reducer.
* \note it uses node parallel strategy, different threads are responsible * for the computation of different nodes. To avoid possible data hazard, * we use atomic operators in the reduction phase. */ template <typename IdType, typename DType, typename Op, typename Cmp> void SpMMCmpCoo( const BcastOff& bcast, const COOMatrix& coo, NDArray ufeat, NDArray efeat, NDArray out, NDArray argu, NDArray arge) { const bool has_idx = !IsNullArray(coo.data); const IdType* row = static_cast<IdType*>(coo.row->data); const IdType* col = static_cast<IdType*>(coo.col->data); const IdType* edges = has_idx? static_cast<IdType*>(coo.data->data) : nullptr; const DType* X = Op::use_lhs? static_cast<DType*>(ufeat->data) : nullptr; const DType* W = Op::use_rhs? static_cast<DType*>(efeat->data) : nullptr; const int64_t dim = bcast.out_len, lhs_dim = bcast.lhs_len, rhs_dim = bcast.rhs_len; DType* O = static_cast<DType*>(out->data); IdType* argX = Op::use_lhs? static_cast<IdType*>(argu->data) : nullptr; IdType* argW = Op::use_rhs? static_cast<IdType*>(arge->data) : nullptr; const int64_t nnz = coo.row->shape[0]; // fill zero elements std::fill(O, O + out.NumElements(), Cmp::zero); // spmm #pragma omp parallel for for (IdType i = 0; i < nnz; ++i) { const IdType rid = row[i]; const IdType cid = col[i]; const IdType eid = has_idx? edges[i] : i; DType* out_off = O + cid * dim; IdType* argx_off = Op::use_lhs? argX + cid * dim : nullptr; IdType* argw_off = Op::use_rhs? argW + cid * dim : nullptr; for (int64_t k = 0; k < dim; ++k) { const int64_t lhs_add = bcast.use_bcast ? bcast.lhs_offset[k] : k; const int64_t rhs_add = bcast.use_bcast ? bcast.rhs_offset[k] : k; const DType* lhs_off = Op::use_lhs? X + rid * lhs_dim + lhs_add : nullptr; const DType* rhs_off = Op::use_rhs? W + eid * rhs_dim + rhs_add : nullptr; const DType val = Op::Call(lhs_off, rhs_off); #pragma omp critical if (Cmp::Call(out_off[k], val)) { out_off[k] = val; if (Op::use_lhs) argx_off[k] = rid; if (Op::use_rhs) argw_off[k] = eid; } } } } namespace op { //////////////////////////////// binary operators on CPU //////////////////////////////// template <typename DType> struct Add { static constexpr bool use_lhs = true; static constexpr bool use_rhs = true; inline static DType Call(const DType* lhs_off, const DType* rhs_off) { return *lhs_off + *rhs_off; } }; template <typename DType> constexpr bool Add<DType>::use_lhs; template <typename DType> constexpr bool Add<DType>::use_rhs; template <typename DType> struct Sub { static constexpr bool use_lhs = true; static constexpr bool use_rhs = true; inline static DType Call(const DType* lhs_off, const DType* rhs_off) { return *lhs_off - *rhs_off; } }; template <typename DType> constexpr bool Sub<DType>::use_lhs; template <typename DType> constexpr bool Sub<DType>::use_rhs; template <typename DType> struct Mul { static constexpr bool use_lhs = true; static constexpr bool use_rhs = true; inline static DType Call(const DType* lhs_off, const DType* rhs_off) { return *lhs_off * *rhs_off; } }; template <typename DType> constexpr bool Mul<DType>::use_lhs; template <typename DType> constexpr bool Mul<DType>::use_rhs; template <typename DType> struct Div { static constexpr bool use_lhs = true; static constexpr bool use_rhs = true; inline static DType Call(const DType* lhs_off, const DType* rhs_off) { return *lhs_off / *rhs_off; } }; template <typename DType> constexpr bool Div<DType>::use_lhs; template <typename DType> constexpr bool Div<DType>::use_rhs; template <typename DType> struct CopyLhs { static constexpr bool 
use_lhs = true; static constexpr bool use_rhs = false; inline static DType Call(const DType* lhs_off, const DType* ) { return *lhs_off; } }; template <typename DType> constexpr bool CopyLhs<DType>::use_lhs; template <typename DType> constexpr bool CopyLhs<DType>::use_rhs; template <typename DType> struct CopyRhs { static constexpr bool use_lhs = false; static constexpr bool use_rhs = true; inline static DType Call(const DType* , const DType* rhs_off) { return *rhs_off; } }; template <typename DType> constexpr bool CopyRhs<DType>::use_lhs; template <typename DType> constexpr bool CopyRhs<DType>::use_rhs; //////////////////////////////// Reduce operators on CPU //////////////////////////////// template <typename DType> struct Max { static constexpr DType zero = std::numeric_limits<DType>::lowest(); // return true if accum should be replaced inline static DType Call(DType accum, DType val) { return accum < val; } }; template <typename DType> constexpr DType Max<DType>::zero; template <typename DType> struct Min { static constexpr DType zero = std::numeric_limits<DType>::max(); // return true if accum should be replaced inline static DType Call(DType accum, DType val) { return accum > val; } }; template <typename DType> constexpr DType Min<DType>::zero; #define SWITCH_OP(op, Op, ...) \ do { \ if ((op) == "add") { \ typedef dgl::aten::cpu::op::Add<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "sub") { \ typedef dgl::aten::cpu::op::Sub<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "mul") { \ typedef dgl::aten::cpu::op::Mul<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "div") { \ typedef dgl::aten::cpu::op::Div<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_lhs") { \ typedef dgl::aten::cpu::op::CopyLhs<DType> Op; \ { __VA_ARGS__ } \ } else if ((op) == "copy_rhs") { \ typedef dgl::aten::cpu::op::CopyRhs<DType> Op; \ { __VA_ARGS__ } \ } else { \ LOG(FATAL) << "Unsupported SpMM binary operator: " << op; \ } \ } while (0) } // namespace op } // namespace cpu } // namespace aten } // namespace dgl #endif // DGL_ARRAY_CPU_SPMM_H_
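/*
 * Illustrative sketch (not part of DGL): SpMMSumCsr above parallelizes over
 * destination rows, so each thread owns its slice of the output and needs no
 * synchronization, while the COO kernels must guard the reduction with
 * "#pragma omp atomic" / "#pragma omp critical". The standalone function
 * below shows the same row-parallel pattern for op = mul (ufeat * efeat)
 * without the NDArray/BcastOff machinery; all names are hypothetical.
 */
#include <cstdint>
#include <cstring>

void SpMMSumCsrMul(const int64_t* indptr, const int64_t* indices,
                   const float* ufeat, const float* efeat, float* out,
                   int64_t num_rows, int64_t dim) {
#pragma omp parallel for
  for (int64_t rid = 0; rid < num_rows; ++rid) {
    float* out_off = out + rid * dim;  // this thread's private output slice
    std::memset(out_off, 0, sizeof(float) * dim);
    for (int64_t j = indptr[rid]; j < indptr[rid + 1]; ++j) {
      const int64_t cid = indices[j];  // source node of in-edge j
      for (int64_t k = 0; k < dim; ++k)
        out_off[k] += ufeat[cid * dim + k] * efeat[j * dim + k];
    }
  }
}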
deconvolution_packnto1_fp16s.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void deconvolution_packnto1_fp16s_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1; const int maxk = kernel_w * kernel_h; const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } vfloat32m2_t _sum = vfmv_v_f_f32m2(0.f, vl); const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn; // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); for (int y = 0; y < kernel_h; y++) { int sys = (i + y * dilation_h - (kernel_extent_h - 1)); if (sys < 0 || sys % stride_h != 0) continue; int sy = sys / stride_h; if (sy >= h) continue; for (int x = 0; x < kernel_w; x++) { int sxs = (j + x * dilation_w - (kernel_extent_w - 1)); if (sxs < 0 || sxs % stride_w != 0) continue; int sx = sxs / stride_w; if (sx >= w) continue; const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn; int k = y * kernel_w + x; vfloat16m1_t _val = vle16_v_f16m1(sptr, vl); vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl); _sum = vfwmacc_vv_f32m2(_sum, _val, _w, vl); } } kptr += maxk * packn; } #ifdef RVV_SPEC_0_7 // TODO std::vector<float> ss(packn); vse32_v_f32m2((float*)ss.data(), _sum, vl); for (int i = 0; i < packn; i++) { sum += ss[i]; } #else sum = vfmv_f_s_f32m1_f32(vfredsum_vs_f32m2_f32m1(vfloat32m1_t(), _sum, vfmv_s_f_f32m1(vfloat32m1_t(), sum, vl), vl)); #endif sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } } static void deconvolution_packnto1_fp16sa_rvv(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_fp16, const Mat& bias_data_fp16, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { const int packn = csrr_vlenb() / 2; const word_type vl = vsetvl_e16m1(packn); int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; const int kernel_extent_h = 
dilation_h * (kernel_h - 1) + 1; const int maxk = kernel_w * kernel_h; const __fp16* bias_data_ptr = bias_data_fp16; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { __fp16* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { __fp16 sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } vfloat16m1_t _sum = vfmv_v_f_f16m1(0.f, vl); const __fp16* kptr = (const __fp16*)weight_data_fp16 + maxk * channels * p * packn; // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); for (int y = 0; y < kernel_h; y++) { int sys = (i + y * dilation_h - (kernel_extent_h - 1)); if (sys < 0 || sys % stride_h != 0) continue; int sy = sys / stride_h; if (sy >= h) continue; for (int x = 0; x < kernel_w; x++) { int sxs = (j + x * dilation_w - (kernel_extent_w - 1)); if (sxs < 0 || sxs % stride_w != 0) continue; int sx = sxs / stride_w; if (sx >= w) continue; const __fp16* sptr = m.row<const __fp16>(sy) + sx * packn; int k = y * kernel_w + x; vfloat16m1_t _val = vle16_v_f16m1(sptr, vl); vfloat16m1_t _w = vle16_v_f16m1(kptr + k * packn, vl); _sum = vfmacc_vv_f16m1(_sum, _val, _w, vl); } } kptr += maxk * packn; } sum = vfmv_f_s_f16m1_f16(vfredsum_vs_f16m1_f16m1(vfloat16m1_t(), _sum, vfmv_s_f_f16m1(vfloat16m1_t(), sum, vl), vl)); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }
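/*
 * Illustrative sketch (not part of ncnn): both kernels above compute the
 * deconvolution as a gather. For output pixel (i, j) and kernel tap (y, x),
 * an input pixel contributes only when i + y * dilation_h - (kernel_extent_h - 1)
 * is non-negative, divisible by stride_h, and lands inside the input (and
 * likewise for x). The scalar fragment below isolates that index mapping
 * without the RVV packing; the function name is hypothetical.
 */
static float deconv_output_pixel(const float* in, const float* kernel, int w, int h,
                                 int i, int j, int kernel_w, int kernel_h,
                                 int stride_w, int stride_h, int dilation_w, int dilation_h)
{
    const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1;
    const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1;

    float sum = 0.f;
    for (int y = 0; y < kernel_h; y++)
    {
        int sys = i + y * dilation_h - (kernel_extent_h - 1);
        if (sys < 0 || sys % stride_h != 0)
            continue;
        int sy = sys / stride_h;
        if (sy >= h)
            continue;
        for (int x = 0; x < kernel_w; x++)
        {
            int sxs = j + x * dilation_w - (kernel_extent_w - 1);
            if (sxs < 0 || sxs % stride_w != 0)
                continue;
            int sx = sxs / stride_w;
            if (sx >= w)
                continue;
            sum += in[sy * w + sx] * kernel[y * kernel_w + x];
        }
    }
    return sum;
}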
filter_range.h
// MIT License // Copyright (c) 2019 Edward Liu // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. #pragma once #include <cmath> #include <memory> #include <vector> #include "pre_processors/filter_interface.h" namespace static_map { namespace pre_processers { namespace filter { template <typename PointT> class Range : public Interface<PointT> { public: USE_POINTCLOUD; Range() : Interface<PointT>(), min_range_(0.), max_range_(100.) { // float params INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 0, "min_range", min_range_); INIT_INNER_PARAM(Interface<PointT>::kFloatParam, 1, "max_range", max_range_); } ~Range() {} Range(const Range &) = delete; Range &operator=(const Range &) = delete; std::shared_ptr<Interface<PointT>> CreateNewInstance() override { return std::make_shared<Range<PointT>>(); } void Filter(const PointCloudPtr &cloud) override { if (!cloud || !Interface<PointT>::inner_cloud_) { LOG(WARNING) << "nullptr cloud, do nothing!" << std::endl; return; } this->FilterPrepare(cloud); const int size = this->inner_cloud_->size(); // heap-allocated flag buffer: a bool VLA is non-standard C++ and can overflow the stack for large clouds; char (not std::vector<bool>) keeps the parallel per-element writes race-free std::vector<char> is_inlier(size); #ifdef _OPENMP #pragma omp parallel for num_threads(LOCAL_OMP_THREADS_NUM) #endif for (int i = 0; i < size; ++i) { auto &point = this->inner_cloud_->points[i]; float range = std::sqrt(point.x * point.x + point.y * point.y + point.z * point.z); if (range >= min_range_ && range <= max_range_) { is_inlier[i] = true; } else { is_inlier[i] = false; } } // first, reserve // then, push_back // finally, shrink to fit // to get best efficiency and space usage this->inliers_.reserve(size); this->outliers_.reserve(size); cloud->points.reserve(size); for (int i = 0; i < size; ++i) { if (is_inlier[i]) { this->inliers_.push_back(i); cloud->push_back(this->inner_cloud_->points[i]); } else { this->outliers_.push_back(i); } } this->inliers_.shrink_to_fit(); this->outliers_.shrink_to_fit(); cloud->points.shrink_to_fit(); } void DisplayAllParams() override { PARAM_INFO(min_range_); PARAM_INFO(max_range_); } private: float min_range_; float max_range_; }; } // namespace filter } // namespace pre_processers } // namespace static_map
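/*
 * Illustrative sketch (not part of static_map): Range::Filter() splits the
 * work into a parallel pass that only writes a per-point flag (each thread
 * touches distinct elements, so no locking is needed) and a serial pass that
 * compacts the inliers with the reserve / push_back / shrink_to_fit idiom.
 * The same two-phase pattern on a plain std::vector, with hypothetical names:
 */
#include <cmath>
#include <vector>

struct Pt {
  float x, y, z;
};

std::vector<Pt> RangeFilter(const std::vector<Pt> &pts, float min_range,
                            float max_range) {
  const int size = static_cast<int>(pts.size());
  // char, not bool: std::vector<bool> packs bits, so concurrent writes to
  // neighbouring elements would race
  std::vector<char> is_inlier(size);
#ifdef _OPENMP
#pragma omp parallel for
#endif
  for (int i = 0; i < size; ++i) {
    const float range = std::sqrt(pts[i].x * pts[i].x + pts[i].y * pts[i].y +
                                  pts[i].z * pts[i].z);
    is_inlier[i] = (range >= min_range && range <= max_range);
  }
  std::vector<Pt> inliers;
  inliers.reserve(size);  // reserve once, push_back, then trim
  for (int i = 0; i < size; ++i) {
    if (is_inlier[i]) inliers.push_back(pts[i]);
  }
  inliers.shrink_to_fit();
  return inliers;
}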
jump-openmp.c
/* { dg-do compile } */ /* { dg-options "-fcilkplus -fopenmp" } */ /* { dg-require-effective-target fopenmp } */ int *a, *b, c; void foo() { #pragma simd for (int i=0; i < 1000; ++i) { a[i] = b[i]; if (c == 5) return; /* { dg-error "invalid branch to/from Cilk Plus structured block" } */ } } void bar() { #pragma simd for (int i=0; i < 1000; ++i) { lab: a[i] = b[i]; } if (c == 6) goto lab; /* { dg-error "invalid entry to Cilk Plus structured block" } */ } void baz() { bad1: #pragma omp parallel goto bad1; /* { dg-error "invalid branch to/from OpenMP structured block" } */ goto bad2; /* { dg-error "invalid entry to OpenMP structured block" } */ #pragma omp parallel { bad2: ; } #pragma omp parallel { int i; goto ok1; for (i = 0; i < 10; ++i) { ok1: break; } } }
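/* Illustrative sketch (not part of the testcase): the dg-error lines above
   diagnose branches into or out of OpenMP/Cilk Plus structured blocks. A
   conforming way to stop a worksharing loop early is OpenMP cancellation
   (OpenMP 4.0+; it only takes effect at run time when OMP_CANCELLATION=true),
   using the same globals a, b, c: */
void foo_cancel()
{
  #pragma omp parallel
  #pragma omp for
  for (int i = 0; i < 1000; ++i)
    {
      a[i] = b[i];
      if (c == 5)
        {
          #pragma omp cancel for        /* request early termination */
        }
      #pragma omp cancellation point for /* other iterations observe it here */
    }
}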
fill_nr_3c.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <[email protected]> */ #include <stdlib.h> #include <stdio.h> #include "config.h" #include "cint.h" #include "np_helper/np_helper.h" #define BLKSIZE 8 int GTOmax_shell_dim(int *ao_loc, int *shls_slice, int ncenter); int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); /* * out[naoi,naoj,naok,comp] in F-order */ void GTOnr3c_fill_s1(int (*intor)(), double *out, double *buf, int comp, int jobid, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const int nksh = ksh1 - ksh0; const int ksh = jobid % nksh + ksh0; const int jstart = jobid / nksh * BLKSIZE + jsh0; const int jend = MIN(jstart + BLKSIZE, jsh1); if (jstart >= jend) { return; } const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const int dims[] = {naoi, naoj, naok}; const int k0 = ao_loc[ksh] - ao_loc[ksh0]; out += naoi * naoj * k0; int ish, jsh, i0, j0; int shls[3] = {0, 0, ksh}; for (jsh = jstart; jsh < jend; jsh++) { for (ish = ish0; ish < ish1; ish++) { shls[0] = ish; shls[1] = jsh; i0 = ao_loc[ish] - ao_loc[ish0]; j0 = ao_loc[jsh] - ao_loc[jsh0]; (*intor)(out+j0*naoi+i0, dims, shls, atm, natm, bas, nbas, env, cintopt, buf); } } } static void dcopy_s2_igtj(double *out, double *in, int comp, int ip, int nij, int nijk, int di, int dj, int dk) { const size_t dij = di * dj; const size_t ip1 = ip + 1; int i, j, k, ic; double *pout, *pin; for (ic = 0; ic < comp; ic++) { for (k = 0; k < dk; k++) { pout = out + k * nij; pin = in + k * dij; for (i = 0; i < di; i++) { for (j = 0; j < dj; j++) { pout[j] = pin[j*di+i]; } pout += ip1 + i; } } out += nijk; in += dij * dk; } } static void dcopy_s2_ieqj(double *out, double *in, int comp, int ip, int nij, int nijk, int di, int dj, int dk) { const size_t dij = di * dj; const size_t ip1 = ip + 1; int i, j, k, ic; double *pout, *pin; for (ic = 0; ic < comp; ic++) { for (k = 0; k < dk; k++) { pout = out + k * nij; pin = in + k * dij; for (i = 0; i < di; i++) { for (j = 0; j <= i; j++) { pout[j] = pin[j*di+i]; } pout += ip1 + i; } } out += nijk; in += dij * dk; } } /* * out[comp,naok,nij] in C-order * nij = i1*(i1+1)/2 - i0*(i0+1)/2 * [ \ ] * [**** ] * [***** ] * [*****. ] <= . 
may not be filled, if jsh-upper-bound < ish-upper-bound * [ \] */ void GTOnr3c_fill_s2ij(int (*intor)(), double *out, double *buf, int comp, int jobid, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const int nksh = ksh1 - ksh0; const int ksh = jobid % nksh + ksh0; const int istart = jobid / nksh * BLKSIZE + ish0; const int iend = MIN(istart + BLKSIZE, ish1); if (istart >= iend) { return; } const int i0 = ao_loc[ish0]; const int i1 = ao_loc[ish1]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t off = i0 * (i0 + 1) / 2; const size_t nij = i1 * (i1 + 1) / 2 - off; const size_t nijk = nij * naok; const int dk = ao_loc[ksh+1] - ao_loc[ksh]; const int k0 = ao_loc[ksh] - ao_loc[ksh0]; out += nij * k0; int ish, jsh, ip, jp, di, dj; int shls[3] = {0, 0, ksh}; di = GTOmax_shell_dim(ao_loc, shls_slice, 2); double *cache = buf + di * di * dk * comp; double *pout; for (ish = istart; ish < iend; ish++) { for (jsh = jsh0; jsh < jsh1; jsh++) { ip = ao_loc[ish]; jp = ao_loc[jsh] - ao_loc[jsh0]; if (ip < jp) { continue; } shls[0] = ish; shls[1] = jsh; di = ao_loc[ish+1] - ao_loc[ish]; dj = ao_loc[jsh+1] - ao_loc[jsh]; (*intor)(buf, NULL, shls, atm, natm, bas, nbas, env, cintopt, cache); pout = out + ip * (ip + 1) / 2 - off + jp; if (ip != jp) { dcopy_s2_igtj(pout, buf, comp, ip, nij, nijk, di, dj, dk); } else { dcopy_s2_ieqj(pout, buf, comp, ip, nij, nijk, di, dj, dk); } } } } void GTOnr3c_fill_s2jk(int (*intor)(), double *out, double *buf, int comp, int jobid, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { fprintf(stderr, "GTOnr3c_fill_s2jk not implemented\n"); exit(1); } void GTOnr3c_drv(int (*intor)(), void (*fill)(), double *eri, int comp, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const int nksh = ksh1 - ksh0; const int di = GTOmax_shell_dim(ao_loc, shls_slice, 3); const int cache_size = GTOmax_cache_size(intor, shls_slice, 3, atm, natm, bas, nbas, env); const int njobs = (MAX(nish,njsh) / BLKSIZE + 1) * nksh; #pragma omp parallel { int jobid; double *buf = malloc(sizeof(double) * (di*di*di*comp + cache_size)); #pragma omp for nowait schedule(dynamic) for (jobid = 0; jobid < njobs; jobid++) { (*fill)(intor, eri, buf, comp, jobid, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } free(buf); } }
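GTOnr3c_fill_s2ij above exploits the ip >= jp permutation symmetry by storing only the packed lower triangle: row ip begins at ip*(ip+1)/2 in the packed array, and restricting rows to [i0, i1) subtracts off = i0*(i0+1)/2, which is exactly the pout arithmetic in its inner loop. A minimal standalone sketch of that indexing (s2_index is an illustrative helper, not a function from the file):

#include <cstddef>
#include <cstdio>

static std::size_t s2_index(int i, int j, int i0) {
  const std::size_t off = std::size_t(i0) * (i0 + 1) / 2;
  return std::size_t(i) * (i + 1) / 2 - off + j;  // requires i0 <= i and j <= i
}

int main() {
  // Rows [2, 5) of a symmetric matrix: nij = 5*6/2 - 2*3/2 = 12 packed
  // entries, indexed 0..11 in row-major packed order.
  for (int i = 2; i < 5; ++i)
    for (int j = 0; j <= i; ++j)
      std::printf("(%d,%d) -> %zu\n", i, j, s2_index(i, j, 2));
  return 0;
}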
conv_dw_kernel_x86.c
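The depthwise 3x3 kernels in this file first repack planar NCHW channel data into 8-wide (AVX) or 4-wide (SSE2) interleaved blocks, so that a single vector load or FMA covers 8 (or 4) channels of one spatial tap; after the convolution, the "load_data" blocks undo the transform back to planar layout. A minimal sketch of the AVX-case packing, under stated assumptions: pack_c8 is an illustrative name, and the zero-padding of the tail block is added here for clarity, where the file instead leaves unused lanes unwritten and discards them when unpacking.

// in:  inc planes of inwh floats (planar, channel-major)
// out: ceil(inc/8) blocks of inwh pixels, 8 interleaved channels per pixel
void pack_c8(const float* in, float* out, int inc, int inwh) {
  const int blocks = (inc + 7) / 8;
  for (int b = 0; b < blocks; ++b)
    for (int p = 0; p < inwh; ++p)
      for (int lane = 0; lane < 8; ++lane) {
        const int c = b * 8 + lane;
        // Zero-pad channels past inc so tail-block lanes hold defined values.
        out[(b * inwh + p) * 8 + lane] = (c < inc) ? in[c * inwh + p] : 0.f;
      }
}

With this layout, consecutive _mm256_loadu_ps(itmp0 + 8*k) loads in convdw3x3s1 below fetch the k-th horizontal tap for all 8 channels of a block at once.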
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2021, OPEN AI LAB * Author: [email protected] */ #include "conv_dw_kernel_x86.h" #include "graph/tensor.h" #include "graph/node.h" #include "graph/graph.h" #include "utility/sys_port.h" #include "utility/float.h" #include "utility/log.h" #include "device/cpu/cpu_node.h" #include "device/cpu/cpu_graph.h" #include "device/cpu/cpu_module.h" #include <stdint.h> #include <stdlib.h> #include <string.h> #include <math.h> #if __SSE2__ #include <emmintrin.h> #endif #if __AVX__ #include <immintrin.h> #endif #define max(a, b) ((a) > (b) ? (a) : (b)) #define min(a, b) ((a) < (b) ? (a) : (b)) static void relu(float* data, int size, int activation) { for (int i = 0; i < size; i++) { data[i] = max(data[i], ( float )0); if (activation > 0) { data[i] = min(data[i], ( float )activation); } } } static void pad(float* input, float* output, int in_h, int in_w, int out_h, int out_w, int top, int left, float v) { float* ptr = input; float* outptr = output; int y = 0; // fill top for (; y < top; y++) { int x = 0; for (; x < out_w; x++) { outptr[x] = v; } outptr += out_w; } // fill center for (; y < (top + in_h); y++) { int x = 0; for (; x < left; x++) { outptr[x] = v; } if (in_w < 12) { for (; x < (left + in_w); x++) { outptr[x] = ptr[x - left]; } } else { memcpy(outptr + left, ptr, in_w * sizeof(float)); x += in_w; } for (; x < out_w; x++) { outptr[x] = v; } ptr += in_w; outptr += out_w; } // fill bottom for (; y < out_h; y++) { int x = 0; for (; x < out_w; x++) { outptr[x] = v; } outptr += out_w; } } #if __AVX__ static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 3; int channel_remain = inc - (channel_count << 3); // generate the image tmp float* img_tmp = ( float* )sys_malloc(8 * (unsigned long)inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 8; const float* k0 = img_data + (ii + 0) * inwh; const float* k1 = img_data + (ii + 1) * inwh; const float* k2 = img_data + (ii + 2) * inwh; const float* k3 = img_data + (ii + 3) * inwh; const float* k4 = img_data + (ii + 4) * inwh; const float* k5 = img_data + (ii + 5) * inwh; const float* k6 = img_data + (ii + 6) * inwh; const float* k7 = img_data + (ii + 7) * inwh; const float* f0 = kernel_data + (ii + 0) * 9; const float* f1 = kernel_data + (ii + 1) * 9; const float* f2 = kernel_data + (ii + 2) * 9; const float* f3 = kernel_data + (ii + 3) * 9; const float* f4 = kernel_data + (ii + 4) * 9; 
const float* f5 = kernel_data + (ii + 5) * 9; const float* f6 = kernel_data + (ii + 6) * 9; const float* f7 = kernel_data + (ii + 7) * 9; const float* b0 = bias_data + (ii + 0); const float* b1 = bias_data + (ii + 1); const float* b2 = bias_data + (ii + 2); const float* b3 = bias_data + (ii + 3); const float* b4 = bias_data + (ii + 4); const float* b5 = bias_data + (ii + 5); const float* b6 = bias_data + (ii + 6); const float* b7 = bias_data + (ii + 7); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0[4] = k4[0]; tmp0[5] = k5[0]; tmp0[6] = k6[0]; tmp0[7] = k7[0]; tmp0 += 8; k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1[4] = f4[0]; tmp1[5] = f5[0]; tmp1[6] = f6[0]; tmp1[7] = f7[0]; tmp1 += 8; f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc((unsigned long)outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 8 * 9; float* btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * inw; float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i + 1) * inw; float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i + 2) * inw; float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 7 < outw; j += 8) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = 
_mm256_loadu_ps(btmp); __m256 _sum4 = _mm256_loadu_ps(btmp); __m256 _sum5 = _mm256_loadu_ps(btmp); __m256 _sum6 = _mm256_loadu_ps(btmp); __m256 _sum7 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); __m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _va9 = _mm256_loadu_ps(itmp0 + 72); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _va9 = _mm256_loadu_ps(itmp1 + 72); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = 
_mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _va9 = _mm256_loadu_ps(itmp2 + 72); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum4 = _mm256_fmadd_ps(_va4, _vb0, _sum4); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _sum5 = _mm256_fmadd_ps(_va5, _vb0, _sum5); _sum4 = _mm256_fmadd_ps(_va5, _vb1, _sum4); _sum5 = _mm256_fmadd_ps(_va6, _vb1, _sum5); _sum4 = _mm256_fmadd_ps(_va6, _vb2, _sum4); _sum6 = _mm256_fmadd_ps(_va6, _vb0, _sum6); _sum7 = _mm256_fmadd_ps(_va7, _vb0, _sum7); _sum5 = _mm256_fmadd_ps(_va7, _vb2, _sum5); _sum6 = _mm256_fmadd_ps(_va7, _vb1, _sum6); _sum7 = _mm256_fmadd_ps(_va8, _vb1, _sum7); _sum6 = _mm256_fmadd_ps(_va8, _vb2, _sum6); _sum7 = _mm256_fmadd_ps(_va9, _vb2, _sum7); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); _mm256_storeu_ps(otmp + 32, _sum4); _mm256_storeu_ps(otmp + 40, _sum5); _mm256_storeu_ps(otmp + 48, _sum6); _mm256_storeu_ps(otmp + 56, _sum7); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 64; } for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); 
_va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum2 = _mm256_fmadd_ps(_va2, _vb0, _sum2); _sum3 = _mm256_fmadd_ps(_va3, _vb0, _sum3); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va3, _vb1, _sum2); _sum3 = _mm256_fmadd_ps(_va4, _vb1, _sum3); _sum2 = _mm256_fmadd_ps(_va4, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va5, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum1 = _mm256_fmadd_ps(_va1, _vb0, _sum1); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb1, _sum1); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va3, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, 
_vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 8; } } } // load_data { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 8 * outwh; float* tmp0 = output + i * 8 * outwh; float* tmp1 = output + i * 8 * outwh + 1 * outwh; float* tmp2 = output + i * 8 * outwh + 2 * outwh; float* tmp3 = output + i * 8 * outwh + 3 * outwh; float* tmp4 = output + i * 8 * outwh + 4 * outwh; float* tmp5 = output + i * 8 * outwh + 5 * outwh; float* tmp6 = output + i * 8 * outwh + 6 * outwh; float* tmp7 = output + i * 8 * outwh + 7 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] = otmp[6]; tmp7[0] = otmp[7]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* otmp = output_tmp + ii * outwh; float* tmp0 = output + ii * outwh; float* tmp1 = output + ii * outwh + 1 * outwh; float* tmp2 = output + ii * outwh + 2 * outwh; float* tmp3 = output + ii * outwh + 3 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* otmp = output_tmp + channel_count * 8 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 8; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 3; int channel_remain = inc - (channel_count << 3); // generate the image tmp float* img_tmp = ( float* )sys_malloc(8 * (unsigned long)inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(8 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(8 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 8; const float* k0 = img_data + (ii + 0) * inwh; const float* k1 = img_data + (ii + 1) * inwh; const float* k2 = img_data + (ii + 2) * inwh; const float* k3 = img_data + (ii + 3) * inwh; const float* k4 = img_data + (ii + 4) * inwh; const float* k5 = img_data + (ii + 5) * inwh; const float* k6 = img_data + (ii + 6) * inwh; const float* k7 = img_data + (ii + 7) * inwh; const float* f0 = kernel_data + (ii + 0) * 9; const float* f1 = kernel_data + (ii + 1) * 9; const float* f2 = kernel_data + (ii + 2) * 9; const float* f3 = kernel_data + (ii + 3) * 9; const float* f4 = kernel_data + (ii + 4) * 9; const float* f5 = kernel_data + (ii + 5) * 9; const float* f6 = kernel_data + (ii + 6) * 9; const float* f7 = kernel_data + (ii + 7) * 9; const float* b0 = bias_data + (ii + 0); const float* b1 = bias_data + (ii + 1); const float* b2 = bias_data + (ii + 2); const float* b3 = bias_data + (ii + 3); const float* b4 = bias_data + (ii + 4); const float* 
b5 = bias_data + (ii + 5); const float* b6 = bias_data + (ii + 6); const float* b7 = bias_data + (ii + 7); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0[4] = k4[0]; tmp0[5] = k5[0]; tmp0[6] = k6[0]; tmp0[7] = k7[0]; tmp0 += 8; k0++; k1++; k2++; k3++; k4++; k5++; k6++; k7++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1[4] = f4[0]; tmp1[5] = f5[0]; tmp1[6] = f6[0]; tmp1[7] = f7[0]; tmp1 += 8; f0++; f1++; f2++; f3++; f4++; f5++; f6++; f7++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; tmp2[4] = b4[0]; tmp2[5] = b5[0]; tmp2[6] = b6[0]; tmp2[7] = b7[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; tmp2[4] = 0; tmp2[5] = 0; tmp2[6] = 0; tmp2[7] = 0; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 8; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 8; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (; i < channel_remain; i++) { int ii = channel_count * 8 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 8 * inwh; float* tmp1 = kernel_tmp + channel_count * 8 * 9; float* tmp2 = bias_tmp + channel_count * 8; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 8; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 8; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc((unsigned long)outwh * (channel_count + 1) * 8 * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 8 * 9; float* btmp = bias_tmp + c * 8; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 8 * inwh + 8 * i * 2 * inw; float* itmp1 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 1) * inw; float* itmp2 = img_tmp + c * 8 * inwh + 8 * (i * 2 + 2) * inw; float* otmp = output_tmp + c * 8 * outwh + 8 * i * outw; for (; j + 3 < outw; j += 4) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _sum2 = _mm256_loadu_ps(btmp); __m256 _sum3 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _va5 = _mm256_loadu_ps(itmp0 + 40); __m256 _va6 = _mm256_loadu_ps(itmp0 + 48); __m256 _va7 = _mm256_loadu_ps(itmp0 + 56); 
__m256 _va8 = _mm256_loadu_ps(itmp0 + 64); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _va5 = _mm256_loadu_ps(itmp1 + 40); _va6 = _mm256_loadu_ps(itmp1 + 48); _va7 = _mm256_loadu_ps(itmp1 + 56); _va8 = _mm256_loadu_ps(itmp1 + 64); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _va5 = _mm256_loadu_ps(itmp2 + 40); _va6 = _mm256_loadu_ps(itmp2 + 48); _va7 = _mm256_loadu_ps(itmp2 + 56); _va8 = _mm256_loadu_ps(itmp2 + 64); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _sum2 = _mm256_fmadd_ps(_va4, _vb0, _sum2); _sum2 = _mm256_fmadd_ps(_va5, _vb1, _sum2); _sum2 = _mm256_fmadd_ps(_va6, _vb2, _sum2); _sum3 = _mm256_fmadd_ps(_va6, _vb0, _sum3); _sum3 = _mm256_fmadd_ps(_va7, _vb1, _sum3); _sum3 = _mm256_fmadd_ps(_va8, _vb2, _sum3); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); _mm256_storeu_ps(otmp + 16, _sum2); _mm256_storeu_ps(otmp + 24, _sum3); itmp0 += 64; itmp1 += 64; itmp2 += 64; otmp += 32; } for (; j + 1 < outw; j += 2) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _sum1 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _va3 = _mm256_loadu_ps(itmp0 + 24); __m256 _va4 = _mm256_loadu_ps(itmp0 + 32); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 
= _mm256_loadu_ps(itmp1 + 16); _va3 = _mm256_loadu_ps(itmp1 + 24); _va4 = _mm256_loadu_ps(itmp1 + 32); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _va3 = _mm256_loadu_ps(itmp2 + 24); _va4 = _mm256_loadu_ps(itmp2 + 32); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _sum1 = _mm256_fmadd_ps(_va2, _vb0, _sum1); _sum1 = _mm256_fmadd_ps(_va3, _vb1, _sum1); _sum1 = _mm256_fmadd_ps(_va4, _vb2, _sum1); _mm256_storeu_ps(otmp, _sum0); _mm256_storeu_ps(otmp + 8, _sum1); itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < outw; j++) { __m256 _sum0 = _mm256_loadu_ps(btmp); __m256 _va0 = _mm256_loadu_ps(itmp0); __m256 _va1 = _mm256_loadu_ps(itmp0 + 8); __m256 _va2 = _mm256_loadu_ps(itmp0 + 16); __m256 _vb0 = _mm256_loadu_ps(ktmp); __m256 _vb1 = _mm256_loadu_ps(ktmp + 8); __m256 _vb2 = _mm256_loadu_ps(ktmp + 16); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp1); _va1 = _mm256_loadu_ps(itmp1 + 8); _va2 = _mm256_loadu_ps(itmp1 + 16); _vb0 = _mm256_loadu_ps(ktmp + 24); _vb1 = _mm256_loadu_ps(ktmp + 32); _vb2 = _mm256_loadu_ps(ktmp + 40); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _va0 = _mm256_loadu_ps(itmp2); _va1 = _mm256_loadu_ps(itmp2 + 8); _va2 = _mm256_loadu_ps(itmp2 + 16); _vb0 = _mm256_loadu_ps(ktmp + 48); _vb1 = _mm256_loadu_ps(ktmp + 56); _vb2 = _mm256_loadu_ps(ktmp + 64); _sum0 = _mm256_fmadd_ps(_va0, _vb0, _sum0); _sum0 = _mm256_fmadd_ps(_va1, _vb1, _sum0); _sum0 = _mm256_fmadd_ps(_va2, _vb2, _sum0); _mm256_storeu_ps(otmp, _sum0); itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 8; } } } // load_data { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 8 * outwh; float* tmp0 = output + i * 8 * outwh; float* tmp1 = output + i * 8 * outwh + 1 * outwh; float* tmp2 = output + i * 8 * outwh + 2 * outwh; float* tmp3 = output + i * 8 * outwh + 3 * outwh; float* tmp4 = output + i * 8 * outwh + 4 * outwh; float* tmp5 = output + i * 8 * outwh + 5 * outwh; float* tmp6 = output + i * 8 * outwh + 6 * outwh; float* tmp7 = output + i * 8 * outwh + 7 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; tmp4[0] = otmp[4]; tmp5[0] = otmp[5]; tmp6[0] = otmp[6]; tmp7[0] = otmp[7]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; tmp4++; tmp5++; tmp6++; tmp7++; } } int i = 0; for (; i + 3 < channel_remain; i += 4) { int ii = channel_count * 8 + i; float* otmp = output_tmp + ii * outwh; float* tmp0 = output + ii * outwh; float* tmp1 = output + ii * outwh + 1 * outwh; float* tmp2 = output + ii * outwh + 2 * outwh; float* tmp3 = output + ii * outwh + 3 * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 8; tmp0++; tmp1++; tmp2++; tmp3++; } } for (; i < 
channel_remain; i++) { int ii = channel_count * 8 + i; float* otmp = output_tmp + channel_count * 8 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 8; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } #elif __SSE2__ static void convdw3x3s1(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 2; int channel_remain = inc - (channel_count << 2); // generate the image tmp float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 4; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 4; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 4; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = img_tmp + channel_count * 4 * inwh; float* tmp1 = kernel_tmp + channel_count * 4 * 9; float* tmp2 = bias_tmp + channel_count * 4; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 4; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 4; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 4 * 9; float* btmp = bias_tmp + c * 4; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * inw; float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i + 1) * inw; float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i + 2) * inw; float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw; for (; j + 7 < outw; j += 8) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _sum4 = _mm_loadu_ps(btmp); __m128 _sum5 = _mm_loadu_ps(btmp); __m128 _sum6 = _mm_loadu_ps(btmp); __m128 _sum7 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _va6 = _mm_loadu_ps(itmp0 + 24); __m128 _va7 = _mm_loadu_ps(itmp0 + 28); 
__m128 _va8 = _mm_loadu_ps(itmp0 + 32); __m128 _va9 = _mm_loadu_ps(itmp0 + 36); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _va6 = _mm_loadu_ps(itmp1 + 24); _va7 = _mm_loadu_ps(itmp1 + 28); _va8 = _mm_loadu_ps(itmp1 + 32); _va9 = _mm_loadu_ps(itmp1 + 36); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _va6 = _mm_loadu_ps(itmp2 + 24); _va7 = _mm_loadu_ps(itmp2 + 28); _va8 = _mm_loadu_ps(itmp2 + 32); _va9 = _mm_loadu_ps(itmp2 + 36); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = 
_mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum4 = _mm_add_ps(_mm_mul_ps(_va4, _vb0), _sum4); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _sum5 = _mm_add_ps(_mm_mul_ps(_va5, _vb0), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va5, _vb1), _sum4); _sum5 = _mm_add_ps(_mm_mul_ps(_va6, _vb1), _sum5); _sum4 = _mm_add_ps(_mm_mul_ps(_va6, _vb2), _sum4); _sum6 = _mm_add_ps(_mm_mul_ps(_va6, _vb0), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va7, _vb0), _sum7); _sum5 = _mm_add_ps(_mm_mul_ps(_va7, _vb2), _sum5); _sum6 = _mm_add_ps(_mm_mul_ps(_va7, _vb1), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va8, _vb1), _sum7); _sum6 = _mm_add_ps(_mm_mul_ps(_va8, _vb2), _sum6); _sum7 = _mm_add_ps(_mm_mul_ps(_va9, _vb2), _sum7); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); _mm_storeu_ps(otmp + 16, _sum4); _mm_storeu_ps(otmp + 20, _sum5); _mm_storeu_ps(otmp + 24, _sum6); _mm_storeu_ps(otmp + 28, _sum7); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum4[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum5[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum6[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum7[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 4] * ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24]; sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28]; sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32]; sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24]; sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28]; sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32]; sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24]; sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28]; sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32]; sum4[k] += itmp0[k + 16] * ktmp[k]; sum4[k] += itmp1[k + 16] * ktmp[k + 12]; sum4[k] += itmp2[k + 16] * ktmp[k + 24]; sum4[k] += itmp0[k + 20] * ktmp[k + 4]; sum4[k] += itmp1[k + 20] * ktmp[k + 16]; sum4[k] += itmp2[k + 20] * ktmp[k + 28]; 
sum4[k] += itmp0[k + 24] * ktmp[k + 8]; sum4[k] += itmp1[k + 24] * ktmp[k + 20]; sum4[k] += itmp2[k + 24] * ktmp[k + 32]; sum5[k] += itmp0[k + 20] * ktmp[k]; sum5[k] += itmp1[k + 20] * ktmp[k + 12]; sum5[k] += itmp2[k + 20] * ktmp[k + 24]; sum5[k] += itmp0[k + 24] * ktmp[k + 4]; sum5[k] += itmp1[k + 24] * ktmp[k + 16]; sum5[k] += itmp2[k + 24] * ktmp[k + 28]; sum5[k] += itmp0[k + 28] * ktmp[k + 8]; sum5[k] += itmp1[k + 28] * ktmp[k + 20]; sum5[k] += itmp2[k + 28] * ktmp[k + 32]; sum6[k] += itmp0[k + 24] * ktmp[k]; sum6[k] += itmp1[k + 24] * ktmp[k + 12]; sum6[k] += itmp2[k + 24] * ktmp[k + 24]; sum6[k] += itmp0[k + 28] * ktmp[k + 4]; sum6[k] += itmp1[k + 28] * ktmp[k + 16]; sum6[k] += itmp2[k + 28] * ktmp[k + 28]; sum6[k] += itmp0[k + 32] * ktmp[k + 8]; sum6[k] += itmp1[k + 32] * ktmp[k + 20]; sum6[k] += itmp2[k + 32] * ktmp[k + 32]; sum7[k] += itmp0[k + 28] * ktmp[k]; sum7[k] += itmp1[k + 28] * ktmp[k + 12]; sum7[k] += itmp2[k + 28] * ktmp[k + 24]; sum7[k] += itmp0[k + 32] * ktmp[k + 4]; sum7[k] += itmp1[k + 32] * ktmp[k + 16]; sum7[k] += itmp2[k + 32] * ktmp[k + 28]; sum7[k] += itmp0[k + 36] * ktmp[k + 8]; sum7[k] += itmp1[k + 36] * ktmp[k + 20]; sum7[k] += itmp2[k + 36] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; otmp[k + 16] = sum4[k]; otmp[k + 20] = sum5[k]; otmp[k + 24] = sum6[k]; otmp[k + 28] = sum7[k]; } #endif itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 32; } for (; j + 3 < outw; j += 4) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _va0 = _mm_loadu_ps(itmp2); 
_va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va1, _vb0), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum1 = _mm_add_ps(_mm_mul_ps(_va2, _vb1), _sum1); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _sum2 = _mm_add_ps(_mm_mul_ps(_va2, _vb0), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va3, _vb0), _sum3); _sum1 = _mm_add_ps(_mm_mul_ps(_va3, _vb2), _sum1); _sum2 = _mm_add_ps(_mm_mul_ps(_va3, _vb1), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va4, _vb1), _sum3); _sum2 = _mm_add_ps(_mm_mul_ps(_va4, _vb2), _sum2); _sum3 = _mm_add_ps(_mm_mul_ps(_va5, _vb2), _sum3); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 4] * ktmp[k]; sum1[k] += itmp1[k + 4] * ktmp[k + 12]; sum1[k] += itmp2[k + 4] * ktmp[k + 24]; sum1[k] += itmp0[k + 8] * ktmp[k + 4]; sum1[k] += itmp1[k + 8] * ktmp[k + 16]; sum1[k] += itmp2[k + 8] * ktmp[k + 28]; sum1[k] += itmp0[k + 12] * ktmp[k + 8]; sum1[k] += itmp1[k + 12] * ktmp[k + 20]; sum1[k] += itmp2[k + 12] * ktmp[k + 32]; sum2[k] += itmp0[k + 8] * ktmp[k]; sum2[k] += itmp1[k + 8] * ktmp[k + 12]; sum2[k] += itmp2[k + 8] * ktmp[k + 24]; sum2[k] += itmp0[k + 12] * ktmp[k + 4]; sum2[k] += itmp1[k + 12] * ktmp[k + 16]; sum2[k] += itmp2[k + 12] * ktmp[k + 28]; sum2[k] += itmp0[k + 16] * ktmp[k + 8]; sum2[k] += itmp1[k + 16] * ktmp[k + 20]; sum2[k] += itmp2[k + 16] * ktmp[k + 32]; sum3[k] += itmp0[k + 12] * ktmp[k]; sum3[k] += itmp1[k + 12] * ktmp[k + 12]; sum3[k] += itmp2[k + 12] * ktmp[k + 24]; sum3[k] += itmp0[k + 16] * ktmp[k + 4]; sum3[k] += itmp1[k + 16] * ktmp[k + 16]; sum3[k] += itmp2[k + 16] * ktmp[k + 28]; sum3[k] += itmp0[k + 20] * ktmp[k + 8]; sum3[k] += itmp1[k + 20] * ktmp[k + 20]; sum3[k] += itmp2[k + 20] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; } #endif itmp0 += 16; itmp1 += 16; itmp2 += 16; otmp += 16; } for (; j < outw; j++) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = 
_mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_mm_mul_ps(_va0, _vb0), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va1, _vb1), _sum0); _sum0 = _mm_add_ps(_mm_mul_ps(_va2, _vb2), _sum0); _mm_storeu_ps(otmp, _sum0); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; } #endif itmp0 += 4; itmp1 += 4; itmp2 += 4; otmp += 4; } } } { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 4 * outwh; float* tmp0 = output + i * 4 * outwh; float* tmp1 = output + i * 4 * outwh + 1 * outwh; float* tmp2 = output + i * 4 * outwh + 2 * outwh; float* tmp3 = output + i * 4 * outwh + 3 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 4; tmp0++; tmp1++; tmp2++; tmp3++; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* otmp = output_tmp + channel_count * 4 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 4; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } static void convdw3x3s2(float* output, float* img_data, float* kernel_data, float* bias_data, int inc, int inh, int inw, int outh, int outw, int num_thread) { int inwh = inw * inh; int outwh = outw * outh; int channel_count = inc >> 2; int channel_remain = inc - (channel_count << 2); // generate the image tmp float* img_tmp = ( float* )sys_malloc(4 * inwh * (channel_count + 1) * sizeof(float)); float* kernel_tmp = ( float* )sys_malloc(4 * 9 * (channel_count + 1) * sizeof(float)); float* bias_tmp = ( float* )sys_malloc(4 * (channel_count + 1) * sizeof(float)); { for (int i = 0; i < channel_count; i++) { int ii = i * 4; float* k0 = img_data + (ii + 0) * inwh; float* k1 = img_data + (ii + 1) * inwh; float* k2 = img_data + (ii + 2) * inwh; float* k3 = img_data + (ii + 3) * inwh; float* f0 = kernel_data + (ii + 0) * 9; float* f1 = kernel_data + (ii + 1) * 9; float* f2 = kernel_data + (ii + 2) * 9; float* f3 = kernel_data + (ii + 3) * 9; float* b0 = bias_data + (ii + 0); float* b1 = bias_data + (ii + 1); float* b2 = bias_data + (ii + 2); float* b3 = bias_data + (ii + 3); float* tmp0 = img_tmp + ii * inwh; float* tmp1 = kernel_tmp + ii * 9; float* tmp2 = bias_tmp + ii; for (int j = 0; j < inwh; j++) { tmp0[0] = k0[0]; tmp0[1] = k1[0]; tmp0[2] = k2[0]; tmp0[3] = k3[0]; tmp0 += 4; k0++; k1++; k2++; k3++; } for (int j = 0; j < 9; j++) { tmp1[0] = f0[0]; tmp1[1] = f1[0]; tmp1[2] = f2[0]; tmp1[3] = f3[0]; tmp1 += 4; f0++; f1++; f2++; f3++; } if (bias_data) { tmp2[0] = b0[0]; tmp2[1] = b1[0]; tmp2[2] = b2[0]; tmp2[3] = b3[0]; } else { tmp2[0] = 0; tmp2[1] = 0; tmp2[2] = 0; tmp2[3] = 0; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* k0 = img_data + ii * inwh; float* f0 = kernel_data + ii * 9; float* b0 = bias_data + ii; float* tmp0 = 
img_tmp + channel_count * 4 * inwh; float* tmp1 = kernel_tmp + channel_count * 4 * 9; float* tmp2 = bias_tmp + channel_count * 4; for (int j = 0; j < inwh; j++) { tmp0[i] = k0[0]; tmp0 += 4; k0++; } for (int j = 0; j < 9; j++) { tmp1[i] = f0[0]; tmp1 += 4; f0++; } if (bias_data) { tmp2[i] = b0[0]; } else { tmp2[i] = 0; } } } float* output_tmp = ( float* )sys_malloc(outwh * 4 * (channel_count + 1) * sizeof(float)); for (int c = 0; c < channel_count + 1; c++) { float* ktmp = kernel_tmp + c * 4 * 9; float* btmp = bias_tmp + c * 4; for (int i = 0; i < outh; i++) { int j = 0; float* itmp0 = img_tmp + c * 4 * inwh + 4 * i * 2 * inw; float* itmp1 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 1) * inw; float* itmp2 = img_tmp + c * 4 * inwh + 4 * (i * 2 + 2) * inw; float* otmp = output_tmp + c * 4 * outwh + 4 * i * outw; for (; j + 3 < outw; j += 4) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _sum1 = _mm_loadu_ps(btmp); __m128 _sum2 = _mm_loadu_ps(btmp); __m128 _sum3 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _va3 = _mm_loadu_ps(itmp0 + 12); __m128 _va4 = _mm_loadu_ps(itmp0 + 16); __m128 _va5 = _mm_loadu_ps(itmp0 + 20); __m128 _va6 = _mm_loadu_ps(itmp0 + 24); __m128 _va7 = _mm_loadu_ps(itmp0 + 28); __m128 _va8 = _mm_loadu_ps(itmp0 + 32); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _va3 = _mm_loadu_ps(itmp1 + 12); _va4 = _mm_loadu_ps(itmp1 + 16); _va5 = _mm_loadu_ps(itmp1 + 20); _va6 = _mm_loadu_ps(itmp1 + 24); _va7 = _mm_loadu_ps(itmp1 + 28); _va8 = _mm_loadu_ps(itmp1 + 32); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _va3 = _mm_loadu_ps(itmp2 + 12); _va4 = _mm_loadu_ps(itmp2 + 16); _va5 = _mm_loadu_ps(itmp2 + 20); _va6 = _mm_loadu_ps(itmp2 + 24); _va7 = _mm_loadu_ps(itmp2 + 28); _va8 = _mm_loadu_ps(itmp2 + 32); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, 
_vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va2, _vb0)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va3, _vb1)); _sum1 = _mm_add_ps(_sum1, _mm_mul_ps(_va4, _vb2)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va4, _vb0)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va5, _vb1)); _sum2 = _mm_add_ps(_sum2, _mm_mul_ps(_va6, _vb2)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va6, _vb0)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va7, _vb1)); _sum3 = _mm_add_ps(_sum3, _mm_mul_ps(_va8, _vb2)); _mm_storeu_ps(otmp, _sum0); _mm_storeu_ps(otmp + 4, _sum1); _mm_storeu_ps(otmp + 8, _sum2); _mm_storeu_ps(otmp + 12, _sum3); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum1[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum2[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; float sum3[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; sum1[k] += itmp0[k + 8] * ktmp[k]; sum1[k] += itmp1[k + 8] * ktmp[k + 12]; sum1[k] += itmp2[k + 8] * ktmp[k + 24]; sum1[k] += itmp0[k + 12] * ktmp[k + 4]; sum1[k] += itmp1[k + 12] * ktmp[k + 16]; sum1[k] += itmp2[k + 12] * ktmp[k + 28]; sum1[k] += itmp0[k + 16] * ktmp[k + 8]; sum1[k] += itmp1[k + 16] * ktmp[k + 20]; sum1[k] += itmp2[k + 16] * ktmp[k + 32]; sum2[k] += itmp0[k + 16] * ktmp[k]; sum2[k] += itmp1[k + 16] * ktmp[k + 12]; sum2[k] += itmp2[k + 16] * ktmp[k + 24]; sum2[k] += itmp0[k + 20] * ktmp[k + 4]; sum2[k] += itmp1[k + 20] * ktmp[k + 16]; sum2[k] += itmp2[k + 20] * ktmp[k + 28]; sum2[k] += itmp0[k + 24] * ktmp[k + 8]; sum2[k] += itmp1[k + 24] * ktmp[k + 20]; sum2[k] += itmp2[k + 24] * ktmp[k + 32]; sum3[k] += itmp0[k + 24] * ktmp[k]; sum3[k] += itmp1[k + 24] * ktmp[k + 12]; sum3[k] += itmp2[k + 24] * ktmp[k + 24]; sum3[k] += itmp0[k + 28] * ktmp[k + 4]; sum3[k] += itmp1[k + 28] * ktmp[k + 16]; sum3[k] += itmp2[k + 28] * ktmp[k + 28]; sum3[k] += itmp0[k + 32] * ktmp[k + 8]; sum3[k] += itmp1[k + 32] * ktmp[k + 20]; sum3[k] += itmp2[k + 32] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; otmp[k + 4] = sum1[k]; otmp[k + 8] = sum2[k]; otmp[k + 12] = sum3[k]; } #endif itmp0 += 32; itmp1 += 32; itmp2 += 32; otmp += 16; } for (; j < outw; j++) { #if __SSE__ __m128 _sum0 = _mm_loadu_ps(btmp); __m128 _va0 = _mm_loadu_ps(itmp0); __m128 _va1 = _mm_loadu_ps(itmp0 + 4); __m128 _va2 = _mm_loadu_ps(itmp0 + 8); __m128 _vb0 = _mm_loadu_ps(ktmp); __m128 _vb1 = _mm_loadu_ps(ktmp + 4); __m128 _vb2 = _mm_loadu_ps(ktmp + 8); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp1); _va1 = _mm_loadu_ps(itmp1 + 4); _va2 = _mm_loadu_ps(itmp1 + 8); _vb0 = _mm_loadu_ps(ktmp + 12); _vb1 = _mm_loadu_ps(ktmp + 16); _vb2 = _mm_loadu_ps(ktmp + 20); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _va0 = _mm_loadu_ps(itmp2); _va1 = _mm_loadu_ps(itmp2 + 4); _va2 = _mm_loadu_ps(itmp2 + 8); _vb0 = _mm_loadu_ps(ktmp + 24); _vb1 = _mm_loadu_ps(ktmp + 28); _vb2 = _mm_loadu_ps(ktmp + 32); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va0, _vb0)); _sum0 = _mm_add_ps(_sum0, 
_mm_mul_ps(_va1, _vb1)); _sum0 = _mm_add_ps(_sum0, _mm_mul_ps(_va2, _vb2)); _mm_storeu_ps(otmp, _sum0); #else float sum0[4] = {btmp[0], btmp[1], btmp[2], btmp[3]}; for (int k = 0; k < 4; k++) { sum0[k] += itmp0[k] * ktmp[k]; sum0[k] += itmp1[k] * ktmp[k + 12]; sum0[k] += itmp2[k] * ktmp[k + 24]; sum0[k] += itmp0[k + 4] * ktmp[k + 4]; sum0[k] += itmp1[k + 4] * ktmp[k + 16]; sum0[k] += itmp2[k + 4] * ktmp[k + 28]; sum0[k] += itmp0[k + 8] * ktmp[k + 8]; sum0[k] += itmp1[k + 8] * ktmp[k + 20]; sum0[k] += itmp2[k + 8] * ktmp[k + 32]; } for (int k = 0; k < 4; k++) { otmp[k] = sum0[k]; } #endif itmp0 += 8; itmp1 += 8; itmp2 += 8; otmp += 4; } } } { for (int i = 0; i < channel_count; i++) { float* otmp = output_tmp + i * 4 * outwh; float* tmp0 = output + i * 4 * outwh; float* tmp1 = output + i * 4 * outwh + 1 * outwh; float* tmp2 = output + i * 4 * outwh + 2 * outwh; float* tmp3 = output + i * 4 * outwh + 3 * outwh; for (int i = 0; i < outwh; i++) { tmp0[0] = otmp[0]; tmp1[0] = otmp[1]; tmp2[0] = otmp[2]; tmp3[0] = otmp[3]; otmp += 4; tmp0++; tmp1++; tmp2++; tmp3++; } } for (int i = 0; i < channel_remain; i++) { int ii = channel_count * 4 + i; float* otmp = output_tmp + channel_count * 4 * outwh; float* tmp0 = output + ii * outwh; for (int j = 0; j < outwh; j++) { tmp0[0] = otmp[i]; otmp += 4; tmp0++; } } } sys_free(output_tmp); sys_free(img_tmp); sys_free(kernel_tmp); sys_free(bias_tmp); } #else static void convdw3x3s1(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w, int out_h, int out_w, int num_thread) { int w = in_w; int h = in_h; int c_step_in = w * h; int outw = out_w; int outh = out_h; int c_step_out = outw * outh; const int group = channel; const float* kernel = _kernel; #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < group; g++) { float* out = output + g * c_step_out; float* outptr = out; float* outptr2 = outptr + outw; const float bias0 = _bias ? 
_bias[g] : 0.f; const float* kernel0 = kernel + g * 9; const float* img0 = input + g * c_step_in; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* r3 = img0 + w * 3; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; int i = 0; for (; i + 1 < outh; i += 2) { int remain = outw; for (; remain > 0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; float sum2 = bias0; sum2 += r1[0] * k0[0]; sum2 += r1[1] * k0[1]; sum2 += r1[2] * k0[2]; sum2 += r2[0] * k1[0]; sum2 += r2[1] * k1[1]; sum2 += r2[2] * k1[2]; sum2 += r3[0] * k2[0]; sum2 += r3[1] * k2[1]; sum2 += r3[2] * k2[2]; *outptr = sum; *outptr2 = sum2; r0++; r1++; r2++; r3++; outptr++; outptr2++; } r0 += 2 + w; r1 += 2 + w; r2 += 2 + w; r3 += 2 + w; outptr += outw; outptr2 += outw; } for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; r0++; r1++; r2++; outptr++; } r0 += 2; r1 += 2; r2 += 2; } } } static void convdw3x3s2(float* output, float* input, float* _kernel, float* _bias, int channel, int in_h, int in_w, int out_h, int out_w, int num_thread) { int w = in_w; int h = in_h; int c_step_in = w * h; int outw = out_w; int outh = out_h; int c_step_out = outw * outh; const int group = channel; const int tailstep = w - 2 * outw + w; const float* kernel = _kernel; #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < group; g++) { float* out = output + g * c_step_out; float* outptr = out; const float* kernel0 = kernel + g * 9; const float bias0 = _bias ? 
_bias[g] : 0.f; const float* img0 = input + g * c_step_in; const float* r0 = img0; const float* r1 = img0 + w; const float* r2 = img0 + w * 2; const float* k0 = kernel0; const float* k1 = kernel0 + 3; const float* k2 = kernel0 + 6; int i = 0; for (; i < outh; i++) { int remain = outw; for (; remain > 0; remain--) { float sum = bias0; sum += r0[0] * k0[0]; sum += r0[1] * k0[1]; sum += r0[2] * k0[2]; sum += r1[0] * k1[0]; sum += r1[1] * k1[1]; sum += r1[2] * k1[2]; sum += r2[0] * k2[0]; sum += r2[1] * k2[1]; sum += r2[2] * k2[2]; *outptr = sum; r0 += 2; r1 += 2; r2 += 2; outptr++; } r0 += tailstep; r1 += tailstep; r2 += tailstep; } } } #endif int conv_dw_run(struct tensor* input_tensor, struct tensor* weight_tensor, struct tensor* bias_tensor, struct tensor* output_tensor, struct conv_priv_info* conv_info, struct conv_param* param, int num_thread, int cpu_affinity) { float* input = ( float* )input_tensor->data; float* output = ( float* )output_tensor->data; float* kernel = ( float* )weight_tensor->data; float* biases = NULL; if (bias_tensor) biases = ( float* )bias_tensor->data; int batch_number = input_tensor->dims[0]; int inc = input_tensor->dims[1]; int inh = input_tensor->dims[2]; int inw = input_tensor->dims[3]; int in_chw = inc * inh * inw; int outc = output_tensor->dims[1]; int outh = output_tensor->dims[2]; int outw = output_tensor->dims[3]; int out_hw = outh * outw; int out_chw = out_hw * outc; int ksize_h = param->kernel_h; int ksize_w = param->kernel_w; int pad_w = param->pad_w0; int pad_h = param->pad_h0; int stride_w = param->stride_w; int stride_h = param->stride_h; int dilation_w = param->dilation_w; int dilation_h = param->dilation_h; int group = param->group; int activation = param->activation; /* padding */ int inh_tmp = inh + pad_h + pad_h; int inw_tmp = inw + pad_w + pad_w; int need_pad = !(inh_tmp == inh && inw_tmp == inw); float* input_tmp = NULL; if (need_pad) input_tmp = ( float* )sys_malloc((size_t)inh_tmp * inw_tmp * group * sizeof(float)); /* process: pad and convolve each image of the batch with its own input/output pointers (the previous loop body ignored the batch index, so every image after the first reused image 0 and overwrote the same output) */ for (int i = 0; i < batch_number; i++) { float* input_cur = input + i * in_chw; float* output_cur = output + i * out_chw; float* in_ptr = input_cur; if (need_pad) { #pragma omp parallel for num_threads(num_thread) for (int g = 0; g < group; g++) { float* pad_in = input_cur + g * inh * inw; float* pad_out = input_tmp + g * inh_tmp * inw_tmp; pad(pad_in, pad_out, inh, inw, inh_tmp, inw_tmp, pad_h, pad_w, 0.f); } in_ptr = input_tmp; } if (stride_h == 1) convdw3x3s1(output_cur, in_ptr, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread); else convdw3x3s2(output_cur, in_ptr, kernel, biases, group, inh_tmp, inw_tmp, outh, outw, num_thread); } /* relu */ if (activation >= 0) relu(output, batch_number * out_chw, activation); if (need_pad) sys_free(input_tmp); return 0; }
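For reference, a minimal scalar sketch of what the SSE kernels above compute: a depthwise 3x3 convolution over one already-padded CHW image, with stride 1 or 2 matching convdw3x3s1/convdw3x3s2. The function name and signature are illustrative (not part of this file); it is useful as a ground truth when validating the packed/vectorized paths.

/* Scalar reference: depthwise 3x3 over padded input of inh x inw per channel.
 * stride = 1 or 2. Illustrative only; mirrors the per-channel math above. */
static void ref_convdw3x3(float* output, const float* input, const float* kernel,
                          const float* bias, int channels, int inh, int inw,
                          int outh, int outw, int stride)
{
    for (int c = 0; c < channels; c++)
    {
        const float* in = input + c * inh * inw;   /* one input channel */
        const float* k = kernel + c * 9;           /* its 3x3 filter */
        float b = bias ? bias[c] : 0.f;
        for (int oy = 0; oy < outh; oy++)
            for (int ox = 0; ox < outw; ox++)
            {
                float sum = b;
                for (int ky = 0; ky < 3; ky++)
                    for (int kx = 0; kx < 3; kx++)
                        sum += in[(oy * stride + ky) * inw + (ox * stride + kx)] * k[ky * 3 + kx];
                output[c * outh * outw + oy * outw + ox] = sum;
            }
    }
}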
functionparameter-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ // Arrays passed as function parameters void foo1(double o1[], double c[], int len) { int i ; #pragma omp parallel for for (i = 0; i < len; ++i) { double volnew_o8 = 0.5 * c[i]; o1[i] = volnew_o8; } } double o1[100]; double c[100]; int main() { foo1 (o1, c, 100); return 0; }
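The loop in foo1 is race-free because volnew_o8 is declared inside the loop body (so it is private to each iteration) and each iteration writes a distinct o1[i]. For contrast, a hypothetical variant (not part of DataRaceBench) that hoists volnew_o8 out of the parallel loop makes it shared among threads and introduces a race:

/* For contrast only: hoisting volnew_o8 makes it shared -> data race. */
void foo1_racy(double o1[], double c[], int len) {
  double volnew_o8;                 /* shared across all threads */
  #pragma omp parallel for
  for (int i = 0; i < len; ++i) {
    volnew_o8 = 0.5 * c[i];         /* concurrent unsynchronized writes */
    o1[i] = volnew_o8;              /* may observe another thread's value */
  }
}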
GB_unop__one_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__one_fp64_fp64 // op(A') function: GB_unop_tran__one_fp64_fp64 // C type: double // A type: double // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CAST(z, aij) \ ; ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ ; ; \ /* Cx [pC] = op (cast (aij)) */ \ ; ; \ Cx [pC] = 1 ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__one_fp64_fp64 ( double *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; ; ; Cx [p] = 1 ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; ; ; ; ; Cx [p] = 1 ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__one_fp64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
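Once the GB_* macros expand, the apply kernel above reduces to filling Cx with the constant 1 (skipping holes when the matrix is bitmap), since the ONE operator ignores aij entirely. A stripped-down sketch of that expansion, with an illustrative name and no GraphBLAS headers:

#include <stdint.h>

/* Sketch of GB_unop_apply__one_fp64_fp64 after macro expansion (illustrative). */
static void one_fp64_apply(double *Cx, const int8_t *Ab, int64_t anz, int nthreads)
{
    int64_t p;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0; p < anz; p++)
    {
        if (Ab != NULL && !Ab[p]) continue;   /* bitmap case: skip absent entries */
        Cx[p] = 1;                            /* z = 1, independent of aij */
    }
}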
volumeramprecision.h
/********************************************************************************* * * Inviwo - Interactive Visualization Workshop * * Copyright (c) 2013-2017 Inviwo Foundation * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * *********************************************************************************/ #ifndef IVW_VOLUMERAMPRECISION_H #define IVW_VOLUMERAMPRECISION_H #include <inviwo/core/datastructures/volume/volumeram.h> #include <inviwo/core/datastructures/volume/volumeramhistogram.h> #include <inviwo/core/util/glm.h> #include <inviwo/core/util/stdextensions.h> #include <inviwo/core/datastructures/volume/volume.h> namespace inviwo { /** * \ingroup datastructures */ template <typename T> class VolumeRAMPrecision : public VolumeRAM { public: using type = T; VolumeRAMPrecision(size3_t dimensions = size3_t(128, 128, 128)); VolumeRAMPrecision(T* data, size3_t dimensions = size3_t(128, 128, 128)); VolumeRAMPrecision(const VolumeRAMPrecision<T>& rhs); VolumeRAMPrecision<T>& operator=(const VolumeRAMPrecision<T>& that); virtual VolumeRAMPrecision<T>* clone() const override; virtual ~VolumeRAMPrecision(); T* getDataTyped(); const T* getDataTyped() const; virtual void* getData() override; virtual const void* getData() const override; virtual void* getData(size_t) override; virtual const void* getData(size_t) const override; virtual void setData(void* data, size3_t dimensions) override; virtual void removeDataOwnership() override; virtual const size3_t& getDimensions() const override; virtual void setDimensions(size3_t dimensions) override; virtual bool hasHistograms() const override; virtual HistogramContainer* getHistograms(size_t bins = 2048u, size3_t sampleRate = size3_t(1)) override; virtual const HistogramContainer* getHistograms(size_t bins = 2048u, size3_t sampleRate = size3_t(1)) const override; virtual void calculateHistograms(size_t bins, size3_t sampleRate, const bool& stop) const override; virtual double getAsDouble(const size3_t& pos) const override; virtual dvec2 getAsDVec2(const size3_t& pos) const override; virtual dvec3 getAsDVec3(const size3_t& pos) const override; virtual dvec4 getAsDVec4(const size3_t& pos) const override; virtual void setFromDouble(const size3_t& pos, double val) override; virtual void setFromDVec2(const size3_t& pos, dvec2 val) 
override; virtual void setFromDVec3(const size3_t& pos, dvec3 val) override; virtual void setFromDVec4(const size3_t& pos, dvec4 val) override; virtual double getAsNormalizedDouble(const size3_t& pos) const override; virtual dvec2 getAsNormalizedDVec2(const size3_t& pos) const override; virtual dvec3 getAsNormalizedDVec3(const size3_t& pos) const override; virtual dvec4 getAsNormalizedDVec4(const size3_t& pos) const override; virtual void setFromNormalizedDouble(const size3_t& pos, double val) override; virtual void setFromNormalizedDVec2(const size3_t& pos, dvec2 val) override; virtual void setFromNormalizedDVec3(const size3_t& pos, dvec3 val) override; virtual void setFromNormalizedDVec4(const size3_t& pos, dvec4 val) override; void setValuesFromVolume(const VolumeRAM* src, const size3_t& dstOffset, const size3_t& subSize, const size3_t& subOffset) override; virtual size_t getNumberOfBytes() const override; private: size3_t dimensions_; bool ownsDataPtr_; std::unique_ptr<T[]> data_; mutable HistogramContainer histCont_; }; /** * Factory for volumes. * Creates an VolumeRAM with data type specified by format. * * @param dimensions of volume to create. * @param format of volume to create. * @param dataPtr optional pointer to data to be handed into the volume. * @return nullptr if no valid format was specified. */ IVW_CORE_API std::shared_ptr<VolumeRAM> createVolumeRAM(const size3_t& dimensions, const DataFormatBase* format, void* dataPtr = nullptr); template <typename T> VolumeRAMPrecision<T>::VolumeRAMPrecision(size3_t dimensions) : VolumeRAM(DataFormat<T>::get()) , dimensions_(dimensions) , ownsDataPtr_(true) , data_(new T[dimensions_.x * dimensions_.y * dimensions_.z]()) {} template <typename T> VolumeRAMPrecision<T>::VolumeRAMPrecision(T* data, size3_t dimensions) : VolumeRAM(DataFormat<T>::get()) , dimensions_(dimensions) , ownsDataPtr_(true) , data_(data ? 
data : new T[dimensions_.x * dimensions_.y * dimensions_.z]()) {} template <typename T> VolumeRAMPrecision<T>::VolumeRAMPrecision(const VolumeRAMPrecision<T>& rhs) : VolumeRAM(rhs) , dimensions_(rhs.dimensions_) , ownsDataPtr_(true) , data_(new T[dimensions_.x * dimensions_.y * dimensions_.z]) { std::memcpy(data_.get(), rhs.data_.get(), dimensions_.x * dimensions_.y * dimensions_.z * sizeof(T)); } template <typename T> VolumeRAMPrecision<T>& VolumeRAMPrecision<T>::operator=(const VolumeRAMPrecision<T>& that) { if (this != &that) { VolumeRAM::operator=(that); auto dim = that.dimensions_; auto data = util::make_unique<T[]>(dim.x * dim.y * dim.z); std::memcpy(data.get(), that.data_.get(), dim.x * dim.y * dim.z * sizeof(T)); data_.swap(data); std::swap(dim, dimensions_); ownsDataPtr_ = true; } return *this; } template <typename T> VolumeRAMPrecision<T>::~VolumeRAMPrecision() { if (!ownsDataPtr_) data_.release(); } template <typename T> VolumeRAMPrecision<T>* VolumeRAMPrecision<T>::clone() const { return new VolumeRAMPrecision<T>(*this); } template <typename T> const T* inviwo::VolumeRAMPrecision<T>::getDataTyped() const { return data_.get(); } template <typename T> T* inviwo::VolumeRAMPrecision<T>::getDataTyped() { return data_.get(); } template <typename T> void* VolumeRAMPrecision<T>::getData() { return data_.get(); } template <typename T> const void* VolumeRAMPrecision<T>::getData() const { return const_cast<const T*>(data_.get()); } template <typename T> void* VolumeRAMPrecision<T>::getData(size_t pos) { return data_.get() + pos; } template <typename T> const void* VolumeRAMPrecision<T>::getData(size_t pos) const { return const_cast<const T*>(data_.get()) + pos; } template <typename T> void VolumeRAMPrecision<T>::setData(void* d, size3_t dimensions) { std::unique_ptr<T[]> data(static_cast<T*>(d)); data_.swap(data); std::swap(dimensions_, dimensions); if (!ownsDataPtr_) data.release(); ownsDataPtr_ = true; } template <typename T> void VolumeRAMPrecision<T>::removeDataOwnership() { ownsDataPtr_ = false; } template <typename T> const size3_t& VolumeRAMPrecision<T>::getDimensions() const { return dimensions_; } template <typename T> size_t VolumeRAMPrecision<T>::getNumberOfBytes() const { return dimensions_.x * dimensions_.y * dimensions_.z * sizeof(T); } template <typename T> void VolumeRAMPrecision<T>::setDimensions(size3_t dimensions) { auto data = util::make_unique<T[]>(dimensions.x * dimensions.y * dimensions.z); data_.swap(data); dimensions_ = dimensions; if (!ownsDataPtr_) data.release(); ownsDataPtr_ = true; } template <typename T> double VolumeRAMPrecision<T>::getAsDouble(const size3_t& pos) const { return util::glm_convert<double>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec2 VolumeRAMPrecision<T>::getAsDVec2(const size3_t& pos) const { return util::glm_convert<dvec2>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec3 VolumeRAMPrecision<T>::getAsDVec3(const size3_t& pos) const { return util::glm_convert<dvec3>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec4 VolumeRAMPrecision<T>::getAsDVec4(const size3_t& pos) const { return util::glm_convert<dvec4>(data_[posToIndex(pos, dimensions_)]); } template <typename T> void VolumeRAMPrecision<T>::setFromDouble(const size3_t& pos, double val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setFromDVec2(const size3_t& pos, dvec2 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val); } template 
<typename T> void VolumeRAMPrecision<T>::setFromDVec3(const size3_t& pos, dvec3 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setFromDVec4(const size3_t& pos, dvec4 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert<T>(val); } template <typename T> double VolumeRAMPrecision<T>::getAsNormalizedDouble(const size3_t& pos) const { return util::glm_convert_normalized<double>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec2 VolumeRAMPrecision<T>::getAsNormalizedDVec2(const size3_t& pos) const { return util::glm_convert_normalized<dvec2>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec3 VolumeRAMPrecision<T>::getAsNormalizedDVec3(const size3_t& pos) const { return util::glm_convert_normalized<dvec3>(data_[posToIndex(pos, dimensions_)]); } template <typename T> dvec4 VolumeRAMPrecision<T>::getAsNormalizedDVec4(const size3_t& pos) const { return util::glm_convert_normalized<dvec4>(data_[posToIndex(pos, dimensions_)]); } template <typename T> void VolumeRAMPrecision<T>::setFromNormalizedDouble(const size3_t& pos, double val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setFromNormalizedDVec2(const size3_t& pos, dvec2 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setFromNormalizedDVec3(const size3_t& pos, dvec3 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setFromNormalizedDVec4(const size3_t& pos, dvec4 val) { data_[posToIndex(pos, dimensions_)] = util::glm_convert_normalized<T>(val); } template <typename T> void VolumeRAMPrecision<T>::setValuesFromVolume(const VolumeRAM* src, const size3_t& dstOffset, const size3_t& subSize, const size3_t& subOffset) { const T* srcData = reinterpret_cast<const T*>(src->getData()); size_t initialStartPos = (dstOffset.z * (dimensions_.x * dimensions_.y)) + (dstOffset.y * dimensions_.x) + dstOffset.x; size3_t srcDims = src->getDimensions(); size_t dataSize = subSize.x * getDataFormat()->getSize(); ivec3 subSizeI = ivec3(subSize); #pragma omp parallel for for (int zy = 0; zy < subSizeI.z * subSizeI.y; ++zy) { int z = zy / subSizeI.y; int y = zy % subSizeI.y; /* Declared inside the loop so each OpenMP thread gets its own copy; declared at function scope these were shared and raced across iterations. */ size_t volumePos = (y * dimensions_.x) + (z * dimensions_.x * dimensions_.y); size_t subVolumePos = ((y + subOffset.y) * srcDims.x) + ((z + subOffset.z) * srcDims.x * srcDims.y) + subOffset.x; std::memcpy((data_.get() + volumePos + initialStartPos), (srcData + subVolumePos), dataSize); } } template <typename T> const HistogramContainer* VolumeRAMPrecision<T>::getHistograms(size_t bins, size3_t sampleRate) const { if (!hasHistograms()) { bool stop = false; calculateHistograms(bins, sampleRate, stop); } return &histCont_; } template <typename T> HistogramContainer* VolumeRAMPrecision<T>::getHistograms(size_t bins, size3_t sampleRate) { if (!hasHistograms()) { bool stop = false; calculateHistograms(bins, sampleRate, stop); } return &histCont_; } template <typename T> void VolumeRAMPrecision<T>::calculateHistograms(size_t bins, size3_t sampleRate, const bool& stop) const { if (const auto volume = getOwner()) { dvec2 dataRange = volume->dataMap_.dataRange; histCont_ = util::calculateVolumeHistogram(data_.get(), dimensions_, dataRange, stop, bins, sampleRate); } } template <typename T> bool
VolumeRAMPrecision<T>::hasHistograms() const { return !histCont_.empty() && histCont_.isValid(); } } // namespace #endif // IVW_VOLUMERAMPRECISION_H
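A minimal usage sketch of the class above, using only members declared in this header; the function name and values are arbitrary examples, not part of Inviwo:

#include <inviwo/core/datastructures/volume/volumeramprecision.h>

void example() {
    using namespace inviwo;
    // Owns a zero-initialized 64^3 float buffer (first constructor above).
    VolumeRAMPrecision<float> vol(size3_t(64, 64, 64));
    float* data = vol.getDataTyped();              // typed access to the raw buffer
    data[0] = 1.0f;
    vol.setFromDouble(size3_t(1, 0, 0), 0.5);      // converted via util::glm_convert<T>
    double v = vol.getAsDouble(size3_t(0, 0, 0));  // reads back 1.0
    (void)v;
}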
maxwell_PNedelec.c
/*BHEADER********************************************************************** * Copyright (c) 2008, Lawrence Livermore National Security, LLC. * Produced at the Lawrence Livermore National Laboratory. * This file is part of HYPRE. See file COPYRIGHT for details. * * HYPRE is free software; you can redistribute it and/or modify it under the * terms of the GNU Lesser General Public License (as published by the Free * Software Foundation) version 2.1 dated February 1999. * * $Revision$ ***********************************************************************EHEADER*/ /****************************************************************************** * OpenMP Problems * * Need to fix the way these variables are set and incremented in loops: * j, k (only where they are listed at the end of SMP_PRIVATE) * ******************************************************************************/ #include "_hypre_sstruct_ls.h" hypre_IJMatrix * hypre_Maxwell_PNedelec( hypre_SStructGrid *fgrid_edge, hypre_SStructGrid *cgrid_edge, hypre_Index rfactor ) { MPI_Comm comm= (fgrid_edge-> comm); HYPRE_IJMatrix edge_Edge; hypre_SStructPGrid *p_cgrid, *p_fgrid; hypre_StructGrid *var_cgrid, *var_fgrid; hypre_BoxArray *cboxes, *fboxes, *box_array; hypre_Box *cbox, *fbox, *cellbox, *vbox, copy_box; hypre_BoxArray **contract_fedgeBoxes; hypre_Index **Edge_cstarts, **upper_shifts, **lower_shifts; HYPRE_Int **cfbox_mapping, **fcbox_mapping; hypre_BoxManEntry *entry; HYPRE_Int rank, rank2; HYPRE_Int start_rank1, start_rank2; HYPRE_Int nedges; HYPRE_Int *iedgeEdge; HYPRE_Int *jedge_Edge; HYPRE_Real *vals_edgeEdge; HYPRE_Real fCedge_ratio; HYPRE_Int *ncols_edgeEdge; hypre_Index cindex; hypre_Index findex; hypre_Index var_index, *boxoffset, *suboffset; hypre_Index loop_size, start, cstart, stride, hi_index, lindex; hypre_Index ishift, jshift, kshift, zero_index, one_index; HYPRE_Int n_boxoffsets; HYPRE_Int nparts= hypre_SStructGridNParts(fgrid_edge); HYPRE_Int ndim = hypre_SStructGridNDim(fgrid_edge); HYPRE_SStructVariable *vartypes, *Edge_vartypes; hypre_Index *varoffsets; HYPRE_Int *vartype_map; HYPRE_Int matrix_type= HYPRE_PARCSR; HYPRE_Int nvars, Edge_nvars, part, var; HYPRE_Int tot_vars= 8; HYPRE_Int t, i, j, k, l, m, n, p, r, size; HYPRE_Int ilower, iupper; HYPRE_Int jlower, jupper; HYPRE_Int **lower_ranks, **upper_ranks; HYPRE_Int ***n_CtoVbox, ****CtoVboxnums; HYPRE_Int *num_vboxes, **vboxnums; HYPRE_Int trueV = 1; HYPRE_Int falseV= 0; HYPRE_Int row_in; HYPRE_Int myproc; hypre_BoxInit(&copy_box, ndim); hypre_MPI_Comm_rank(comm, &myproc); hypre_SetIndex3(ishift, 1, 0, 0); hypre_SetIndex3(jshift, 0, 1, 0); hypre_SetIndex3(kshift, 0, 0, 1); hypre_SetIndex3(zero_index, 0, 0, 0); hypre_SetIndex3(one_index, 0, 0, 0); for (i= 0; i< ndim; i++) { one_index[i]= 1; } /* set rfactor[2]= 1 if ndim=2. */ if (ndim == 2) { rfactor[2]= 1; } /*------------------------------------------------------------------- * Find the coarse-fine connection pattern, i.e., the topology * needed to create the interpolation operators. * These connections are determined using the cell-centred grids. * Note that we are assuming the variable type enumeration * given in hypre_SStructVariable_enum. * * We consider both 2-d and 3-d cases. In 2-d, the edges are faces. * We will continue to call them edges, but use the face variable * enumeration. *-------------------------------------------------------------------*/ varoffsets= hypre_CTAlloc(hypre_Index, tot_vars); /* total of 8 variable types. Create a mapping between user enumeration to hypre enumeration. 
Only need for edge grids. */ vartype_map= hypre_CTAlloc(HYPRE_Int, tot_vars); part= 0; p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part); nvars = hypre_SStructPGridNVars(p_cgrid); vartypes= hypre_SStructPGridVarTypes(p_cgrid); for (i= 0; i< nvars; i++) { t= vartypes[i]; hypre_SStructVariableGetOffset((hypre_SStructVariable) t, ndim, varoffsets[t]); switch(t) { case 2: { vartype_map[2]= i; break; } case 3: { vartype_map[3]= i; break; } case 5: { vartype_map[5]= i; break; } case 6: { vartype_map[6]= i; break; } case 7: { vartype_map[7]= i; break; } } } /* local sizes */ nedges = 0; for (part= 0; part< nparts; part++) { /* same for 2-d & 3-d, assuming that fgrid_edge= fgrid_face in input */ p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part); /* edge fgrid */ nvars = hypre_SStructPGridNVars(p_fgrid); for (var= 0; var< nvars; var++) { var_fgrid= hypre_SStructPGridSGrid(p_fgrid, var); nedges += hypre_StructGridLocalSize(var_fgrid); } } /*-------------------------------------------------------------------------- * Form mappings between the c & f box numbers. Note that a cbox * can land inside only one fbox since the latter was contracted. Without * the extraction, a cbox can land in more than 1 fboxes (e.g., cbox * boundary extending into other fboxes). *--------------------------------------------------------------------------*/ cfbox_mapping= hypre_TAlloc(HYPRE_Int *, nparts); fcbox_mapping= hypre_TAlloc(HYPRE_Int *, nparts); for (i= 0; i< nparts; i++) { p_fgrid = hypre_SStructGridPGrid(fgrid_edge, i); var_fgrid= hypre_SStructPGridCellSGrid(p_fgrid); fboxes = hypre_StructGridBoxes(var_fgrid); j = hypre_BoxArraySize(fboxes); fcbox_mapping[i]= hypre_CTAlloc(HYPRE_Int, j); p_cgrid = hypre_SStructGridPGrid(cgrid_edge, i); var_cgrid= hypre_SStructPGridCellSGrid(p_cgrid); cboxes = hypre_StructGridBoxes(var_cgrid); j = hypre_BoxArraySize(fboxes); cfbox_mapping[i]= hypre_CTAlloc(HYPRE_Int, j); /* assuming if i1 > i2 and (box j1) is coarsened from (box i1) and (box j2) from (box i2), then j1 > j2. */ k= 0; hypre_ForBoxI(j, fboxes) { fbox= hypre_BoxArrayBox(fboxes, j); hypre_CopyBox(fbox, &copy_box); hypre_ProjectBox(&copy_box, zero_index, rfactor); hypre_StructMapFineToCoarse(hypre_BoxIMin(&copy_box), zero_index, rfactor, hypre_BoxIMin(&copy_box)); hypre_StructMapFineToCoarse(hypre_BoxIMax(&copy_box), zero_index, rfactor, hypre_BoxIMax(&copy_box)); /* since the ordering of the cboxes was determined by the fbox ordering, we only have to check if the first cbox in the list intersects with copy_box. If not, this fbox vanished in the coarsening. Note that this gives you the correct interior cbox. 
*/ cbox= hypre_BoxArrayBox(cboxes, k); hypre_IntersectBoxes(&copy_box, cbox, &copy_box); if (hypre_BoxVolume(&copy_box)) { cfbox_mapping[i][k]= j; fcbox_mapping[i][j]= k; k++; } /* if (hypre_BoxVolume(&copy_box)) */ } /* hypre_ForBoxI(j, fboxes) */ } /* for (i= 0; i< nparts; i++) */ /* variable rank bounds for this processor */ n_CtoVbox = hypre_TAlloc(HYPRE_Int **, nparts); CtoVboxnums = hypre_TAlloc(HYPRE_Int ***, nparts); for (part= 0; part< nparts; part++) { hypre_SStructCellGridBoxNumMap(fgrid_edge, part, &n_CtoVbox[part], &CtoVboxnums[part]); } /* variable rank bounds for this processor */ lower_ranks= hypre_TAlloc(HYPRE_Int *, nparts); upper_ranks= hypre_TAlloc(HYPRE_Int *, nparts); for (part= 0; part< nparts; part++) { p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part); Edge_nvars= hypre_SStructPGridNVars(p_fgrid); lower_ranks[part]= hypre_CTAlloc(HYPRE_Int, Edge_nvars); upper_ranks[part]= hypre_CTAlloc(HYPRE_Int, Edge_nvars); for (t= 0; t< Edge_nvars; t++) { var_fgrid= hypre_SStructPGridSGrid(p_fgrid, t); box_array= hypre_StructGridBoxes(var_fgrid); fbox = hypre_BoxArrayBox(box_array, 0); hypre_CopyIndex(hypre_BoxIMin(fbox), findex); hypre_SStructGridFindBoxManEntry(fgrid_edge, part, findex, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &lower_ranks[part][t], matrix_type); fbox= hypre_BoxArrayBox(box_array, hypre_BoxArraySize(box_array)-1); hypre_CopyIndex(hypre_BoxIMax(fbox), findex); hypre_SStructGridFindBoxManEntry(fgrid_edge, part, findex, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &upper_ranks[part][t], matrix_type); } } /* CREATE IJ_MATRICES- need to find the size of each one. Notice that the row and col ranks of these matrices can be created using only grid information. Grab the first part, first variable, first box, and lower index (lower rank); Grab the last part, last variable, last box, and upper index (upper rank). */ /* edge_Edge. Same for 2-d and 3-d. */ /* lower rank */ start_rank1= hypre_SStructGridStartRank(fgrid_edge); start_rank2= hypre_SStructGridStartRank(cgrid_edge); ilower = start_rank1; jlower = start_rank2; /* upper rank */ part= nparts-1; p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part); nvars = hypre_SStructPGridNVars(p_fgrid); var_fgrid= hypre_SStructPGridSGrid(p_fgrid, nvars-1); fboxes = hypre_StructGridBoxes(var_fgrid); fbox = hypre_BoxArrayBox(fboxes, hypre_BoxArraySize(fboxes)-1); hypre_SStructGridBoxProcFindBoxManEntry(fgrid_edge, part, nvars-1, hypre_BoxArraySize(fboxes)-1, myproc, &entry); hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(fbox), &iupper); p_cgrid = hypre_SStructGridPGrid(cgrid_edge, part); nvars = hypre_SStructPGridNVars(p_cgrid); var_cgrid= hypre_SStructPGridSGrid(p_cgrid, nvars-1); cboxes = hypre_StructGridBoxes(var_cgrid); cbox = hypre_BoxArrayBox(cboxes, hypre_BoxArraySize(cboxes)-1); hypre_SStructGridBoxProcFindBoxManEntry(cgrid_edge, part, nvars-1, hypre_BoxArraySize(cboxes)-1, myproc, &entry); hypre_SStructBoxManEntryGetGlobalCSRank(entry, hypre_BoxIMax(cbox), &jupper); HYPRE_IJMatrixCreate(comm, ilower, iupper, jlower, jupper, &edge_Edge); HYPRE_IJMatrixSetObjectType(edge_Edge, HYPRE_PARCSR); HYPRE_IJMatrixInitialize(edge_Edge); /*----------------------------------------------------------------------- * edge_Edge, the actual interpolation matrix. * For each fine edge row, we need to know if it is a edge, * boundary edge, or face edge. Knowing this allows us to determine the * structure and weights of the interpolation matrix. 
* We assume that a coarse edge interpolates only to fine edges in or on * an agglomerate. That is, fine edges with indices that do were * truncated do not get interpolated to. * Scheme: Loop over fine edge grid. For each fine edge ijk, * 1) map it to a fine cell with the fine edge at the lower end * of the box,e.g. x_edge[ijk] -> cell[i,j+1,k+1]. * 2) coarsen the fine cell to obtain a coarse cell. Determine the * location of the fine edge with respect to the coarse edges * of this cell. Coarsening needed only when determining the * column rank. * Need to distinguish between 2-d and 3-d. *-----------------------------------------------------------------------*/ /* count the row/col connections */ iedgeEdge = hypre_CTAlloc(HYPRE_Int, nedges); ncols_edgeEdge= hypre_CTAlloc(HYPRE_Int, nedges); /* get the contracted boxes */ contract_fedgeBoxes= hypre_TAlloc(hypre_BoxArray *, nparts); Edge_cstarts= hypre_TAlloc(hypre_Index *, nparts); upper_shifts= hypre_TAlloc(hypre_Index *, nparts); lower_shifts= hypre_TAlloc(hypre_Index *, nparts); for (part= 0; part< nparts; part++) { p_fgrid = hypre_SStructGridPGrid(fgrid_edge, part); var_fgrid= hypre_SStructPGridCellSGrid(p_fgrid); fboxes = hypre_StructGridBoxes(var_fgrid); /* fill up the contracted box_array */ contract_fedgeBoxes[part]= hypre_BoxArrayCreate(0, ndim); Edge_cstarts[part]= hypre_TAlloc(hypre_Index, hypre_BoxArraySize(fboxes)); upper_shifts[part]= hypre_TAlloc(hypre_Index, hypre_BoxArraySize(fboxes)); lower_shifts[part]= hypre_TAlloc(hypre_Index, hypre_BoxArraySize(fboxes)); hypre_ForBoxI(i, fboxes) { fbox= hypre_BoxArrayBox(fboxes, i); /* contract the fbox to correspond to the correct cbox */ cbox= hypre_BoxContraction(fbox, var_fgrid, rfactor); hypre_AppendBox(cbox, contract_fedgeBoxes[part]); /* record the offset mapping between the coarse cell index and the fine cell index */ hypre_ClearIndex(upper_shifts[part][i]); hypre_ClearIndex(lower_shifts[part][i]); for (k= 0; k< ndim; k++) { m= hypre_BoxIMin(cbox)[k]; p= m%rfactor[k]; if (p > 0 && m > 0) { upper_shifts[part][i][k]= p-1; lower_shifts[part][i][k]= p-rfactor[k]; } else { upper_shifts[part][i][k]= rfactor[k]-p-1; lower_shifts[part][i][k]=-p; } } /* record the cstarts of the cbox */ hypre_ProjectBox(cbox, zero_index, rfactor); hypre_CopyIndex(hypre_BoxIMin(cbox), Edge_cstarts[part][i]); hypre_StructMapFineToCoarse(Edge_cstarts[part][i], zero_index, rfactor, Edge_cstarts[part][i]); hypre_BoxDestroy(cbox); } } /* for (part= 0; part< nparts; part++) */ /*----------------------------------------------------------------------- * loop first over the fedges aligning with the agglomerate coarse edges. * Will loop over the face & interior edges separately also. *-----------------------------------------------------------------------*/ j= 0; for (part= 0; part< nparts; part++) { p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */ Edge_nvars= hypre_SStructPGridNVars(p_fgrid); Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid); /* note that fboxes are the contracted CELL boxes. Will get the correct variable grid extents. 
*/ fboxes= contract_fedgeBoxes[part]; for (t= 0; t< Edge_nvars; t++) { var = Edge_vartypes[t]; var_fgrid = hypre_SStructPGridVTSGrid(p_fgrid, var); box_array = hypre_StructGridBoxes(var_fgrid); n_boxoffsets= ndim-1; boxoffset = hypre_CTAlloc(hypre_Index, n_boxoffsets); suboffset = hypre_CTAlloc(hypre_Index, n_boxoffsets); switch(var) { case 2: /* 2-d: x_face (vertical edges), stride=[rfactor[0],1,1] */ { hypre_SetIndex3(stride, rfactor[0], 1, 1); hypre_CopyIndex(varoffsets[2], var_index); /* boxoffset shrink in the i direction */ hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0); hypre_SetIndex3(suboffset[0], 1, 0, 0); /* extend loop_size by one in the stride direction */ hypre_SetIndex3(hi_index, 1, 0, 0); break; } case 3: /* 2-d: y_face (horizontal edges), stride=[1,rfactor[1],1] */ { hypre_SetIndex3(stride, 1, rfactor[1], 1); hypre_CopyIndex(varoffsets[3], var_index); /* boxoffset shrink in the j direction */ hypre_SetIndex3(boxoffset[0], 0, rfactor[1]-1, 0); hypre_SetIndex3(suboffset[0], 0, 1, 0); /* extend loop_size by one in the stride direction */ hypre_SetIndex3(hi_index, 0, 1, 0); break; } case 5: /* 3-d: x_edge, stride=[1,rfactor[1],rfactor[2]] */ { hypre_SetIndex3(stride, 1, rfactor[1], rfactor[2]); hypre_CopyIndex(varoffsets[5], var_index); /* boxoffset shrink in the j & k directions */ hypre_SetIndex3(boxoffset[0], 0, rfactor[1]-1, 0); hypre_SetIndex3(boxoffset[1], 0, 0, rfactor[2]-1); hypre_SetIndex3(suboffset[0], 0, 1, 0); hypre_SetIndex3(suboffset[1], 0, 0, 1); /* extend loop_size by one in the stride direction */ hypre_SetIndex3(hi_index, 0, 1, 1); break; } case 6: /* 3-d: y_edge, stride=[rfactor[0],1,rfactor[2]] */ { hypre_SetIndex3(stride, rfactor[0], 1, rfactor[2]); hypre_CopyIndex(varoffsets[6], var_index); /* boxoffset shrink in the i & k directions */ hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0); hypre_SetIndex3(boxoffset[1], 0, 0, rfactor[2]-1); hypre_SetIndex3(suboffset[0], 1, 0, 0); hypre_SetIndex3(suboffset[1], 0, 0, 1); /* extend loop_size by one in the stride direction */ hypre_SetIndex3(hi_index, 1, 0, 1); break; } case 7: /* 3-d: z_edge, stride=[rfactor[0],rfactor[1],1] */ { hypre_SetIndex3(stride, rfactor[0], rfactor[1], 1); hypre_CopyIndex(varoffsets[7], var_index); /* boxoffset shrink in the i & j directions */ hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0); hypre_SetIndex3(boxoffset[1], 0, rfactor[1]-1, 0); hypre_SetIndex3(suboffset[0], 1, 0, 0); hypre_SetIndex3(suboffset[1], 0, 1, 0); /* extend loop_size by one in the stride direction */ hypre_SetIndex3(hi_index, 1, 1, 0); break; } } hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* vboxes inside the i'th cellbox */ num_vboxes= n_CtoVbox[part][i]; vboxnums = CtoVboxnums[part][i]; /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); /* the adjusted variable box may be bigger than the actually variable box- variables that are shared may lead to smaller variable boxes than the SubtractIndex produces. If the box has to be decreased, then we decrease it by (rfactor[j]-1) in the appropriate direction. Check the location of the shifted lower box index. 
*/ for (k= 0; k< n_boxoffsets; k++) { hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), suboffset[k], 3, findex); row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[k], 3, hypre_BoxIMin(&copy_box)); } } hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, stride, loop_size); /* extend the loop_size so that upper boundary of the box are reached. */ hypre_AddIndexes(loop_size, hi_index, 3, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, stride, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,entry,p,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= stride[k]; } hypre_AddIndexes(findex, start, 3, findex); hypre_SStructGridFindBoxManEntry(fgrid_edge, part, findex, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &p, matrix_type); /* still row p may be outside the processor- check to make sure in */ if ( (p <= upper_ranks[part][t]) && (p >= lower_ranks[part][t]) ) { iedgeEdge[j]= p; ncols_edgeEdge[j]= 1; j++; } } hypre_BoxLoop1End(m); } /* hypre_ForBoxI */ hypre_TFree(boxoffset); hypre_TFree(suboffset); } /* for (t= 0; t< nvars; t++) */ } /* for (part= 0; part< nparts; part++) */ /*----------------------------------------------------------------------- * Record the row ranks for the face edges. Only for 3-d. * Loop over the face edges. *-----------------------------------------------------------------------*/ if (ndim == 3) { for (part= 0; part< nparts; part++) { p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */ Edge_nvars= hypre_SStructPGridNVars(p_fgrid); Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid); /* note that fboxes are the contracted CELL boxes. Will get the correct variable grid extents. */ fboxes= contract_fedgeBoxes[part]; /* may need to shrink a given box in some boxoffset directions */ boxoffset= hypre_TAlloc(hypre_Index, ndim); for (t= 0; t< ndim; t++) { hypre_ClearIndex(boxoffset[t]); hypre_IndexD(boxoffset[t], t)= rfactor[t]-1; } for (t= 0; t< Edge_nvars; t++) { var = Edge_vartypes[t]; var_fgrid= hypre_SStructPGridVTSGrid(p_fgrid, var); box_array= hypre_StructGridBoxes(var_fgrid); /* to reduce comparison, take the switch outside of the loop */ switch(var) { case 5: { /* 3-d x_edge, can be Y or Z_Face */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* vboxes inside the i'th cellbox */ num_vboxes= n_CtoVbox[part][i]; vboxnums = CtoVboxnums[part][i]; /* adjust the contracted cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); /****************************************************** * Check the location of the shifted lower box index: * x_edge-> Z_Face & Y_Face: * Z_Face- contract in the z direction only if the * processor interface is in the z direction * Y_Face- contract in the y direction if the processor * interface is in the y direction. 
******************************************************/ hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[2], 3, hypre_BoxIMin(&copy_box)); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); /* increase the loop_size by one in the Z plane direction */ loop_size[2]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,l,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= rfactor[k]; } hypre_AddIndexes(findex, start, 3, findex); /************************************************************ * Loop over the Z_Face x_edges. ************************************************************/ for (p= 0; p< rfactor[0]; p++) { hypre_CopyIndex(findex, var_index); var_index[0]+= p; for (n= 1; n< rfactor[1]; n++) { var_index[1]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); /* still row l may be outside the processor */ if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { iedgeEdge[j]= l; /* Z_Face. Two coarse Edge connections. 
*/ ncols_edgeEdge[j]= 2; j++; } } /* for (n= 1; n< rfactor[1]; n++) */ } /* for (p= 0; p< rfactor[0]; p++) */ } hypre_BoxLoop1End(m); /* Y_Face */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[1], 3, hypre_BoxIMin(&copy_box)); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); loop_size[1]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,l,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= rfactor[k]; } hypre_AddIndexes(findex, start, 3, findex); /* Y_Face */ for (p= 0; p< rfactor[0]; p++) { hypre_CopyIndex(findex, var_index); var_index[0]+= p; for (n= 1; n< rfactor[2]; n++) { var_index[2]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { iedgeEdge[j]= l; /* Y_Face. Two coarse Edge connections. */ ncols_edgeEdge[j]= 2; j++; } } /* for (n= 1; n< rfactor[2]; n++) */ } /* for (p= 0; p< rfactor[0]; p++) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI(i, fboxes) */ break; } case 6: { /* 3-d y_edge, can be X or Z_Face */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* vboxes inside the i'th cellbox */ num_vboxes= n_CtoVbox[part][i]; vboxnums = CtoVboxnums[part][i]; /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); /****************************************************** * Check the location of the shifted lower box index: * y_edge-> X_Face & Z_Face: * Z_Face- contract in the z direction only if the * processor interface is in the z direction * X_Face- contract in the x direction if the processor * interface is in the x direction. ******************************************************/ hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[2], 3, hypre_BoxIMin(&copy_box)); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); /* increase the loop_size by one in the Z_Face direction to cover upper boundary Z_Faces. 
*/ loop_size[2]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,l,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= rfactor[k]; } hypre_AddIndexes(findex, start, 3, findex); /* Z_Face */ for (p= 0; p< rfactor[1]; p++) { hypre_CopyIndex(findex, var_index); var_index[1]+= p; for (n= 1; n< rfactor[0]; n++) { var_index[0]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { iedgeEdge[j]= l; /* Z_Face. Two coarse Edge connections. */ ncols_edgeEdge[j]= 2; j++; } } /* for (n= 1; n< rfactor[0]; n++) */ } /* for (p= 0; p< rfactor[1]; p++) */ } hypre_BoxLoop1End(m); /* X_Face */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[0], 3, hypre_BoxIMin(&copy_box)); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); loop_size[0]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,l,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= rfactor[k]; } hypre_AddIndexes(findex, start, 3, findex); /* X_Face */ for (p= 0; p< rfactor[1]; p++) { hypre_CopyIndex(findex, var_index); var_index[1]+= p; for (n= 1; n< rfactor[2]; n++) { var_index[2]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { iedgeEdge[j]= l; /* X_Face. Two coarse Edge connections. */ ncols_edgeEdge[j]= 2; j++; } } /* for (n= 1; n< rfactor[2]; n++) */ } /* for (p= 0; p< rfactor[1]; p++) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI(i, fboxes) */ break; } case 7: { /* 3-d z_edge, can be interior, X or Y_Face, or Z_Edge */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* vboxes inside the i'th cellbox */ num_vboxes= n_CtoVbox[part][i]; vboxnums = CtoVboxnums[part][i]; /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); /****************************************************** * Check the location of the shifted lower box index: * z_edge-> X_Face & Y_Face: * X_Face- contract in the x direction if the processor * interface is in the x direction. * Y_Face- contract in the y direction if the processor * interface is in the y direction. 
******************************************************/ hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[0], 3, hypre_BoxIMin(&copy_box)); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); /* increase the loop_size by one in the X_Face direction */ loop_size[0]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,l,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= rfactor[k]; } hypre_AddIndexes(findex, start, 3, findex); /* X_Face */ for (p= 0; p< rfactor[2]; p++) { hypre_CopyIndex(findex, var_index); var_index[2]+= p; for (n= 1; n< rfactor[1]; n++) { var_index[1]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { iedgeEdge[j]= l; /* X_Face. Two coarse Edge connections. */ ncols_edgeEdge[j]= 2; j++; } } /* for (n= 1; n< rfactor[1]; n++) */ } /* for (p= 0; p< rfactor[2]; p++) */ } hypre_BoxLoop1End(m); /* Y_Face */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[1], 3, hypre_BoxIMin(&copy_box)); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); loop_size[1]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,l,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= rfactor[k]; } hypre_AddIndexes(findex, start, 3, findex); /* Y_Face */ for (p= 0; p< rfactor[2]; p++) { hypre_CopyIndex(findex, var_index); var_index[2]+= p; for (n= 1; n< rfactor[0]; n++) { var_index[0]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { iedgeEdge[j]= l; /* Y_Face. Two coarse Edge connections. 
*/ ncols_edgeEdge[j]= 2; j++; } } /* for (n= 1; n< rfactor[0]; n++) */ } /* for (p= 0; p< rfactor[2]; p++) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI(i, fboxes) */ break; } } /* switch */ } /* for (t= 0; t< Edge_nvars; t++) */ hypre_TFree(boxoffset); } /* for (part= 0; part< nparts; part++) */ } /* if (ndim == 3) */ for (part= 0; part< nparts; part++) { p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */ Edge_nvars= hypre_SStructPGridNVars(p_fgrid); Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid); /* note that fboxes are the contracted CELL boxes. Will get the correct variable grid extents. */ fboxes= contract_fedgeBoxes[part]; for (t= 0; t< Edge_nvars; t++) { var = Edge_vartypes[t]; var_fgrid= hypre_SStructPGridVTSGrid(p_fgrid, var); box_array= hypre_StructGridBoxes(var_fgrid); /* to reduce comparison, take the switch outside of the loop */ switch(var) { case 2: { /* 2-d x_face = x_edge, can be interior */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* adjust the contract cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3, hypre_BoxIMin(&copy_box)); /*hypre_IntersectBoxes(&copy_box, vbox, &copy_box);*/ hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,l,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= rfactor[k]; } hypre_AddIndexes(findex, start, 3, findex); /* get interior edges */ for (p= 1; p< rfactor[0]; p++) { hypre_CopyIndex(findex, var_index); var_index[0]+= p; for (n= 0; n< rfactor[1]; n++) { hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); iedgeEdge[j]= l; /* lies interior of Face. Two coarse Edge connection. 
*/ ncols_edgeEdge[j]= 2; j++; var_index[1]++; } /* for (n= 0; n< rfactor[1]; n++) */ } /* for (p= 1; p< rfactor[0]; p++) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI(i, fboxes) */ break; } case 3: { /* 2-d y_face = y_edge, can be interior */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3, hypre_BoxIMin(&copy_box)); /* hypre_IntersectBoxes(&copy_box, vbox, &copy_box);*/ hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,l,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= rfactor[k]; } hypre_AddIndexes(findex, start, 3, findex); /* get interior edges */ for (p= 1; p< rfactor[1]; p++) { hypre_CopyIndex(findex, var_index); var_index[1]+= p; for (n= 0; n< rfactor[0]; n++) { hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); iedgeEdge[j]= l; /* lies interior of Face. Two coarse Edge connection. */ ncols_edgeEdge[j]= 2; j++; var_index[0]++; } /* for (n= 0; n< rfactor[0]; n++) */ } /* for (p= 1; p< rfactor[1]; p++) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI(i, fboxes) */ break; } case 5: { /* 3-d x_edge, can be only interior */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3, hypre_BoxIMin(&copy_box)); /* hypre_IntersectBoxes(&copy_box, vbox, &copy_box);*/ hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,l,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= rfactor[k]; } hypre_AddIndexes(findex, start, 3, findex); /* get interior edges */ for (p= 1; p< rfactor[2]; p++) { hypre_CopyIndex(findex, var_index); var_index[2]+= p; for (n= 1; n< rfactor[1]; n++) { var_index[1]++; for (k= 0; k< rfactor[0]; k++) { hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); iedgeEdge[j]= l; /* Interior. Four coarse Edge connections. 
*/ ncols_edgeEdge[j]= 4; j++; var_index[0]++; } /* for (k= 0; k< rfactor[0]; k++) */ /* reset var_index[0] to the initial index for next k loop */ var_index[0]-= rfactor[0]; } /* for (n= 1; n< rfactor[1]; n++) */ /* reset var_index[1] to the initial index for next n loop */ var_index[1]-= (rfactor[1]-1); } /* for (p= 1; p< rfactor[2]; p++) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI(i, fboxes) */ break; } case 6: { /* 3-d y_edge, can be only interior */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3, hypre_BoxIMin(&copy_box)); /* hypre_IntersectBoxes(&copy_box, vbox, &copy_box);*/ hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,l,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= rfactor[k]; } hypre_AddIndexes(findex, start, 3, findex); /* get interior edges */ for (p= 1; p< rfactor[2]; p++) { hypre_CopyIndex(findex, var_index); var_index[2]+= p; for (n= 1; n< rfactor[0]; n++) { var_index[0]++; for (k= 0; k< rfactor[1]; k++) { hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); iedgeEdge[j]= l; /* Interior. Four coarse Edge connections. 
*/ ncols_edgeEdge[j]= 4; j++; var_index[1]++; } /* for (k= 0; k< rfactor[1]; k++) */ /* reset var_index[1] to the initial index for next k loop */ var_index[1]-= rfactor[1]; } /* for (n= 1; n< rfactor[0]; n++) */ /* reset var_index[0] to the initial index for next n loop */ var_index[0]-= (rfactor[0]-1); } /* for (p= 1; p< rfactor[2]; p++) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI(i, fboxes) */ break; } case 7: { /* 3-d z_edge, can be only interior */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3, hypre_BoxIMin(&copy_box)); /* hypre_IntersectBoxes(&copy_box, vbox, &copy_box);*/ hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,k,p,var_index,n,entry,l,j) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (k= 0; k< 3; k++) { findex[k]*= rfactor[k]; } hypre_AddIndexes(findex, start, 3, findex); /* get interior edges */ for (p= 1; p< rfactor[1]; p++) { hypre_CopyIndex(findex, var_index); var_index[1]+= p; for (n= 1; n< rfactor[0]; n++) { var_index[0]++; for (k= 0; k< rfactor[2]; k++) { hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); iedgeEdge[j]= l; /* Interior. Four coarse Edge connections. */ ncols_edgeEdge[j]= 4; j++; var_index[2]++; } /* for (k= 0; k< rfactor[2]; k++) */ /* reset var_index[2] to the initial index for next k loop */ var_index[2]-= rfactor[2]; } /* for (n= 1; n< rfactor[0]; n++) */ /* reset var_index[0] to the initial index for next n loop */ var_index[0]-= (rfactor[0]-1); } /* for (p= 1; p< rfactor[1]; p++) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI(i, fboxes) */ break; } } /* switch */ } /* for (t= 0; t< Edge_nvars; t++) */ } /* for (part= 0; part< nparts; part++) */ k= 0; j= 0; for (i= 0; i< nedges; i++) { if (ncols_edgeEdge[i]) { k+= ncols_edgeEdge[i]; j++; } } vals_edgeEdge = hypre_CTAlloc(HYPRE_Real, k); jedge_Edge = hypre_CTAlloc(HYPRE_Int, k); /* update nedges so that the true number of rows is set */ size= j; /********************************************************************* * Fill up the edge_Edge interpolation matrix. Interpolation weights * are determined differently for each type of fine edges. *********************************************************************/ /* loop over fedges aligning with the agglomerate coarse edges first. */ k= 0; for (part= 0; part< nparts; part++) { p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */ Edge_nvars= hypre_SStructPGridNVars(p_fgrid); Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid); p_cgrid= hypre_SStructGridPGrid(cgrid_edge, part); /* Edge grid */ /* note that fboxes are the contracted CELL boxes. Will get the correct variable grid extents. 
*/ fboxes= contract_fedgeBoxes[part]; for (t= 0; t< Edge_nvars; t++) { var = Edge_vartypes[t]; var_fgrid= hypre_SStructPGridVTSGrid(p_fgrid, var); box_array= hypre_StructGridBoxes(var_fgrid); n_boxoffsets= ndim-1; boxoffset = hypre_CTAlloc(hypre_Index, n_boxoffsets); suboffset = hypre_CTAlloc(hypre_Index, n_boxoffsets); switch(var) { case 2: /* 2-d: x_face (vertical edges), stride=[rfactor[0],1,1] fCedge_ratio= 1.0/rfactor[1] */ { hypre_SetIndex3(stride, rfactor[0], 1, 1); fCedge_ratio= 1.0/rfactor[1]; /* boxoffset shrink in the i direction */ hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0); hypre_SetIndex3(suboffset[0], 1, 0, 0); /* extend loop_size by one in the stride direction */ hypre_SetIndex3(hi_index, 1, 0, 0); break; } case 3: /* 2-d: y_face (horizontal edges), stride=[1,rfactor[1],1] fCedge_ratio= 1.0/rfactor[0] */ { hypre_SetIndex3(stride, 1, rfactor[1], 1); fCedge_ratio= 1.0/rfactor[0]; /* boxoffset shrink in the j direction */ hypre_SetIndex3(boxoffset[0], 0, rfactor[1]-1, 0); hypre_SetIndex3(suboffset[0], 0, 1, 0); /* extend loop_size by one in the stride direction */ hypre_SetIndex3(hi_index, 0, 1, 0); break; } case 5: /* 3-d: x_edge, stride=[1,rfactor[1],rfactor[2]] fCedge_ratio= 1.0/rfactor[0] */ { hypre_SetIndex3(stride, 1, rfactor[1], rfactor[2]); fCedge_ratio= 1.0/rfactor[0]; /* boxoffset shrink in the j & k directions */ hypre_SetIndex3(boxoffset[0], 0, rfactor[1]-1, 0); hypre_SetIndex3(boxoffset[1], 0, 0, rfactor[2]-1); hypre_SetIndex3(suboffset[0], 0, 1, 0); hypre_SetIndex3(suboffset[1], 0, 0, 1); /* extend loop_size by one in the stride direction */ hypre_SetIndex3(hi_index, 0, 1, 1); break; } case 6: /* 3-d: y_edge, stride=[rfactor[0],1,rfactor[2]] fCedge_ratio= 1.0/rfactor[1] */ { hypre_SetIndex3(stride, rfactor[0], 1, rfactor[2]); fCedge_ratio= 1.0/rfactor[1]; /* boxoffset shrink in the i & k directions */ hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0); hypre_SetIndex3(boxoffset[1], 0, 0, rfactor[2]-1); hypre_SetIndex3(suboffset[0], 1, 0, 0); hypre_SetIndex3(suboffset[1], 0, 0, 1); /* extend loop_size by one in the stride direction */ hypre_SetIndex3(hi_index, 1, 0, 1); break; } case 7: /* 3-d: z_edge, stride=[rfactor[0],rfactor[1],1] fCedge_ratio= 1.0/rfactor[2] */ { hypre_SetIndex3(stride, rfactor[0], rfactor[1], 1); fCedge_ratio= 1.0/rfactor[2]; /* boxoffset shrink in the i & j directions */ hypre_SetIndex3(boxoffset[0], rfactor[0]-1, 0, 0); hypre_SetIndex3(boxoffset[1], 0, rfactor[1]-1, 0); hypre_SetIndex3(suboffset[0], 1, 0, 0); hypre_SetIndex3(suboffset[1], 0, 1, 0); /* extend loop_size by one in the stride direction */ hypre_SetIndex3(hi_index, 1, 1, 0); break; } } hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* vboxes inside the i'th cellbox */ num_vboxes= n_CtoVbox[part][i]; vboxnums = CtoVboxnums[part][i]; hypre_CopyIndex(Edge_cstarts[part][i], cstart); /* adjust the contracted cellbox to the variable box. Note that some of the fboxes may be skipped because they vanish. 
*/ hypre_CopyBox(cellbox, &copy_box); for (j= 0; j< n_boxoffsets; j++) { hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), suboffset[j], 3, findex); row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[j], 3, hypre_BoxIMin(&copy_box)); /* also modify cstart */ hypre_AddIndexes(boxoffset[j], one_index, 3, boxoffset[j]); hypre_StructMapFineToCoarse(boxoffset[j], zero_index, rfactor, boxoffset[j]); hypre_AddIndexes(cstart, boxoffset[j], 3, cstart); } } hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, stride, loop_size); /* extend the loop_size so that upper boundary of the box are reached. */ hypre_AddIndexes(loop_size, hi_index, 3, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); /* note that the correct cbox corresponding to this non-vanishing fbox is used. */ hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, stride, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,j,entry,p,cindex,l,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); for (j= 0; j< 3; j++) { findex[j]*= stride[j]; } /* make sure that we do have the fine row corresponding to findex */ hypre_AddIndexes(findex, start, 3, findex); hypre_SStructGridFindBoxManEntry(fgrid_edge, part, findex, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, findex, &p, matrix_type); /* still row p may be outside the processor- check to make sure in */ if ( (p <= upper_ranks[part][t]) && (p >= lower_ranks[part][t]) ) { hypre_SubtractIndexes(findex, start, 3, findex); /* determine where the edge lies- coarsening required. */ hypre_StructMapFineToCoarse(findex, zero_index, rfactor, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /* lies on coarse Edge. Coarse Edge connection: var_index= cindex - subtract_index.*/ hypre_SubtractIndexes(cindex, varoffsets[var], 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); jedge_Edge[k]= l; vals_edgeEdge[k]= fCedge_ratio; k++; } /* if ((p <= upper_ranks[part][t]) && (p >= lower_ranks[part][t])) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI */ hypre_TFree(boxoffset); hypre_TFree(suboffset); } /* for (t= 0; t< nvars; t++) */ } /* for (part= 0; part< nparts; part++) */ /* generate the face interpolation weights/info. Only for 3-d */ if (ndim == 3) { for (part= 0; part< nparts; part++) { p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */ Edge_nvars= hypre_SStructPGridNVars(p_fgrid); Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid); p_cgrid= hypre_SStructGridPGrid(cgrid_edge, part); /* Edge grid */ /* note that fboxes are the contracted CELL boxes. Will get the correct variable grid extents. 
*/ fboxes= contract_fedgeBoxes[part]; /* may need to shrink a given box in some boxoffset directions */ boxoffset= hypre_TAlloc(hypre_Index, ndim); for (t= 0; t< ndim; t++) { hypre_ClearIndex(boxoffset[t]); hypre_IndexD(boxoffset[t], t)= rfactor[t]-1; } for (t= 0; t< Edge_nvars; t++) { var = Edge_vartypes[t]; var_fgrid= hypre_SStructPGridVTSGrid(p_fgrid, var); box_array= hypre_StructGridBoxes(var_fgrid); switch(var) { case 5: { /* 3-d x_edge, can be Y or Z_Face */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* vboxes inside the i'th cellbox */ num_vboxes= n_CtoVbox[part][i]; vboxnums = CtoVboxnums[part][i]; hypre_CopyIndex(Edge_cstarts[part][i], cstart); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); /****************************************************** * Check the location of the shifted lower box index: * x_edge-> Z_Face & Y_Face: * Z_Face- contract in the z direction only if the * processor interface is in the z direction * Y_Face- contract in the y direction if the processor * interface is in the y direction. ******************************************************/ hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[2], 3, hypre_BoxIMin(&copy_box)); /* modify cstart */ hypre_AddIndexes(cstart, kshift, 3, cstart); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); /* increase the loop_size by one in the Z plane direction */ loop_size[2]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); /* because of rfactor striding, cindex= findex. But adjust by cstart to get actually coarse edge. */ hypre_CopyIndex(findex, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /* Will need the actual fine indices. */ for (l= 0; l< ndim; l++) { findex[l]*= rfactor[l]; } hypre_AddIndexes(findex, start, 3, findex); /****************************************************** * ranks for coarse edges. Fine edges of agglomerate * connect to these coarse edges. * Z_Face (i,j,k-1). Two like-var coarse Edge connections. 
* x_Edge (i,j,k-1), (i,j-1,k-1) ******************************************************/ hypre_SubtractIndexes(cindex, kshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2, matrix_type); hypre_SubtractIndexes(var_index, jshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); /* loop over the strips of x_edges making up the Z_Face */ for (p= 0; p< rfactor[0]; p++) { hypre_CopyIndex(findex, var_index); var_index[0]+= p; for (n= 1; n< rfactor[1]; n++) { var_index[1]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); /* still row l may be outside the processor */ if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) n/(rfactor[1]*rfactor[0]); k++; jedge_Edge[k]= rank2; vals_edgeEdge[k]= 1.0/rfactor[0]*(1.0-(HYPRE_Real) n/rfactor[1]); k++; } } /* for (n= 1; n< rfactor[1]; n++) */ } /* for (p= 0; p< rfactor[0]; p++) */ } hypre_BoxLoop1End(m); /* Y plane direction */ hypre_CopyIndex(Edge_cstarts[part][i], cstart); hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[1], 3, hypre_BoxIMin(&copy_box)); /* modify cstart */ hypre_AddIndexes(cstart, jshift, 3, cstart); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); loop_size[1]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); /* because of rfactor striding, cindex= findex. But adjust by cstart to get actually coarse edge. */ hypre_CopyIndex(findex, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /* Will need the actual fine indices. */ for (l= 0; l< ndim; l++) { findex[l]*= rfactor[l]; } hypre_AddIndexes(findex, start, 3, findex); /****************************************************** * Y_Face. Two coarse Edge connections. 
* x_Edge (i,j-1,k), (i,j-1,k-1) ******************************************************/ hypre_SubtractIndexes(cindex, jshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2, matrix_type); hypre_SubtractIndexes(var_index, kshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); /* loop over the strips of x_edges making up the Y_Face */ for (p= 0; p< rfactor[0]; p++) { hypre_CopyIndex(findex, var_index); var_index[0]+= p; for (n= 1; n< rfactor[2]; n++) { var_index[2]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) n/(rfactor[0]*rfactor[2]); k++; jedge_Edge[k]= rank2; vals_edgeEdge[k]= 1.0/rfactor[0]*(1.0 - (HYPRE_Real) n/rfactor[2]); k++; } } /* for (n= 1; n< rfactor[2]; n++) */ } /* for (p= 0; p< rfactor[0]; p++) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI(i, fboxes) */ break; } case 6: { /* 3-d y_edge, can be X or Z_Face */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* vboxes inside the i'th cellbox */ num_vboxes= n_CtoVbox[part][i]; vboxnums = CtoVboxnums[part][i]; hypre_CopyIndex(Edge_cstarts[part][i], cstart); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); /****************************************************** * Check the location of the shifted lower box index: * y_edge-> X_Face & Z_Face: * Z_Face- contract in the z direction only if the * processor interface is in the z direction * X_Face- contract in the x direction if the processor * interface is in the x direction. ******************************************************/ /* Z_Face */ hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[2], 3, hypre_BoxIMin(&copy_box)); /* modify cstart */ hypre_AddIndexes(cstart, kshift, 3, cstart); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); /* increase the loop_size by one in the Z plane direction */ loop_size[2]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); /* because of rfactor striding, cindex= findex. But adjust by cstart to get actually coarse edge. */ hypre_CopyIndex(findex, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /* Will need the actual fine indices. 
*/ for (l= 0; l< ndim; l++) { findex[l]*= rfactor[l]; } hypre_AddIndexes(findex, start, 3, findex); /****************************************************** * ranks for coarse edges. Fine edges of agglomerate * connect to these coarse edges. * Z_Face (i,j,k-1). Two like-var coarse Edge connections. * y_Edge (i,j,k-1), (i-1,j,k-1) ******************************************************/ hypre_SubtractIndexes(cindex, kshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2, matrix_type); hypre_SubtractIndexes(var_index, ishift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); /* loop over the strips of y_edges making up the Z_Face */ for (p= 0; p< rfactor[1]; p++) { hypre_CopyIndex(findex, var_index); var_index[1]+= p; for (n= 1; n< rfactor[0]; n++) { var_index[0]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) n/(rfactor[0]*rfactor[1]); k++; jedge_Edge[k]= rank2; vals_edgeEdge[k]= 1.0/rfactor[1]*(1.0 - (HYPRE_Real) n/rfactor[0]); k++; } } /* for (n= 1; n< rfactor[0]; n++) */ } /* for (p= 0; p< rfactor[1]; p++) */ } hypre_BoxLoop1End(m); /* X_Face */ hypre_CopyBox(cellbox, &copy_box); hypre_CopyIndex(Edge_cstarts[part][i], cstart); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[0], 3, hypre_BoxIMin(&copy_box)); /* modify cstart */ hypre_AddIndexes(cstart, ishift, 3, cstart); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), kshift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); loop_size[0]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); /* because of rfactor striding, cindex= findex. But adjust by cstart to get actually coarse edge. */ hypre_CopyIndex(findex, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /* Will need the actual fine indices. */ for (l= 0; l< ndim; l++) { findex[l]*= rfactor[l]; } hypre_AddIndexes(findex, start, 3, findex); /****************************************************** * X_Face. Two coarse Edge connections. 
* y_Edge (i-1,j,k), (i-1,j,k-1) ******************************************************/ hypre_SubtractIndexes(cindex, ishift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2, matrix_type); hypre_SubtractIndexes(var_index, kshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); /* loop over the strips of y_edges making up the X_Face */ for (p= 0; p< rfactor[1]; p++) { hypre_CopyIndex(findex, var_index); var_index[1]+= p; for (n= 1; n< rfactor[2]; n++) { var_index[2]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) n/(rfactor[1]*rfactor[2]); k++; jedge_Edge[k]= rank2; vals_edgeEdge[k]= 1.0/rfactor[1]*(1.0 - (HYPRE_Real) n/rfactor[2]); k++; } } /* for (n= 1; n< rfactor[2]; n++) */ } /* for (p= 0; p< rfactor[1]; p++) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI(i, fboxes) */ break; } case 7: { /* 3-d z_edge, can be X or Y_Face */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); /* vboxes inside the i'th cellbox */ num_vboxes= n_CtoVbox[part][i]; vboxnums = CtoVboxnums[part][i]; hypre_CopyIndex(Edge_cstarts[part][i], cstart); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); /****************************************************** * Check the location of the shifted lower box index: * z_edge-> X_Face & Y_Face: * X_Face- contract in the x direction if the processor * interface is in the x direction. * Y_Face- contract in the y direction if the processor * interface is in the y direction. ******************************************************/ hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[0], 3, hypre_BoxIMin(&copy_box)); /* modify cstart */ hypre_AddIndexes(cstart, ishift, 3, cstart); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); /* increase the loop_size by one in the X plane direction */ loop_size[0]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); /* because of rfactor striding, cindex= findex. But adjust by cstart to get actually coarse edge. */ hypre_CopyIndex(findex, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /* Will need the actual fine indices. 
*/ for (l= 0; l< ndim; l++) { findex[l]*= rfactor[l]; } hypre_AddIndexes(findex, start, 3, findex); /****************************************************** * ranks for coarse edges. Fine edges of agglomerate * connect to these coarse edges. * X_Face. Two coarse Edge connections. * z_Edge (i-1,j,k), (i-1,j-1,k) ******************************************************/ hypre_SubtractIndexes(cindex, ishift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2, matrix_type); hypre_SubtractIndexes(var_index, jshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); /* loop over the strips of z_edges making up the X_Face */ for (p= 0; p< rfactor[2]; p++) { hypre_CopyIndex(findex, var_index); var_index[2]+= p; for (n= 1; n< rfactor[1]; n++) { var_index[1]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) n/(rfactor[0]*rfactor[2]); k++; jedge_Edge[k]= rank2; vals_edgeEdge[k]= 1.0/rfactor[2]*(1.0-(HYPRE_Real) n/rfactor[0]); k++; } } /* for (n= 1; n< rfactor[1]; n++) */ } /* for (p= 0; p< rfactor[2]; p++) */ } hypre_BoxLoop1End(m); /* Y plane */ hypre_CopyBox(cellbox, &copy_box); hypre_CopyIndex(Edge_cstarts[part][i], cstart); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), jshift, 3, findex); /* loop over all the vboxes to see if findex is inside */ row_in= falseV; for (p= 0; p< num_vboxes[t]; p++) { vbox= hypre_BoxArrayBox(box_array, vboxnums[t][p]); if (hypre_IndexInBox(findex, vbox)) { hypre_CopyIndex(findex, hypre_BoxIMin(&copy_box)); row_in= trueV; break; } } /* not in any vbox */ if (!row_in) { hypre_AddIndexes(hypre_BoxIMin(&copy_box), boxoffset[1], 3, hypre_BoxIMin(&copy_box)); /* modify cstart */ hypre_AddIndexes(cstart, jshift, 3, cstart); } hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), ishift, 3, hypre_BoxIMin(&copy_box)); hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); loop_size[1]++; hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, m); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,m,lindex,findex,cindex,l,var_index,entry,rank2,rank,p,n,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(m) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); /* because of rfactor striding, cindex= findex. But adjust by cstart to get actually coarse edge. */ hypre_CopyIndex(findex, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /* Will need the actual fine indices. */ for (l= 0; l< ndim; l++) { findex[l]*= rfactor[l]; } hypre_AddIndexes(findex, start, 3, findex); /********************************************************** * Y_Face (i,j-1,k). Two like-var coarse Edge connections. 
* z_Edge (i,j-1,k), (i-1,j-1,k) **********************************************************/ hypre_SubtractIndexes(cindex, jshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank2, matrix_type); hypre_SubtractIndexes(var_index, ishift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); /* loop over the strips of y_edges making up the Y_Face */ for (p= 0; p< rfactor[2]; p++) { hypre_CopyIndex(findex, var_index); var_index[2]+= p; for (n= 1; n< rfactor[0]; n++) { var_index[0]++; hypre_SStructGridFindBoxManEntry(fgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &l, matrix_type); if ((l <= upper_ranks[part][t]) && (l >= lower_ranks[part][t])) { jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) n/(rfactor[0]*rfactor[2]); k++; jedge_Edge[k]= rank2; vals_edgeEdge[k]= 1.0/rfactor[2]*(1.0-(HYPRE_Real) n/rfactor[0]); k++; } } /* for (n= 1; n< rfactor[0]; n++) */ } /* for (p= 0; p< rfactor[2]; p++) */ } hypre_BoxLoop1End(m); } /* hypre_ForBoxI(i, fboxes) */ break; } } /* switch */ } /* for (t= 0; t< Edge_nvars; t++) */ hypre_TFree(boxoffset); } /* for (part= 0; part< nparts; part++) */ } /* if (ndim == 3) */ /* generate the interior interpolation weights/info */ for (part= 0; part< nparts; part++) { p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); /* edge grid */ Edge_nvars= hypre_SStructPGridNVars(p_fgrid); Edge_vartypes= hypre_SStructPGridVarTypes(p_fgrid); p_cgrid= hypre_SStructGridPGrid(cgrid_edge, part); /* Edge grid */ /* note that fboxes are the contracted CELL boxes. Will get the correct variable grid extents. */ fboxes= contract_fedgeBoxes[part]; for (t= 0; t< Edge_nvars; t++) { var = Edge_vartypes[t]; var_fgrid= hypre_SStructPGridVTSGrid(p_fgrid, var); box_array= hypre_StructGridBoxes(var_fgrid); switch(var) { case 2: { /* 2-d x_face = x_edge, can be interior or on X_Edge */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); vbox = hypre_BoxArrayBox(box_array, i); hypre_CopyIndex(Edge_cstarts[part][i], cstart); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3, hypre_BoxIMin(&copy_box)); /* hypre_IntersectBoxes(&copy_box, vbox, &copy_box);*/ hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, r); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,r,lindex,findex,p,n,cindex,entry,rank,var_index,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(r) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); /***************************************************** * Where the fine edge lies wrt the coarse edge: * Since we stride by rfactor, lindex is * the coarse index. No coarsening needed, i.e., * cindex= findex. * * Loop over the interior fine edges in an agglomerate. *****************************************************/ for (p= 1; p< rfactor[0]; p++) { for (n= 0; n< rfactor[1]; n++) { hypre_CopyIndex(findex, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /*interior of Face. 
Extract the two coarse Edge (x_Edge ijk & (i-1,j,k)*/ hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) p/(rfactor[0]*rfactor[1]); k++; hypre_SubtractIndexes(cindex, ishift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) (rfactor[0]-p)/(rfactor[0]*rfactor[1]); k++; } /* for (n= 0; n< rfactor[1]; n++) */ } /* for (p= 1; p< rfactor[0]; p++) */ } hypre_BoxLoop1End(r); } /* hypre_ForBoxI(i, fboxes) */ break; } case 3: { /* 2-d y_face = y_edge, can be interior or on Y_Edge */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); vbox = hypre_BoxArrayBox(box_array, i); hypre_CopyIndex(Edge_cstarts[part][i], cstart); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3, hypre_BoxIMin(&copy_box)); /* hypre_IntersectBoxes(&copy_box, vbox, &copy_box);*/ hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, r); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,r,lindex,findex,p,n,cindex,entry,rank,var_index,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(r) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); /***************************************************** * Where the fine edge lies wrt the coarse edge: * Since we stride by rfactor, lindex is * the coarse index. No coarsening needed, i.e., * cindex= findex. * * Loop over the interior fine edges in an agglomerate. *****************************************************/ for (p= 1; p< rfactor[1]; p++) { for (n= 0; n< rfactor[0]; n++) { hypre_CopyIndex(findex, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /*lies interior of Face. Extract the two coarse Edge (y_Edge ijk & (i,j-1,k). 
*/ hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) p/(rfactor[0]*rfactor[1]); k++; hypre_SubtractIndexes(cindex, jshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) (rfactor[1]-p)/(rfactor[0]*rfactor[1]); k++; } /* for (n= 0; n< rfactor[0]; n++) */ } /* for (p= 1; p< rfactor[1]; p++) */ } hypre_BoxLoop1End(r); } /* hypre_ForBoxI(i, fboxes) */ break; } case 5: { /* 3-d x_edge, must be interior */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); vbox = hypre_BoxArrayBox(box_array, i); hypre_CopyIndex(Edge_cstarts[part][i], cstart); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3, hypre_BoxIMin(&copy_box)); /*hypre_IntersectBoxes(&copy_box, vbox, &copy_box);*/ hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, r); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,r,lindex,findex,p,n,m,cindex,entry,rank,var_index,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(r) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); /***************************************************** * Where the fine edge lies wrt the coarse edge: * Since we stride by rfactor, lindex is * the coarse index. No coarsening needed, i.e., * cindex= findex. * * Loop over the interior fine edges in an agglomerate. *****************************************************/ for (p= 1; p< rfactor[2]; p++) { for (n= 1; n< rfactor[1]; n++) { for (m= 0; m< rfactor[0]; m++) { hypre_CopyIndex(findex, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /*********************************************** * Interior. 
* x_Edge ijk, (i,j-1,k), (i,j-1,k-1), (i,j,k-1) ***********************************************/ hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) p*n/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; hypre_SubtractIndexes(cindex, jshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) p*(rfactor[1]-n)/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; hypre_SubtractIndexes(var_index, kshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) (rfactor[1]-n)*(rfactor[2]-p)/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; hypre_AddIndexes(var_index, jshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) n*(rfactor[2]-p)/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; } /* for (m= 0; m< rfactor[0]; m++) */ } /* for (n= 1; n< rfactor[1]; n++) */ } /* for (p= 1; p< rfactor[2]; p++) */ } hypre_BoxLoop1End(r); } /* hypre_ForBoxI(i, fboxes) */ break; } case 6: { /* 3-d y_edge, must be interior */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); vbox = hypre_BoxArrayBox(box_array, i); hypre_CopyIndex(Edge_cstarts[part][i], cstart); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3, hypre_BoxIMin(&copy_box)); /*hypre_IntersectBoxes(&copy_box, vbox, &copy_box);*/ hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, r); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,r,lindex,findex,p,n,m,cindex,entry,rank,var_index,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(r) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); /***************************************************** * Where the fine edge lies wrt the coarse edge: * Since we stride by rfactor, lindex is * the coarse index. No coarsening needed, i.e., * cindex= findex. * * Loop over the interior fine edges in an agglomerate. *****************************************************/ for (p= 1; p< rfactor[2]; p++) { for (n= 1; n< rfactor[0]; n++) { for (m= 0; m< rfactor[1]; m++) { hypre_CopyIndex(findex, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /*********************************************** * Interior. 
* y_Edge ijk, (i-1,j,k), (i-1,j,k-1), (i,j,k-1) ***********************************************/ hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) p*n/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; hypre_SubtractIndexes(cindex, ishift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) p*(rfactor[0]-n)/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; hypre_SubtractIndexes(var_index, kshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) (rfactor[0]-n)*(rfactor[2]-p)/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; hypre_AddIndexes(var_index, ishift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) n*(rfactor[2]-p)/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; } /* for (m= 0; m< rfactor[1]; m++) */ } /* for (n= 1; n< rfactor[0]; n++) */ } /* for (p= 1; p< rfactor[2]; p++) */ } hypre_BoxLoop1End(r); } /* hypre_ForBoxI(i, fboxes) */ break; } case 7: { /* 3-d z_edge, only the interior */ hypre_ForBoxI(i, fboxes) { cellbox= hypre_BoxArrayBox(fboxes, i); vbox = hypre_BoxArrayBox(box_array, i); hypre_CopyIndex(Edge_cstarts[part][i], cstart); /* adjust the project cellbox to the variable box */ hypre_CopyBox(cellbox, &copy_box); hypre_SubtractIndexes(hypre_BoxIMin(&copy_box), varoffsets[var], 3, hypre_BoxIMin(&copy_box)); /*hypre_IntersectBoxes(&copy_box, vbox, &copy_box);*/ hypre_BoxGetSize(&copy_box, loop_size); hypre_StructMapFineToCoarse(loop_size, zero_index, rfactor, loop_size); hypre_CopyIndex(hypre_BoxIMin(&copy_box), start); hypre_BoxLoop1Begin(ndim, loop_size, &copy_box, start, rfactor, r); #if 0 #ifdef HYPRE_USING_OPENMP #pragma omp parallel for private(HYPRE_BOX_PRIVATE,r,lindex,findex,p,n,m,cindex,entry,rank,var_index,k) HYPRE_SMP_SCHEDULE #endif #else hypre_BoxLoopSetOneBlock(); #endif hypre_BoxLoop1For(r) { hypre_BoxLoopGetIndex(lindex); hypre_SetIndex3(findex, lindex[0], lindex[1], lindex[2]); /***************************************************** * Where the fine edge lies wrt the coarse edge: * Since we stride by rfactor, lindex is * the coarse index. No coarsening needed, i.e., * cindex= findex. * * Loop over the interior fine edges in an agglomerate. *****************************************************/ for (p= 1; p< rfactor[1]; p++) { for (n= 1; n< rfactor[0]; n++) { for (m= 0; m< rfactor[2]; m++) { hypre_CopyIndex(findex, cindex); hypre_AddIndexes(cindex, cstart, 3, cindex); /************************************************* * Interior. 
* z_Edge ijk, (i-1,j,k), (i-1,j-1,k), (i,j-1,k) *************************************************/ hypre_SStructGridFindBoxManEntry(cgrid_edge, part, cindex, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, cindex, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) n*p/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; hypre_SubtractIndexes(cindex, ishift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) p*(rfactor[0]-n)/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; hypre_SubtractIndexes(var_index, jshift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) (rfactor[1]-p)*(rfactor[0]-n)/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; hypre_AddIndexes(var_index, ishift, 3, var_index); hypre_SStructGridFindBoxManEntry(cgrid_edge, part, var_index, t, &entry); hypre_SStructBoxManEntryGetGlobalRank(entry, var_index, &rank, matrix_type); jedge_Edge[k]= rank; vals_edgeEdge[k]= (HYPRE_Real) n*(rfactor[1]-p)/ (rfactor[0]*rfactor[1]*rfactor[2]); k++; } /* for (m= 0; m< rfactor[2]; m++) */ } /* for (n= 1; n< rfactor[0]; n++) */ } /* for (p= 1; p< rfactor[1]; p++) */ } hypre_BoxLoop1End(r); } /* hypre_ForBoxI(i, fboxes) */ break; } } /* switch */ } /* for (t= 0; t< Edge_nvars; t++) */ } /* for (part= 0; part< nparts; part++) */ HYPRE_IJMatrixSetValues(edge_Edge, size, ncols_edgeEdge, (const HYPRE_Int*) iedgeEdge, (const HYPRE_Int*) jedge_Edge, (const HYPRE_Real*) vals_edgeEdge); HYPRE_IJMatrixAssemble((HYPRE_IJMatrix) edge_Edge); hypre_TFree(ncols_edgeEdge); hypre_TFree(iedgeEdge); hypre_TFree(jedge_Edge); hypre_TFree(vals_edgeEdge); hypre_TFree(varoffsets); hypre_TFree(vartype_map); /* n_CtoVbox[part][cellboxi][var] & CtoVboxnums[part][cellboxi][var][nvboxes] */ for (part= 0; part< nparts; part++) { p_fgrid= hypre_SStructGridPGrid(fgrid_edge, part); Edge_nvars= hypre_SStructPGridNVars(p_fgrid); var_fgrid= hypre_SStructPGridCellSGrid(p_fgrid); fboxes = hypre_StructGridBoxes(var_fgrid); hypre_ForBoxI(j, fboxes) { for (t= 0; t< Edge_nvars; t++) { hypre_TFree(CtoVboxnums[part][j][t]); } hypre_TFree(n_CtoVbox[part][j]); hypre_TFree(CtoVboxnums[part][j]); } hypre_TFree(n_CtoVbox[part]); hypre_TFree(CtoVboxnums[part]); } hypre_TFree(n_CtoVbox); hypre_TFree(CtoVboxnums); for (part= 0; part< nparts; part++) { hypre_BoxArrayDestroy(contract_fedgeBoxes[part]); hypre_TFree(Edge_cstarts[part]); hypre_TFree(upper_shifts[part]); hypre_TFree(lower_shifts[part]); hypre_TFree(cfbox_mapping[part]); hypre_TFree(fcbox_mapping[part]); hypre_TFree(upper_ranks[part]); hypre_TFree(lower_ranks[part]); } hypre_TFree(contract_fedgeBoxes); hypre_TFree(Edge_cstarts); hypre_TFree(upper_shifts); hypre_TFree(lower_shifts); hypre_TFree(cfbox_mapping); hypre_TFree(fcbox_mapping); hypre_TFree(upper_ranks); hypre_TFree(lower_ranks); return (hypre_IJMatrix *) edge_Edge; }
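/* Editor's note: a minimal standalone sketch, not part of the hypre source (the function name is hypothetical), of the interior interpolation weights assembled above for a 3-d fine x_edge.  For a fine x_edge at offsets n and p in the y and z directions inside an agglomerate, the four like-var coarse x_Edges at (i,j,k), (i,j-1,k), (i,j-1,k-1), and (i,j,k-1) receive bilinear weights over the agglomerate volume; the offset m along the edge direction never enters the weights, which is why the m loop above reuses the same four values for every m.  E.g., for rfactor= {2,2,2} and n= p= 1, all four weights are 1/8. */
static void sketch_xEdge_interior_weights(HYPRE_Int n, HYPRE_Int p,
                                          hypre_Index rfactor,
                                          HYPRE_Real w[4])
{
   HYPRE_Real vol= (HYPRE_Real) (rfactor[0]*rfactor[1]*rfactor[2]);

   w[0]= (HYPRE_Real) p*n/vol;                            /* x_Edge (i,j,k)     */
   w[1]= (HYPRE_Real) p*(rfactor[1]-n)/vol;               /* x_Edge (i,j-1,k)   */
   w[2]= (HYPRE_Real) (rfactor[1]-n)*(rfactor[2]-p)/vol;  /* x_Edge (i,j-1,k-1) */
   w[3]= (HYPRE_Real) n*(rfactor[2]-p)/vol;               /* x_Edge (i,j,k-1)   */
}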
DRB097-target-teams-distribute-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISO/IEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: [email protected], [email protected], [email protected], [email protected], [email protected]) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S.
DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <stdio.h> /* use of omp target + teams + distribute + parallel for */ int main(int argc, char * argv[]) { int i, i2; int len = 2560; double sum = 0.0, sum2 = 0.0; double a[len], b[len]; /* Initialize with some values */ int _ret_val_0; #pragma cetus private(i) #pragma loop name main#0 #pragma cetus parallel #pragma omp parallel for private(i) for (i=0; i<len; i ++ ) { a[i]=(((double)i)/2.0); b[i]=(((double)i)/3.0); } #pragma cetus private(i, i2) #pragma loop name main#1 #pragma cetus reduction(+: sum) #pragma cetus parallel #pragma omp parallel for private(i, i2) reduction(+: sum) for (i2=0; i2<len; i2+=256) { #pragma cetus private(i) #pragma loop name main#1#0 #pragma cetus reduction(+: sum) #pragma cetus parallel #pragma omp parallel for private(i) reduction(+: sum) for (i=i2; i<(((i2+256)<len) ? (i2+256) : len); i ++ ) { sum+=(a[i]*b[i]); } } /* CPU reference computation */ #pragma cetus private(i) #pragma loop name main#2 #pragma cetus reduction(+: sum2) #pragma cetus parallel #pragma omp parallel for private(i) reduction(+: sum2) for (i=0; i<len; i ++ ) { sum2+=(a[i]*b[i]); } printf("sum=%lf sum2=%lf\n", sum, sum2); _ret_val_0=0; return _ret_val_0; }
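For reference, a hedged sketch of the directive form the filename (target-teams-distribute) refers to, before the Cetus source-to-source pass rewrote it into the blocked host loops above; the map clauses and exact loop shape are assumptions, not the verbatim DataRaceBench original:

#include <stdio.h>

int main(void)
{
  int i;
  int len = 2560;
  double sum = 0.0;
  double a[2560], b[2560];

  for (i = 0; i < len; i++) {
    a[i] = ((double) i) / 2.0;
    b[i] = ((double) i) / 3.0;
  }

  /* offload the dot product; the reduction clause makes the concurrent
     updates of sum race-free, hence the "-no" (no data race) suffix */
#pragma omp target map(to: a[0:len], b[0:len]) map(tofrom: sum)
#pragma omp teams distribute parallel for reduction(+: sum)
  for (i = 0; i < len; i++)
    sum += a[i] * b[i];

  printf("sum=%lf\n", sum);
  return 0;
}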
GB_unop__one_int64_int64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__one_int64_int64 // op(A') function: GB_unop_tran__one_int64_int64 // C type: int64_t // A type: int64_t // cast: ; // unaryop: cij = 1 #define GB_ATYPE \ int64_t #define GB_CTYPE \ int64_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = 1 ; // casting #define GB_CAST(z, aij) \ ; ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ ; ; \ /* Cx [pC] = op (cast (aij)) */ \ ; ; \ Cx [pC] = 1 ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ONE || GxB_NO_INT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__one_int64_int64 ( int64_t *Cx, // Cx and Ax may be aliased const int64_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (int64_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { ; ; ; ; Cx [p] = 1 ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; ; ; ; ; Cx [p] = 1 ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__one_int64_int64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
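Stripped of the macro scaffolding, the generated apply kernel above reduces to writing the constant 1 into every entry that is present. A standalone equivalent, as an illustration only (not GraphBLAS API code, and without the OpenMP thread slicing):

#include <stdint.h>
#include <stddef.h>

/* what GB_unop_apply__one_int64_int64 computes once GB_OP(z,x) = (z = 1)
   and the no-op casts are substituted in; GB_OP_IS_IDENTITY_WITH_NO_TYPECAST
   is 0 for this operator, so there is no memcpy shortcut */
static void one_apply(int64_t *Cx, const int8_t *Ab, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        if (Ab != NULL && !Ab [p]) continue ;   /* bitmap case: skip holes */
        Cx [p] = 1 ;                            /* unaryop: cij = 1 */
    }
}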
copy.c
/* * ======================================================================================= * * Author: Jan Eitzinger (je), [email protected] * Copyright (c) 2020 RRZE, University Erlangen-Nuremberg * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. * * ======================================================================================= */ #include <timing.h> double copy( double * restrict a, double * restrict b, int N ) { double S, E; S = getTimeStamp(); #pragma omp parallel for schedule(static) for (int i=0; i<N; i++) { a[i] = b[i]; } E = getTimeStamp(); return E-S; }
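A hypothetical driver for the kernel above (the array size and build setup are assumptions): copy streams one load and one store per iteration, so the sustained bandwidth is 2 * N * sizeof(double) bytes over the returned wall time, ignoring any write-allocate traffic.

#include <stdio.h>
#include <stdlib.h>

extern double copy(double * restrict a, double * restrict b, int N);

int main(void)
{
    int N = 40000000;   /* large enough to defeat the caches (assumed) */
    double *a = (double *) malloc(N * sizeof(double));
    double *b = (double *) malloc(N * sizeof(double));
    if (a == NULL || b == NULL) return 1;

    /* parallel first touch so page placement matches the OpenMP kernel */
#pragma omp parallel for schedule(static)
    for (int i = 0; i < N; i++) { a[i] = 0.0; b[i] = 2.0; }

    double t = copy(a, b, N);
    printf("copy: %8.2f GB/s\n", 2.0 * N * sizeof(double) / t / 1.0e9);

    free(a);
    free(b);
    return 0;
}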
quantize.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % QQQ U U AAA N N TTTTT IIIII ZZZZZ EEEEE % % Q Q U U A A NN N T I ZZ E % % Q Q U U AAAAA N N N T I ZZZ EEEEE % % Q QQ U U A A N NN T I ZZ E % % QQQQ UUU A A N N T IIIII ZZZZZ EEEEE % % % % % % MagickCore Methods to Reduce the Number of Unique Colors in an Image % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Realism in computer graphics typically requires using 24 bits/pixel to % generate an image. Yet many graphic display devices do not contain the % amount of memory necessary to match the spatial and color resolution of % the human eye. The Quantize methods take a 24 bit image and reduce % the number of colors so it can be displayed on a raster device with fewer % bits per pixel. In most instances, the quantized image closely % resembles the original reference image. % % A reduction of colors in an image is also desirable for image % transmission and real-time animation. % % QuantizeImage() takes standard RGB or monochrome images and quantizes % them down to some fixed number of colors. % % For purposes of color allocation, an image is a set of n pixels, where % each pixel is a point in RGB space. RGB space is a 3-dimensional % vector space, and each pixel, Pi, is defined by an ordered triple of % red, green, and blue coordinates, (Ri, Gi, Bi). % % Each primary color component (red, green, or blue) represents an % intensity which varies linearly from 0 to a maximum value, Cmax, which % corresponds to full saturation of that color. Color allocation is % defined over a domain consisting of the cube in RGB space with opposite % vertices at (0,0,0) and (Cmax, Cmax, Cmax). QUANTIZE requires Cmax = % 255. % % The algorithm maps this domain onto a tree in which each node % represents a cube within that domain. In the following discussion % these cubes are defined by the coordinates of two opposite vertices (the % vertex nearest the origin in RGB space and the vertex farthest from the origin). % % The tree's root node represents the entire domain, (0,0,0) through % (Cmax,Cmax,Cmax). Each lower level in the tree is generated by % subdividing one node's cube into eight smaller cubes of equal size. % This corresponds to bisecting the parent cube with planes passing % through the midpoints of each edge. % % The basic algorithm operates in three phases: Classification, % Reduction, and Assignment. Classification builds a color description % tree for the image. Reduction collapses the tree until the number of colors it % represents is, at most, the number of colors desired in the output image. % Assignment defines the output image's color map and sets each pixel's % color by reclassification in the reduced tree.
Our goal is to minimize % the numerical discrepancies between the original colors and quantized % colors (quantization error). % % Classification begins by initializing a color description tree of % sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color description % tree in the classification phase for realistic values of Cmax. If % color components in the input image are quantized to k-bit precision, % so that Cmax = 2^k-1, the tree would need k levels below the root node to % allow representing each possible input color in a leaf. This becomes % prohibitive because the tree's total number of nodes is 1 + % sum(i=1..k, 8^i). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, classification scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each % such node: % % n1: Number of pixels whose color is contained in the RGB cube which % this node represents; % % n2: Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb: Sums of the red, green, and blue component values for all % pixels not classified at a lower depth. The combination of these sums % and n2 will ultimately characterize the mean color of a set of % pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the node's center. This represents the % quantization error for a node. % % Reduction repeatedly prunes the tree until the number of nodes with n2 % > 0 is less than or equal to the maximum number of colors allowed in % the output image. On any given iteration over the tree, it selects % those nodes whose E count is minimal for pruning and merges their color % statistics upward. It uses a pruning threshold, Ep, to govern node % selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixels' colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except the leaves of % the tree which represent colors present in the input image. % % The other pixel count, n1, indicates the total number of colors within % the cubic volume which the node represents.
This includes n1 - n2 % pixels whose colors should be defined by nodes at a lower level in the % tree. % % Assignment generates the output image from the pruned tree. The output % image consists of two parts: (1) A color map, which is an array of % color descriptions (RGB triples) for each color present in the output % image; (2) A pixel array, which represents each pixel as an index % into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % This method is based on a similar algorithm written by Paul Raveling. % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" /* Define declarations. */ #if !defined(__APPLE__) && !defined(TARGET_OS_IPHONE) #define CacheShift 2 #else #define CacheShift 3 #endif #define ErrorQueueLength 16 #define MaxNodes 266817 #define MaxTreeDepth 8 #define NodesInAList 1920 /* Typdef declarations. */ typedef struct _DoublePixelPacket { double red, green, blue, alpha; } DoublePixelPacket; typedef struct _NodeInfo { struct _NodeInfo *parent, *child[16]; MagickSizeType number_unique; DoublePixelPacket total_color; double quantize_error; size_t color_number, id, level; } NodeInfo; typedef struct _Nodes { NodeInfo *nodes; struct _Nodes *next; } Nodes; typedef struct _CubeInfo { NodeInfo *root; size_t colors, maximum_colors; ssize_t transparent_index; MagickSizeType transparent_pixels; DoublePixelPacket target; double distance, pruning_threshold, next_threshold; size_t nodes, free_nodes, color_number; NodeInfo *next_node; Nodes *node_queue; MemoryInfo *memory_info; ssize_t *cache; DoublePixelPacket error[ErrorQueueLength]; double weights[ErrorQueueLength]; QuantizeInfo *quantize_info; MagickBooleanType associate_alpha; ssize_t x, y; size_t depth; MagickOffsetType offset; MagickSizeType span; } CubeInfo; /* Method prototypes. 
*/ static CubeInfo *GetCubeInfo(const QuantizeInfo *,const size_t,const size_t); static NodeInfo *GetNodeInfo(CubeInfo *,const size_t,const size_t,NodeInfo *); static MagickBooleanType AssignImageColors(Image *,CubeInfo *,ExceptionInfo *), ClassifyImageColors(CubeInfo *,const Image *,ExceptionInfo *), DitherImage(Image *,CubeInfo *,ExceptionInfo *), SetGrayscaleImage(Image *,ExceptionInfo *); static size_t DefineImageColormap(Image *,CubeInfo *,NodeInfo *); static void ClosestColor(const Image *,CubeInfo *,const NodeInfo *), DestroyCubeInfo(CubeInfo *), PruneLevel(CubeInfo *,const NodeInfo *), PruneToCubeDepth(CubeInfo *,const NodeInfo *), ReduceImageColors(const Image *,CubeInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireQuantizeInfo() allocates the QuantizeInfo structure. % % The format of the AcquireQuantizeInfo method is: % % QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport QuantizeInfo *AcquireQuantizeInfo(const ImageInfo *image_info) { QuantizeInfo *quantize_info; quantize_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*quantize_info)); if (quantize_info == (QuantizeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetQuantizeInfo(quantize_info); if (image_info != (ImageInfo *) NULL) { const char *option; quantize_info->dither_method=image_info->dither == MagickFalse ? NoDitherMethod : RiemersmaDitherMethod; option=GetImageOption(image_info,"dither"); if (option != (const char *) NULL) quantize_info->dither_method=(DitherMethod) ParseCommandOption( MagickDitherOptions,MagickFalse,option); quantize_info->measure_error=image_info->verbose; } return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A s s i g n I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AssignImageColors() generates the output image from the pruned tree. The % output image consists of two parts: (1) A color map, which is an array % of color descriptions (RGB triples) for each color present in the % output image; (2) A pixel array, which represents each pixel as an % index into the color map array. % % First, the assignment phase makes one pass over the pruned color % description tree to establish the image's color map. For each node % with n2 > 0, it divides Sr, Sg, and Sb by n2 . This produces the mean % color of all pixels that classify no lower than this node. Each of % these colors becomes an entry in the color map. % % Finally, the assignment phase reclassifies each pixel in the pruned % tree to identify the deepest node containing the pixel's color. The % pixel's value in the pixel array becomes the index of this node's mean % color in the color map. % % The format of the AssignImageColors() method is: % % MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. 
% */ static inline void AssociateAlphaPixel(const Image *image, const CubeInfo *cube_info,const Quantum *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (GetPixelAlpha(image,pixel) == OpaqueAlpha)) { alpha_pixel->red=(double) GetPixelRed(image,pixel); alpha_pixel->green=(double) GetPixelGreen(image,pixel); alpha_pixel->blue=(double) GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); return; } alpha=(double) (QuantumScale*GetPixelAlpha(image,pixel)); alpha_pixel->red=alpha*GetPixelRed(image,pixel); alpha_pixel->green=alpha*GetPixelGreen(image,pixel); alpha_pixel->blue=alpha*GetPixelBlue(image,pixel); alpha_pixel->alpha=(double) GetPixelAlpha(image,pixel); } static inline void AssociateAlphaPixelInfo(const CubeInfo *cube_info, const PixelInfo *pixel,DoublePixelPacket *alpha_pixel) { double alpha; if ((cube_info->associate_alpha == MagickFalse) || (pixel->alpha == OpaqueAlpha)) { alpha_pixel->red=(double) pixel->red; alpha_pixel->green=(double) pixel->green; alpha_pixel->blue=(double) pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; return; } alpha=(double) (QuantumScale*pixel->alpha); alpha_pixel->red=alpha*pixel->red; alpha_pixel->green=alpha*pixel->green; alpha_pixel->blue=alpha*pixel->blue; alpha_pixel->alpha=(double) pixel->alpha; } static inline size_t ColorToNodeId(const CubeInfo *cube_info, const DoublePixelPacket *pixel,size_t index) { size_t id; id=(size_t) (((ScaleQuantumToChar(ClampPixel(pixel->red)) >> index) & 0x01) | ((ScaleQuantumToChar(ClampPixel(pixel->green)) >> index) & 0x01) << 1 | ((ScaleQuantumToChar(ClampPixel(pixel->blue)) >> index) & 0x01) << 2); if (cube_info->associate_alpha != MagickFalse) id|=((ScaleQuantumToChar(ClampPixel(pixel->alpha)) >> index) & 0x1) << 3; return(id); } static MagickBooleanType AssignImageColors(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define AssignImageTag "Assign/Image" ssize_t y; /* Allocate image colormap. */ if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace(image,cube_info->quantize_info->colorspace, exception); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace(image,sRGBColorspace,exception); if (AcquireImageColormap(image,cube_info->colors,exception) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename);; image->colors=0; cube_info->transparent_pixels=0; cube_info->transparent_index=(-1); (void) DefineImageColormap(image,cube_info,cube_info->root); /* Create a reduced color image. 
*/ if (cube_info->quantize_info->dither_method != NoDitherMethod) (void) DitherImage(image,cube_info,exception); else { CacheView *image_view; MagickBooleanType status; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { CubeInfo cube; register Quantum *magick_restrict q; register ssize_t x; ssize_t count; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); for (x=0; x < (ssize_t) image->columns; x+=count) { DoublePixelPacket pixel; register const NodeInfo *node_info; register ssize_t i; size_t id, index; /* Identify the deepest node containing the pixel's color. */ for (count=1; (x+count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,q+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,q,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,&cube,q,&pixel); node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); index=cube.color_number; for (i=0; i < (ssize_t) count; i++) { if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum( image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum( image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum( image->colormap[index].blue),q); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum( image->colormap[index].alpha),q); } q+=GetPixelChannels(image); } } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_AssignImageColors) #endif proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); } if (cube_info->quantize_info->measure_error != MagickFalse) (void) GetImageQuantizeError(image,exception); if ((cube_info->quantize_info->number_colors == 2) && (cube_info->quantize_info->colorspace == GRAYColorspace)) { double intensity; /* Monochrome image. 
*/ intensity=0.0; if ((image->colors > 1) && (GetPixelInfoLuma(image->colormap+0) > GetPixelInfoLuma(image->colormap+1))) intensity=(double) QuantumRange; image->colormap[0].red=intensity; image->colormap[0].green=intensity; image->colormap[0].blue=intensity; if (image->colors > 1) { image->colormap[1].red=(double) QuantumRange-intensity; image->colormap[1].green=(double) QuantumRange-intensity; image->colormap[1].blue=(double) QuantumRange-intensity; } } (void) SyncImage(image,exception); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClassifyImageColors() begins by initializing a color description tree % of sufficient depth to represent each possible input color in a leaf. % However, it is impractical to generate a fully-formed color % description tree in the classification phase for realistic values of % Cmax. If color components in the input image are quantized to k-bit % precision, so that Cmax = 2^k-1, the tree would need k levels below the % root node to allow representing each possible input color in a leaf. % This becomes prohibitive because the tree's total number of nodes is % 1 + sum(i=1..k, 8^i). % % A complete tree would require 19,173,961 nodes for k = 8, Cmax = 255. % Therefore, to avoid building a fully populated tree, QUANTIZE: (1) % Initializes data structures for nodes only as they are needed; (2) % Chooses a maximum depth for the tree as a function of the desired % number of colors in the output image (currently log2(colormap size)). % % For each pixel in the input image, classification scans downward from % the root of the color description tree. At each level of the tree it % identifies the single node which represents a cube in RGB space % containing the pixel's color. It updates the following data for each such node: % % n1 : Number of pixels whose color is contained in the RGB cube % which this node represents; % % n2 : Number of pixels whose color is not represented in a node at % lower depth in the tree; initially, n2 = 0 for all nodes except % leaves of the tree. % % Sr, Sg, Sb : Sums of the red, green, and blue component values for % all pixels not classified at a lower depth. The combination of % these sums and n2 will ultimately characterize the mean color of a % set of pixels represented by this node. % % E: the distance squared in RGB space between each pixel contained % within a node and the node's center. This represents the quantization % error for a node. % % The format of the ClassifyImageColors() method is: % % MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, % const Image *image,ExceptionInfo *exception) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o image: the image. % */ static inline void SetAssociatedAlpha(const Image *image,CubeInfo *cube_info) { MagickBooleanType associate_alpha; associate_alpha=image->alpha_trait == BlendPixelTrait ?
MagickTrue : MagickFalse; if ((cube_info->quantize_info->number_colors == 2) && (cube_info->quantize_info->colorspace == GRAYColorspace)) associate_alpha=MagickFalse; cube_info->associate_alpha=associate_alpha; } static MagickBooleanType ClassifyImageColors(CubeInfo *cube_info, const Image *image,ExceptionInfo *exception) { #define ClassifyImageTag "Classify/Image" CacheView *image_view; DoublePixelPacket error, mid, midpoint, pixel; MagickBooleanType proceed; double bisect; NodeInfo *node_info; size_t count, id, index, level; ssize_t y; /* Classify the first cube_info->maximum_colors colors to a tree depth of 8. */ SetAssociatedAlpha(image,cube_info); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image, cube_info->quantize_info->colorspace,exception); else if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); midpoint.red=(double) QuantumRange/2.0; midpoint.green=(double) QuantumRange/2.0; midpoint.blue=(double) QuantumRange/2.0; midpoint.alpha=(double) QuantumRange/2.0; error.alpha=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= MaxTreeDepth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'", image->filename); continue; } if (level == MaxTreeDepth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance)) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. 
*/ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel(OpaqueAlpha); p+=count*GetPixelChannels(image); } if (cube_info->colors > cube_info->maximum_colors) { PruneToCubeDepth(cube_info,cube_info->root); break; } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } for (y++; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; if (cube_info->nodes > MaxNodes) { /* Prune one level if the color tree is too large. */ PruneLevel(cube_info,cube_info->root); cube_info->depth--; } for (x=0; x < (ssize_t) image->columns; x+=(ssize_t) count) { /* Start at the root and descend the color cube tree. */ for (count=1; (x+(ssize_t) count) < (ssize_t) image->columns; count++) { PixelInfo packet; GetPixelInfoPixel(image,p+count*GetPixelChannels(image),&packet); if (IsPixelEquivalent(image,p,&packet) == MagickFalse) break; } AssociateAlphaPixel(image,cube_info,p,&pixel); index=MaxTreeDepth-1; bisect=((double) QuantumRange+1.0)/2.0; mid=midpoint; node_info=cube_info->root; for (level=1; level <= cube_info->depth; level++) { double distance; bisect*=0.5; id=ColorToNodeId(cube_info,&pixel,index); mid.red+=(id & 1) != 0 ? bisect : -bisect; mid.green+=(id & 2) != 0 ? bisect : -bisect; mid.blue+=(id & 4) != 0 ? bisect : -bisect; mid.alpha+=(id & 8) != 0 ? bisect : -bisect; if (node_info->child[id] == (NodeInfo *) NULL) { /* Set colors of new node to contain pixel. */ node_info->child[id]=GetNodeInfo(cube_info,id,level,node_info); if (node_info->child[id] == (NodeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","%s", image->filename); continue; } if (level == cube_info->depth) cube_info->colors++; } /* Approximate the quantization error represented by this node. */ node_info=node_info->child[id]; error.red=QuantumScale*(pixel.red-mid.red); error.green=QuantumScale*(pixel.green-mid.green); error.blue=QuantumScale*(pixel.blue-mid.blue); if (cube_info->associate_alpha != MagickFalse) error.alpha=QuantumScale*(pixel.alpha-mid.alpha); distance=(double) (error.red*error.red+error.green*error.green+ error.blue*error.blue+error.alpha*error.alpha); if (IsNaN(distance) != MagickFalse) distance=0.0; node_info->quantize_error+=count*sqrt(distance); cube_info->root->quantize_error+=node_info->quantize_error; index--; } /* Sum RGB for this leaf for later derivation of the mean cube color. 
*/ node_info->number_unique+=count; node_info->total_color.red+=count*QuantumScale*ClampPixel(pixel.red); node_info->total_color.green+=count*QuantumScale*ClampPixel(pixel.green); node_info->total_color.blue+=count*QuantumScale*ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) node_info->total_color.alpha+=count*QuantumScale* ClampPixel(pixel.alpha); else node_info->total_color.alpha+=count*QuantumScale* ClampPixel(OpaqueAlpha); p+=count*GetPixelChannels(image); } proceed=SetImageProgress(image,ClassifyImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) break; } image_view=DestroyCacheView(image_view); if ((cube_info->quantize_info->colorspace != UndefinedColorspace) && (cube_info->quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace((Image *) image,sRGBColorspace,exception); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneQuantizeInfo() makes a duplicate of the given quantize info structure, % or if quantize info is NULL, a new one. % % The format of the CloneQuantizeInfo method is: % % QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o clone_info: Method CloneQuantizeInfo returns a duplicate of the given % quantize info, or if image info is NULL a new one. % % o quantize_info: a structure of type info. % */ MagickExport QuantizeInfo *CloneQuantizeInfo(const QuantizeInfo *quantize_info) { QuantizeInfo *clone_info; clone_info=(QuantizeInfo *) AcquireMagickMemory(sizeof(*clone_info)); if (clone_info == (QuantizeInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); GetQuantizeInfo(clone_info); if (quantize_info == (QuantizeInfo *) NULL) return(clone_info); clone_info->number_colors=quantize_info->number_colors; clone_info->tree_depth=quantize_info->tree_depth; clone_info->dither_method=quantize_info->dither_method; clone_info->colorspace=quantize_info->colorspace; clone_info->measure_error=quantize_info->measure_error; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o s e s t C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClosestColor() traverses the color cube tree at a particular node and % determines which colormap entry best represents the input color. % % The format of the ClosestColor method is: % % void ClosestColor(const Image *image,CubeInfo *cube_info, % const NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static void ClosestColor(const Image *image,CubeInfo *cube_info, const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 
8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) ClosestColor(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { double pixel; register double alpha, beta, distance; register DoublePixelPacket *magick_restrict q; register PixelInfo *magick_restrict p; /* Determine if this color is "closest". */ p=image->colormap+node_info->color_number; q=(&cube_info->target); alpha=1.0; beta=1.0; if (cube_info->associate_alpha != MagickFalse) { alpha=(double) (QuantumScale*p->alpha); beta=(double) (QuantumScale*q->alpha); } pixel=alpha*p->red-beta*q->red; distance=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->green-beta*q->green; distance+=pixel*pixel; if (distance <= cube_info->distance) { pixel=alpha*p->blue-beta*q->blue; distance+=pixel*pixel; if (distance <= cube_info->distance) { if (cube_info->associate_alpha != MagickFalse) { pixel=p->alpha-q->alpha; distance+=pixel*pixel; } if (distance <= cube_info->distance) { cube_info->distance=distance; cube_info->color_number=node_info->color_number; } } } } } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o m p r e s s I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CompressImageColormap() compresses an image colormap by removing any % duplicate or unused color entries. % % The format of the CompressImageColormap method is: % % MagickBooleanType CompressImageColormap(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType CompressImageColormap(Image *image, ExceptionInfo *exception) { QuantizeInfo quantize_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class != PseudoClass) return(MagickFalse); GetQuantizeInfo(&quantize_info); quantize_info.number_colors=image->colors; quantize_info.tree_depth=MaxTreeDepth; return(QuantizeImage(&quantize_info,image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineImageColormap() traverses the color cube tree and notes each colormap % entry. A colormap entry is any node in the color cube tree where the % of unique colors is not zero. DefineImageColormap() returns the number of % colors in the image colormap. % % The format of the DefineImageColormap method is: % % size_t DefineImageColormap(Image *image,CubeInfo *cube_info, % NodeInfo *node_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o node_info: the address of a structure of type NodeInfo which points to a % node in the color cube tree that is to be pruned. % */ static size_t DefineImageColormap(Image *image,CubeInfo *cube_info, NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 
8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) (void) DefineImageColormap(image,cube_info,node_info->child[i]); if (node_info->number_unique != 0) { register double alpha; register PixelInfo *magick_restrict q; /* Colormap entry is defined by the mean color in this cube. */ q=image->colormap+image->colors; alpha=(double) ((MagickOffsetType) node_info->number_unique); alpha=PerceptibleReciprocal(alpha); if (cube_info->associate_alpha == MagickFalse) { q->red=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.blue); q->alpha=(double) OpaqueAlpha; } else { double opacity; opacity=(double) (alpha*QuantumRange*node_info->total_color.alpha); q->alpha=(double) ClampToQuantum(opacity); if (q->alpha == OpaqueAlpha) { q->red=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*QuantumRange* node_info->total_color.blue); } else { double gamma; gamma=(double) (QuantumScale*q->alpha); gamma=PerceptibleReciprocal(gamma); q->red=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.red); q->green=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.green); q->blue=(double) ClampToQuantum(alpha*gamma*QuantumRange* node_info->total_color.blue); if (node_info->number_unique > cube_info->transparent_pixels) { cube_info->transparent_pixels=node_info->number_unique; cube_info->transparent_index=(ssize_t) image->colors; } } } node_info->color_number=image->colors++; } return(image->colors); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyCubeInfo() deallocates memory associated with an image. % % The format of the DestroyCubeInfo method is: % % DestroyCubeInfo(CubeInfo *cube_info) % % A description of each parameter follows: % % o cube_info: the address of a structure of type CubeInfo. % */ static void DestroyCubeInfo(CubeInfo *cube_info) { register Nodes *nodes; /* Release color cube tree storage. */ do { nodes=cube_info->node_queue->next; cube_info->node_queue->nodes=(NodeInfo *) RelinquishMagickMemory( cube_info->node_queue->nodes); cube_info->node_queue=(Nodes *) RelinquishMagickMemory( cube_info->node_queue); cube_info->node_queue=nodes; } while (cube_info->node_queue != (Nodes *) NULL); if (cube_info->memory_info != (MemoryInfo *) NULL) cube_info->memory_info=RelinquishVirtualMemory(cube_info->memory_info); cube_info->quantize_info=DestroyQuantizeInfo(cube_info->quantize_info); cube_info=(CubeInfo *) RelinquishMagickMemory(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyQuantizeInfo() deallocates memory associated with an QuantizeInfo % structure. % % The format of the DestroyQuantizeInfo method is: % % QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. 
% */ MagickExport QuantizeInfo *DestroyQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); quantize_info->signature=(~MagickCoreSignature); quantize_info=(QuantizeInfo *) RelinquishMagickMemory(quantize_info); return(quantize_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i t h e r I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DitherImage() distributes the difference between an original image and % the corresponding color-reduced image to neighboring pixels using % serpentine-scan Floyd-Steinberg error diffusion. DitherImage returns % MagickTrue if the image is dithered, otherwise MagickFalse. % % The format of the DitherImage method is: % % MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % % o exception: return any errors or warnings in this structure. % */ static DoublePixelPacket **DestroyPixelThreadSet(DoublePixelPacket **pixels) { register ssize_t i; assert(pixels != (DoublePixelPacket **) NULL); for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++) if (pixels[i] != (DoublePixelPacket *) NULL) pixels[i]=(DoublePixelPacket *) RelinquishMagickMemory(pixels[i]); pixels=(DoublePixelPacket **) RelinquishMagickMemory(pixels); return(pixels); } static DoublePixelPacket **AcquirePixelThreadSet(const size_t count) { DoublePixelPacket **pixels; register ssize_t i; size_t number_threads; number_threads=(size_t) GetMagickResourceLimit(ThreadResource); pixels=(DoublePixelPacket **) AcquireQuantumMemory(number_threads, sizeof(*pixels)); if (pixels == (DoublePixelPacket **) NULL) return((DoublePixelPacket **) NULL); (void) ResetMagickMemory(pixels,0,number_threads*sizeof(*pixels)); for (i=0; i < (ssize_t) number_threads; i++) { pixels[i]=(DoublePixelPacket *) AcquireQuantumMemory(count,2* sizeof(**pixels)); if (pixels[i] == (DoublePixelPacket *) NULL) return(DestroyPixelThreadSet(pixels)); } return(pixels); } static inline ssize_t CacheOffset(CubeInfo *cube_info, const DoublePixelPacket *pixel) { #define RedShift(pixel) (((pixel) >> CacheShift) << (0*(8-CacheShift))) #define GreenShift(pixel) (((pixel) >> CacheShift) << (1*(8-CacheShift))) #define BlueShift(pixel) (((pixel) >> CacheShift) << (2*(8-CacheShift))) #define AlphaShift(pixel) (((pixel) >> CacheShift) << (3*(8-CacheShift))) ssize_t offset; offset=(ssize_t) (RedShift(ScaleQuantumToChar(ClampPixel(pixel->red))) | GreenShift(ScaleQuantumToChar(ClampPixel(pixel->green))) | BlueShift(ScaleQuantumToChar(ClampPixel(pixel->blue)))); if (cube_info->associate_alpha != MagickFalse) offset|=AlphaShift(ScaleQuantumToChar(ClampPixel(pixel->alpha))); return(offset); } static MagickBooleanType FloydSteinbergDither(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; DoublePixelPacket **pixels; MagickBooleanType status; ssize_t y; /* Distribute quantization error using Floyd-Steinberg.
*/ pixels=AcquirePixelThreadSet(image->columns); if (pixels == (DoublePixelPacket **) NULL) return(MagickFalse); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); CubeInfo cube; DoublePixelPacket *current, *previous; register Quantum *magick_restrict q; register ssize_t x; size_t index; ssize_t v; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } cube=(*cube_info); current=pixels[id]+(y & 0x01)*image->columns; previous=pixels[id]+((y+1) & 0x01)*image->columns; v=(ssize_t) ((y & 0x01) != 0 ? -1 : 1); for (x=0; x < (ssize_t) image->columns; x++) { DoublePixelPacket color, pixel; register ssize_t i; ssize_t u; u=(y & 0x01) != 0 ? (ssize_t) image->columns-1-x : x; AssociateAlphaPixel(image,&cube,q+u*GetPixelChannels(image),&pixel); if (x > 0) { pixel.red+=7*current[u-v].red/16; pixel.green+=7*current[u-v].green/16; pixel.blue+=7*current[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=7*current[u-v].alpha/16; } if (y > 0) { if (x < (ssize_t) (image->columns-1)) { pixel.red+=previous[u+v].red/16; pixel.green+=previous[u+v].green/16; pixel.blue+=previous[u+v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=previous[u+v].alpha/16; } pixel.red+=5*previous[u].red/16; pixel.green+=5*previous[u].green/16; pixel.blue+=5*previous[u].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=5*previous[u].alpha/16; if (x > 0) { pixel.red+=3*previous[u-v].red/16; pixel.green+=3*previous[u-v].green/16; pixel.blue+=3*previous[u-v].blue/16; if (cube.associate_alpha != MagickFalse) pixel.alpha+=3*previous[u-v].alpha/16; } } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube.associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(&cube,&pixel); if (cube.cache[i] < 0) { register NodeInfo *node_info; register size_t node_id; /* Identify the deepest node containing the pixel's color. */ node_info=cube.root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { node_id=ColorToNodeId(&cube,&pixel,index); if (node_info->child[node_id] == (NodeInfo *) NULL) break; node_info=node_info->child[node_id]; } /* Find closest color among siblings and their children. */ cube.target=pixel; cube.distance=(double) (4.0*(QuantumRange+1.0)*(QuantumRange+1.0)+ 1.0); ClosestColor(image,&cube,node_info->parent); cube.cache[i]=(ssize_t) cube.color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) cube.cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q+u*GetPixelChannels(image)); if (cube.quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red), q+u*GetPixelChannels(image)); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green), q+u*GetPixelChannels(image)); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue), q+u*GetPixelChannels(image)); if (cube.associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha), q+u*GetPixelChannels(image)); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; /* Store the error. 
*/ AssociateAlphaPixelInfo(&cube,image->colormap+index,&color); current[u].red=pixel.red-color.red; current[u].green=pixel.green-color.green; current[u].blue=pixel.blue-color.blue; if (cube.associate_alpha != MagickFalse) current[u].alpha=pixel.alpha-color.alpha; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; proceed=SetImageProgress(image,DitherImageTag,(MagickOffsetType) y, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } } image_view=DestroyCacheView(image_view); pixels=DestroyPixelThreadSet(pixels); return(MagickTrue); } static MagickBooleanType RiemersmaDither(Image *,CacheView *,CubeInfo *,const unsigned int, ExceptionInfo *); static void Riemersma(Image *image,CacheView *image_view,CubeInfo *cube_info, const size_t level,const unsigned int direction,ExceptionInfo *exception) { if (level == 1) switch (direction) { case WestGravity: { (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); break; } case EastGravity: { (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); break; } case NorthGravity: { (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); break; } case SouthGravity: { (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); break; } default: break; } else switch (direction) { case WestGravity: { Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); break; } case EastGravity: { Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); break; } case NorthGravity: { Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,EastGravity, exception); Riemersma(image,image_view,cube_info,level-1,NorthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,EastGravity, 
exception); break; } case SouthGravity: { Riemersma(image,image_view,cube_info,level-1,EastGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,NorthGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,WestGravity, exception); Riemersma(image,image_view,cube_info,level-1,SouthGravity, exception); (void) RiemersmaDither(image,image_view,cube_info,SouthGravity, exception); Riemersma(image,image_view,cube_info,level-1,WestGravity, exception); break; } default: break; } } static MagickBooleanType RiemersmaDither(Image *image,CacheView *image_view, CubeInfo *cube_info,const unsigned int direction,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" DoublePixelPacket color, pixel; MagickBooleanType proceed; register CubeInfo *p; size_t index; p=cube_info; if ((p->x >= 0) && (p->x < (ssize_t) image->columns) && (p->y >= 0) && (p->y < (ssize_t) image->rows)) { register Quantum *magick_restrict q; register ssize_t i; /* Distribute error. */ q=GetCacheViewAuthenticPixels(image_view,p->x,p->y,1,1,exception); if (q == (Quantum *) NULL) return(MagickFalse); AssociateAlphaPixel(image,cube_info,q,&pixel); for (i=0; i < ErrorQueueLength; i++) { pixel.red+=p->weights[i]*p->error[i].red; pixel.green+=p->weights[i]*p->error[i].green; pixel.blue+=p->weights[i]*p->error[i].blue; if (cube_info->associate_alpha != MagickFalse) pixel.alpha+=p->weights[i]*p->error[i].alpha; } pixel.red=(double) ClampPixel(pixel.red); pixel.green=(double) ClampPixel(pixel.green); pixel.blue=(double) ClampPixel(pixel.blue); if (cube_info->associate_alpha != MagickFalse) pixel.alpha=(double) ClampPixel(pixel.alpha); i=CacheOffset(cube_info,&pixel); if (p->cache[i] < 0) { register NodeInfo *node_info; register size_t id; /* Identify the deepest node containing the pixel's color. */ node_info=p->root; for (index=MaxTreeDepth-1; (ssize_t) index > 0; index--) { id=ColorToNodeId(cube_info,&pixel,index); if (node_info->child[id] == (NodeInfo *) NULL) break; node_info=node_info->child[id]; } /* Find closest color among siblings and their children. */ p->target=pixel; p->distance=(double) (4.0*(QuantumRange+1.0)*((double) QuantumRange+1.0)+1.0); ClosestColor(image,p,node_info->parent); p->cache[i]=(ssize_t) p->color_number; } /* Assign pixel to closest colormap entry. */ index=(size_t) p->cache[i]; if (image->storage_class == PseudoClass) SetPixelIndex(image,(Quantum) index,q); if (cube_info->quantize_info->measure_error == MagickFalse) { SetPixelRed(image,ClampToQuantum(image->colormap[index].red),q); SetPixelGreen(image,ClampToQuantum(image->colormap[index].green),q); SetPixelBlue(image,ClampToQuantum(image->colormap[index].blue),q); if (cube_info->associate_alpha != MagickFalse) SetPixelAlpha(image,ClampToQuantum(image->colormap[index].alpha),q); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) return(MagickFalse); /* Propagate the error as the last entry of the error queue. 
*/ (void) CopyMagickMemory(p->error,p->error+1,(ErrorQueueLength-1)* sizeof(p->error[0])); AssociateAlphaPixelInfo(cube_info,image->colormap+index,&color); p->error[ErrorQueueLength-1].red=pixel.red-color.red; p->error[ErrorQueueLength-1].green=pixel.green-color.green; p->error[ErrorQueueLength-1].blue=pixel.blue-color.blue; if (cube_info->associate_alpha != MagickFalse) p->error[ErrorQueueLength-1].alpha=pixel.alpha-color.alpha; proceed=SetImageProgress(image,DitherImageTag,p->offset,p->span); if (proceed == MagickFalse) return(MagickFalse); p->offset++; } switch (direction) { case WestGravity: p->x--; break; case EastGravity: p->x++; break; case NorthGravity: p->y--; break; case SouthGravity: p->y++; break; } return(MagickTrue); } static MagickBooleanType DitherImage(Image *image,CubeInfo *cube_info, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; size_t depth; if (cube_info->quantize_info->dither_method != RiemersmaDitherMethod) return(FloydSteinbergDither(image,cube_info,exception)); /* Distribute quantization error along a Hilbert curve. */ (void) ResetMagickMemory(cube_info->error,0,ErrorQueueLength* sizeof(*cube_info->error)); cube_info->x=0; cube_info->y=0; i=MagickMax((ssize_t) image->columns,(ssize_t) image->rows); for (depth=1; i != 0; depth++) i>>=1; if ((ssize_t) (1L << depth) < MagickMax((ssize_t) image->columns,(ssize_t) image->rows)) depth++; cube_info->offset=0; cube_info->span=(MagickSizeType) image->columns*image->rows; image_view=AcquireAuthenticCacheView(image,exception); if (depth > 1) Riemersma(image,image_view,cube_info,depth-1,NorthGravity,exception); status=RiemersmaDither(image,image_view,cube_info,ForgetGravity,exception); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t C u b e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCubeInfo() initializes the Cube data structure. % % The format of the GetCubeInfo method is: % % CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info, % const size_t depth,const size_t maximum_colors) % % A description of each parameter follows. % % o quantize_info: Specifies a pointer to a QuantizeInfo structure. % % o depth: Normally, this integer value is zero or one. A zero or % one tells Quantize to choose an optimal tree depth of Log4(number_colors). % A tree of this depth generally allows the best representation of the % reference image with the least amount of memory and the fastest % computational speed. In some cases, such as an image with low color % dispersion (a small number of colors), a value other than % Log4(number_colors) is required. To expand the color tree completely, % use a value of 8. % % o maximum_colors: maximum colors. % */ static CubeInfo *GetCubeInfo(const QuantizeInfo *quantize_info, const size_t depth,const size_t maximum_colors) { CubeInfo *cube_info; double sum, weight; register ssize_t i; size_t length; /* Initialize tree to describe color cube_info. */ cube_info=(CubeInfo *) AcquireMagickMemory(sizeof(*cube_info)); if (cube_info == (CubeInfo *) NULL) return((CubeInfo *) NULL); (void) ResetMagickMemory(cube_info,0,sizeof(*cube_info)); cube_info->depth=depth; if (cube_info->depth > MaxTreeDepth) cube_info->depth=MaxTreeDepth; if (cube_info->depth < 2) cube_info->depth=2; cube_info->maximum_colors=maximum_colors; /* Initialize root node.
*/ cube_info->root=GetNodeInfo(cube_info,0,0,(NodeInfo *) NULL); if (cube_info->root == (NodeInfo *) NULL) return((CubeInfo *) NULL); cube_info->root->parent=cube_info->root; cube_info->quantize_info=CloneQuantizeInfo(quantize_info); if (cube_info->quantize_info->dither_method == NoDitherMethod) return(cube_info); /* Initialize dither resources. */ length=(size_t) (1UL << (4*(8-CacheShift))); cube_info->memory_info=AcquireVirtualMemory(length,sizeof(*cube_info->cache)); if (cube_info->memory_info == (MemoryInfo *) NULL) return((CubeInfo *) NULL); cube_info->cache=(ssize_t *) GetVirtualMemoryBlob(cube_info->memory_info); /* Initialize color cache. */ (void) ResetMagickMemory(cube_info->cache,(-1),sizeof(*cube_info->cache)* length); /* Distribute weights along a curve of exponential decay. */ weight=1.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[ErrorQueueLength-i-1]=PerceptibleReciprocal(weight); weight*=exp(log(((double) QuantumRange+1.0))/(ErrorQueueLength-1.0)); } /* Normalize the weighting factors. */ weight=0.0; for (i=0; i < ErrorQueueLength; i++) weight+=cube_info->weights[i]; sum=0.0; for (i=0; i < ErrorQueueLength; i++) { cube_info->weights[i]/=weight; sum+=cube_info->weights[i]; } cube_info->weights[0]+=1.0-sum; return(cube_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t N o d e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNodeInfo() allocates memory for a new node in the color cube tree and % presets all fields to zero. % % The format of the GetNodeInfo method is: % % NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, % const size_t level,NodeInfo *parent) % % A description of each parameter follows. % % o node: The GetNodeInfo method returns a pointer to the new node. % % o id: Specifies the child number of the node. % % o level: Specifies the level in the color cube tree at which the node % resides. % */ static NodeInfo *GetNodeInfo(CubeInfo *cube_info,const size_t id, const size_t level,NodeInfo *parent) { NodeInfo *node_info; if (cube_info->free_nodes == 0) { Nodes *nodes; /* Allocate a new queue of nodes. */ nodes=(Nodes *) AcquireMagickMemory(sizeof(*nodes)); if (nodes == (Nodes *) NULL) return((NodeInfo *) NULL); nodes->nodes=(NodeInfo *) AcquireQuantumMemory(NodesInAList, sizeof(*nodes->nodes)); if (nodes->nodes == (NodeInfo *) NULL) return((NodeInfo *) NULL); nodes->next=cube_info->node_queue; cube_info->node_queue=nodes; cube_info->next_node=nodes->nodes; cube_info->free_nodes=NodesInAList; } cube_info->nodes++; cube_info->free_nodes--; node_info=cube_info->next_node++; (void) ResetMagickMemory(node_info,0,sizeof(*node_info)); node_info->parent=parent; node_info->id=id; node_info->level=level; return(node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t i z e E r r o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantizeError() measures the difference between the original % and quantized images. This difference is the total quantization error. % The error is computed by summing over all pixels in an image the distance % squared in RGB space between each reference pixel value and its quantized % value. These values are computed: % % o mean_error_per_pixel: This value is the mean error for any single % pixel in the image.
% % o normalized_mean_square_error: This value is the normalized mean % quantization error for any single pixel in the image. This distance % measure is normalized to a range between 0 and 1. It is independent % of the range of red, green, and blue values in the image. % % o normalized_maximum_square_error: This value is the normalized % maximum quantization error for any single pixel in the image. This % distance measure is normalized to a range between 0 and 1. It is % independent of the range of red, green, and blue values in your image. % % The format of the GetImageQuantizeError method is: % % MagickBooleanType GetImageQuantizeError(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetImageQuantizeError(Image *image, ExceptionInfo *exception) { CacheView *image_view; double alpha, area, beta, distance, maximum_error, mean_error, mean_error_per_pixel; size_t index; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->total_colors=GetNumberColors(image,(FILE *) NULL,exception); (void) ResetMagickMemory(&image->error,0,sizeof(image->error)); if (image->storage_class == DirectClass) return(MagickTrue); alpha=1.0; beta=1.0; area=3.0*image->columns*image->rows; maximum_error=0.0; mean_error_per_pixel=0.0; mean_error=0.0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { index=GetPixelIndex(image,p); if (image->alpha_trait == BlendPixelTrait) { alpha=(double) (QuantumScale*GetPixelAlpha(image,p)); beta=(double) (QuantumScale*image->colormap[index].alpha); } distance=fabs((double) (alpha*GetPixelRed(image,p)-beta* image->colormap[index].red)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelGreen(image,p)-beta* image->colormap[index].green)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; distance=fabs((double) (alpha*GetPixelBlue(image,p)-beta* image->colormap[index].blue)); mean_error_per_pixel+=distance; mean_error+=distance*distance; if (distance > maximum_error) maximum_error=distance; p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); image->error.mean_error_per_pixel=(double) mean_error_per_pixel/area; image->error.normalized_mean_error=(double) QuantumScale*QuantumScale* mean_error/area; image->error.normalized_maximum_error=(double) QuantumScale*maximum_error; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t Q u a n t i z e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetQuantizeInfo() initializes the QuantizeInfo structure. % % The format of the GetQuantizeInfo method is: % % GetQuantizeInfo(QuantizeInfo *quantize_info) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to a QuantizeInfo structure.
% */ MagickExport void GetQuantizeInfo(QuantizeInfo *quantize_info) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(quantize_info != (QuantizeInfo *) NULL); (void) ResetMagickMemory(quantize_info,0,sizeof(*quantize_info)); quantize_info->number_colors=256; quantize_info->dither_method=RiemersmaDitherMethod; quantize_info->colorspace=UndefinedColorspace; quantize_info->measure_error=MagickFalse; quantize_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t e r i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PosterizeImage() reduces the image to a limited number of colors for a % "poster" effect. % % The format of the PosterizeImage method is: % % MagickBooleanType PosterizeImage(Image *image,const size_t levels, % const DitherMethod dither_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: Specifies a pointer to an Image structure. % % o levels: Number of color levels allowed in each channel. Very low values % (2, 3, or 4) have the most visible effect. % % o dither_method: choose from UndefinedDitherMethod, NoDitherMethod, % RiemersmaDitherMethod, FloydSteinbergDitherMethod. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport MagickBooleanType PosterizeImage(Image *image,const size_t levels, const DitherMethod dither_method,ExceptionInfo *exception) { #define PosterizeImageTag "Posterize/Image" #define PosterizePixel(pixel) (Quantum) (QuantumRange*(MagickRound( \ QuantumScale*pixel*(levels-1)))/MagickMax((ssize_t) levels-1,1)) CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; QuantizeInfo *quantize_info; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (image->storage_class == PseudoClass) #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,1,1) #endif for (i=0; i < (ssize_t) image->colors; i++) { /* Posterize colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) PosterizePixel(image->colormap[i].red); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) PosterizePixel(image->colormap[i].green); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) PosterizePixel(image->colormap[i].blue); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) PosterizePixel(image->colormap[i].alpha); } /* Posterize image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) SetPixelRed(image,PosterizePixel(GetPixelRed(image,q)),q); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) SetPixelGreen(image,PosterizePixel(GetPixelGreen(image,q)),q); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) SetPixelBlue(image,PosterizePixel(GetPixelBlue(image,q)),q); if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) SetPixelBlack(image,PosterizePixel(GetPixelBlack(image,q)),q); if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait == BlendPixelTrait)) SetPixelAlpha(image,PosterizePixel(GetPixelAlpha(image,q)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_PosterizeImage) #endif proceed=SetImageProgress(image,PosterizeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL); quantize_info->number_colors=(size_t) MagickMin((ssize_t) levels*levels* levels,MaxColormapSize+1); quantize_info->dither_method=dither_method; quantize_info->tree_depth=MaxTreeDepth; status=QuantizeImage(quantize_info,image,exception); quantize_info=DestroyQuantizeInfo(quantize_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e C h i l d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneChild() deletes the given node and merges its statistics into its % parent. % % The format of the PruneChild method is: % % PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneChild(CubeInfo *cube_info,const NodeInfo *node_info) { NodeInfo *parent; register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneChild(cube_info,node_info->child[i]); /* Merge color statistics into parent.
*/ parent=node_info->parent; parent->number_unique+=node_info->number_unique; parent->total_color.red+=node_info->total_color.red; parent->total_color.green+=node_info->total_color.green; parent->total_color.blue+=node_info->total_color.blue; parent->total_color.alpha+=node_info->total_color.alpha; parent->child[node_info->id]=(NodeInfo *) NULL; cube_info->nodes--; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e L e v e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneLevel() deletes all nodes at the bottom level of the color tree merging % their color statistics into their parent node. % % The format of the PruneLevel method is: % % PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneLevel(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneLevel(cube_info,node_info->child[i]); if (node_info->level == cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P r u n e T o C u b e D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PruneToCubeDepth() deletes any nodes at a depth greater than % cube_info->depth while merging their color statistics into their parent % node. % % The format of the PruneToCubeDepth method is: % % PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void PruneToCubeDepth(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) PruneToCubeDepth(cube_info,node_info->child[i]); if (node_info->level > cube_info->depth) PruneChild(cube_info,node_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImage() analyzes the colors within a reference image and chooses a % fixed number of colors to represent the image. The goal of the algorithm % is to minimize the color difference between the input and output image while % minimizing the processing time. % % The format of the QuantizeImage method is: % % MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, % Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType QuantizeImage(const QuantizeInfo *quantize_info, Image *image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; size_t depth, maximum_colors; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; if (image->alpha_trait != BlendPixelTrait) { if (SetImageGray(image,exception) != MagickFalse) (void) SetGrayscaleImage(image,exception); } if ((image->storage_class == PseudoClass) && (image->colors <= maximum_colors)) { if ((quantize_info->colorspace != UndefinedColorspace) && (quantize_info->colorspace != CMYKColorspace)) (void) TransformImageColorspace(image,quantize_info->colorspace, exception); return(MagickTrue); } depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if ((quantize_info->dither_method != NoDitherMethod) && (depth > 2)) depth--; if ((image->alpha_trait == BlendPixelTrait) && (depth > 5)) depth--; if (SetImageGray(image,exception) != MagickFalse) depth=MaxTreeDepth; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,image,exception); if (status != MagickFalse) { /* Reduce the number of colors in the image if it contains more than the maximum, otherwise we can disable dithering to improve the performance. */ if (cube_info->colors > cube_info->maximum_colors) ReduceImageColors(image,cube_info); else cube_info->quantize_info->dither_method=NoDitherMethod; status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % Q u a n t i z e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeImages() analyzes the colors within a set of reference images and % chooses a fixed number of colors to represent the set. The goal of the % algorithm is to minimize the color difference between the input and output % images while minimizing the processing time. % % The format of the QuantizeImages method is: % % MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, % Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: Specifies a pointer to a list of Image structures. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType QuantizeImages(const QuantizeInfo *quantize_info, Image *images,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType proceed, status; MagickProgressMonitor progress_monitor; register ssize_t i; size_t depth, maximum_colors, number_images; assert(quantize_info != (const QuantizeInfo *) NULL); assert(quantize_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (GetNextImageInList(images) == (Image *) NULL) { /* Handle a single image with QuantizeImage. */ status=QuantizeImage(quantize_info,images,exception); return(status); } status=MagickFalse; maximum_colors=quantize_info->number_colors; if (maximum_colors == 0) maximum_colors=MaxColormapSize; if (maximum_colors > MaxColormapSize) maximum_colors=MaxColormapSize; depth=quantize_info->tree_depth; if (depth == 0) { size_t colors; /* Depth of color tree is: Log4(colormap size)+2. */ colors=maximum_colors; for (depth=1; colors != 0; depth++) colors>>=2; if (quantize_info->dither_method != NoDitherMethod) depth--; } /* Initialize color cube. */ cube_info=GetCubeInfo(quantize_info,depth,maximum_colors); if (cube_info == (CubeInfo *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",images->filename); return(MagickFalse); } number_images=GetImageListLength(images); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL, image->client_data); status=ClassifyImageColors(cube_info,image,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor,image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } if (status != MagickFalse) { /* Reduce the number of colors in an image sequence. */ ReduceImageColors(images,cube_info); image=images; for (i=0; image != (Image *) NULL; i++) { progress_monitor=SetImageProgressMonitor(image,(MagickProgressMonitor) NULL,image->client_data); status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; (void) SetImageProgressMonitor(image,progress_monitor, image->client_data); proceed=SetImageProgress(image,AssignImageTag,(MagickOffsetType) i, number_images); if (proceed == MagickFalse) break; image=GetNextImageInList(image); } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Q u a n t i z e E r r o r F l a t t e n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % QuantizeErrorFlatten() traverses the color cube and flattens the quantization % error into a sorted 1D array. This accelerates the color reduction process. % % Contributed by Yoya. % % The format of the QuantizeErrorFlatten method is: % % size_t QuantizeErrorFlatten(const CubeInfo *cube_info, % const NodeInfo *node_info,const ssize_t offset, % double *quantize_error) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is current pointer. 
% % o offset: quantize error offset. % % o quantize_error: the quantization error vector. % */ static size_t QuantizeErrorFlatten(const CubeInfo *cube_info, const NodeInfo *node_info,const ssize_t offset,double *quantize_error) { register ssize_t i; size_t n, number_children; if (offset >= (ssize_t) cube_info->nodes) return(0); quantize_error[offset]=node_info->quantize_error; n=1; number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children ; i++) if (node_info->child[i] != (NodeInfo *) NULL) n+=QuantizeErrorFlatten(cube_info,node_info->child[i],offset+n, quantize_error); return(n); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Reduce() traverses the color cube tree and prunes any node whose % quantization error falls below a particular threshold. % % The format of the Reduce method is: % % Reduce(CubeInfo *cube_info,const NodeInfo *node_info) % % A description of each parameter follows. % % o cube_info: A pointer to the Cube structure. % % o node_info: pointer to node in color cube tree that is to be pruned. % */ static void Reduce(CubeInfo *cube_info,const NodeInfo *node_info) { register ssize_t i; size_t number_children; /* Traverse any children. */ number_children=cube_info->associate_alpha == MagickFalse ? 8UL : 16UL; for (i=0; i < (ssize_t) number_children; i++) if (node_info->child[i] != (NodeInfo *) NULL) Reduce(cube_info,node_info->child[i]); if (node_info->quantize_error <= cube_info->pruning_threshold) PruneChild(cube_info,node_info); else { /* Find minimum pruning threshold. */ if (node_info->number_unique > 0) cube_info->colors++; if (node_info->quantize_error < cube_info->next_threshold) cube_info->next_threshold=node_info->quantize_error; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e d u c e I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceImageColors() repeatedly prunes the tree until the number of nodes % with n2 > 0 is less than or equal to the maximum number of colors allowed % in the output image. On any given iteration over the tree, it selects % those nodes whose E value is minimal for pruning and merges their % color statistics upward. It uses a pruning threshold, Ep, to govern % node selection as follows: % % Ep = 0 % while number of nodes with (n2 > 0) > required maximum number of colors % prune all nodes such that E <= Ep % Set Ep to minimum E in remaining nodes % % This has the effect of minimizing any quantization error when merging % two nodes together. % % When a node to be pruned has offspring, the pruning procedure invokes % itself recursively in order to prune the tree from the leaves upward. % n2, Sr, Sg, and Sb in a node being pruned are always added to the % corresponding data in that node's parent. This retains the pruned % node's color characteristics for later averaging. % % For each node, n2 pixels exist for which that node represents the % smallest volume in RGB space containing those pixels' colors. When n2 % > 0 the node will uniquely define a color in the output image. At the % beginning of reduction, n2 = 0 for all nodes except the leaves of % the tree which represent colors present in the input image.
% % The other pixel count, n1, indicates the total number of colors % within the cubic volume which the node represents. This includes n1 - % n2 pixels whose colors should be defined by nodes at a lower level in % the tree. % % The format of the ReduceImageColors method is: % % ReduceImageColors(const Image *image,CubeInfo *cube_info) % % A description of each parameter follows. % % o image: the image. % % o cube_info: A pointer to the Cube structure. % */ static int QuantizeErrorCompare(const void *error_p,const void *error_q) { double *p, *q; p=(double *) error_p; q=(double *) error_q; if (*p > *q) return(1); if (fabs(*q-*p) <= MagickEpsilon) return(0); return(-1); } static void ReduceImageColors(const Image *image,CubeInfo *cube_info) { #define ReduceImageTag "Reduce/Image" MagickBooleanType proceed; MagickOffsetType offset; size_t span; cube_info->next_threshold=0.0; if (cube_info->colors > cube_info->maximum_colors) { double *quantize_error; /* Enable rapid reduction of the number of unique colors. */ quantize_error=(double *) AcquireQuantumMemory(cube_info->nodes, sizeof(*quantize_error)); if (quantize_error != (double *) NULL) { (void) QuantizeErrorFlatten(cube_info,cube_info->root,0, quantize_error); qsort(quantize_error,cube_info->nodes,sizeof(double), QuantizeErrorCompare); if (cube_info->nodes > (110*(cube_info->maximum_colors+1)/100)) cube_info->next_threshold=quantize_error[cube_info->nodes-110* (cube_info->maximum_colors+1)/100]; quantize_error=(double *) RelinquishMagickMemory(quantize_error); } } for (span=cube_info->colors; cube_info->colors > cube_info->maximum_colors; ) { cube_info->pruning_threshold=cube_info->next_threshold; cube_info->next_threshold=cube_info->root->quantize_error-1; cube_info->colors=0; Reduce(cube_info,cube_info->root); offset=(MagickOffsetType) span-cube_info->colors; proceed=SetImageProgress(image,ReduceImageTag,offset,span- cube_info->maximum_colors+1); if (proceed == MagickFalse) break; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImage() replaces the colors of an image with the closest of the colors % from the reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, % Image *image,const Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImage(const QuantizeInfo *quantize_info, Image *image,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; MagickBooleanType status; /* Initialize color cube. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(remap_image != (Image *) NULL); assert(remap_image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; status=AssignImageColors(image,cube_info,exception); } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m a p I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemapImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % The format of the RemapImage method is: % % MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, % Image *images,Image *remap_image,ExceptionInfo *exception) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. % % o remap_image: the reference image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType RemapImages(const QuantizeInfo *quantize_info, Image *images,const Image *remap_image,ExceptionInfo *exception) { CubeInfo *cube_info; Image *image; MagickBooleanType status; assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=images; if (remap_image == (Image *) NULL) { /* Create a global colormap for an image sequence. */ status=QuantizeImages(quantize_info,images,exception); return(status); } /* Classify image colors from the reference image. */ cube_info=GetCubeInfo(quantize_info,MaxTreeDepth, quantize_info->number_colors); if (cube_info == (CubeInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ClassifyImageColors(cube_info,remap_image,exception); if (status != MagickFalse) { /* Classify image colors from the reference image. */ cube_info->quantize_info->number_colors=cube_info->colors; image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) { status=AssignImageColors(image,cube_info,exception); if (status == MagickFalse) break; } } DestroyCubeInfo(cube_info); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t G r a y s c a l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetGrayscaleImage() converts an image to a PseudoClass grayscale image. % % The format of the SetGrayscaleImage method is: % % MagickBooleanType SetGrayscaleImage(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. 
% % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { double intensity; PixelInfo *color_1, *color_2; color_1=(PixelInfo *) x; color_2=(PixelInfo *) y; intensity=GetPixelInfoIntensity((const Image *) NULL,color_1)- GetPixelInfoIntensity((const Image *) NULL,color_2); return((int) intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif static MagickBooleanType SetGrayscaleImage(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo *colormap; register ssize_t i; ssize_t *colormap_index, j, y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->type != GrayscaleType) (void) TransformImageColorspace(image,GRAYColorspace,exception); colormap_index=(ssize_t *) AcquireQuantumMemory(MaxColormapSize, sizeof(*colormap_index)); if (colormap_index == (ssize_t *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); if (image->storage_class != PseudoClass) { (void) ResetMagickMemory(colormap_index,(-1),MaxColormapSize* sizeof(*colormap_index)); if (AcquireImageColormap(image,MaxColormapSize,exception) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); image->colors=0; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register size_t intensity; intensity=ScaleQuantumToMap(GetPixelRed(image,q)); if (colormap_index[intensity] < 0) { #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_SetGrayscaleImage) #endif if (colormap_index[intensity] < 0) { colormap_index[intensity]=(ssize_t) image->colors; image->colormap[image->colors].red=(double) GetPixelRed(image,q); image->colormap[image->colors].green=(double) GetPixelGreen(image,q); image->colormap[image->colors].blue=(double) GetPixelBlue(image,q); image->colors++; } } SetPixelIndex(image,(Quantum) colormap_index[intensity],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); } for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; qsort((void *) image->colormap,image->colors,sizeof(PixelInfo), IntensityCompare); colormap=(PixelInfo *) AcquireQuantumMemory(image->colors,sizeof(*colormap)); if (colormap == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); j=0; colormap[j]=image->colormap[0]; for (i=0; i < (ssize_t) image->colors; i++) { if (IsPixelInfoEquivalent(&colormap[j],&image->colormap[i]) == MagickFalse) { j++; colormap[j]=image->colormap[i]; } colormap_index[(ssize_t) image->colormap[i].alpha]=j; } image->colors=(size_t) (j+1); image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); image->colormap=colormap; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(image,(Quantum) colormap_index[ScaleQuantumToMap( GetPixelIndex(image,q))],q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); colormap_index=(ssize_t *) RelinquishMagickMemory(colormap_index); image->type=GrayscaleType; if (SetImageMonochrome(image,exception) != MagickFalse) image->type=BilevelType; return(status); }
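A minimal usage sketch for the quantization entry points above, assuming the ImageMagick 7 MagickCore API; the file names input.png and output.png and the palette size of 16 are illustrative placeholders, not part of the library:

#include <MagickCore/MagickCore.h>

int main(int argc,char **argv)
{
  ExceptionInfo
    *exception;

  Image
    *image;

  ImageInfo
    *image_info;

  QuantizeInfo
    *quantize_info;

  (void) argc;
  MagickCoreGenesis(*argv,MagickFalse);
  exception=AcquireExceptionInfo();
  image_info=CloneImageInfo((ImageInfo *) NULL);
  (void) CopyMagickString(image_info->filename,"input.png",MagickPathExtent);
  image=ReadImage(image_info,exception);
  if (image != (Image *) NULL)
    {
      /* Reduce the image to at most 16 colors with error diffusion. */
      quantize_info=AcquireQuantizeInfo((ImageInfo *) NULL);
      quantize_info->number_colors=16;
      quantize_info->dither_method=FloydSteinbergDitherMethod;
      (void) QuantizeImage(quantize_info,image,exception);
      quantize_info=DestroyQuantizeInfo(quantize_info);
      (void) CopyMagickString(image->filename,"output.png",MagickPathExtent);
      (void) WriteImage(image_info,image,exception);
      image=DestroyImage(image);
    }
  image_info=DestroyImageInfo(image_info);
  exception=DestroyExceptionInfo(exception);
  MagickCoreTerminus();
  return(0);
}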
GB_unaryop__ainv_uint32_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__ainv_uint32_uint32 // op(A') function: GB_tran__ainv_uint32_uint32 // C type: uint32_t // A type: uint32_t // cast: uint32_t cij = (uint32_t) aij // unaryop: cij = -aij #define GB_ATYPE \ uint32_t #define GB_CTYPE \ uint32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = -x ; // casting #define GB_CASTING(z, x) \ uint32_t z = (uint32_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_AINV || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__ainv_uint32_uint32 ( uint32_t *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__ainv_uint32_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
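To make the macro plumbing above concrete, here is a hand expansion of what GB_unop__ainv_uint32_uint32 computes once GB_GETA, GB_CASTING, and GB_OP are substituted into GB_CAST_OP. The function name ainv_uint32_expanded is illustrative and not part of GraphBLAS; compile with OpenMP for the pragma to take effect:

#include <stdint.h>

// Cx [p] = -(Ax [p]) for all p: unsigned negation wraps modulo 2^32, which
// is the defined behavior of the built-in GrB_AINV_UINT32 operator.
static void ainv_uint32_expanded (uint32_t *Cx, const uint32_t *Ax,
    int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        uint32_t aij = Ax [p] ;         // GB_GETA: aij = Ax [pA]
        uint32_t z = (uint32_t) aij ;   // GB_CASTING: identity cast here
        Cx [p] = -z ;                   // GB_OP: z = -x
    }
}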
GB_unop__identity_fp64_uint64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCUDA_DEV #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_fp64_uint64) // op(A') function: GB (_unop_tran__identity_fp64_uint64) // C type: double // A type: uint64_t // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_fp64_uint64) ( double *Cx, // Cx and Ax may be aliased const uint64_t *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; uint64_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_fp64_uint64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
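For orientation, generated kernels like the one above are reached through GrB_apply. A hedged sketch of the call that exercises this identity-with-typecast kernel when A has type GrB_UINT64; the helper name cast_to_fp64 is mine, and error checking is elided for brevity:

#include "GraphBLAS.h"

// C = (double) A: apply the identity operator with a uint64_t -> double
// typecast, which dispatches to GB (_unop_apply__identity_fp64_uint64)
// unless that kernel is compiled out via GB_DISABLE.
GrB_Info cast_to_fp64 (GrB_Matrix *C, GrB_Matrix A)
{
    GrB_Index nrows, ncols ;
    GrB_Matrix_nrows (&nrows, A) ;
    GrB_Matrix_ncols (&ncols, A) ;
    GrB_Matrix_new (C, GrB_FP64, nrows, ncols) ;
    return (GrB_Matrix_apply (*C, NULL, NULL, GrB_IDENTITY_FP64, A, NULL)) ;
}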
alloc_fail.c
// RUN: %libomptarget-compile-generic // RUN: %libomptarget-run-fail-generic 2>&1 \ // RUN: | %fcheck-generic // CHECK: Libomptarget message: explicit extension not allowed: host address specified is 0x{{.*}} (8 bytes), but device allocation maps to host at 0x{{.*}} (8 bytes) // CHECK: Libomptarget error: Call to getOrAllocTgtPtr returned null pointer (device failure or illegal mapping). // CHECK: Libomptarget fatal error 1: failure of target construct while offloading is mandatory int main() { int arr[4] = {0, 1, 2, 3}; #pragma omp target data map(alloc: arr[0:2]) #pragma omp target data map(alloc: arr[1:2]) ; return 0; }
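For contrast with the failing test above, an inner section that lies entirely inside the already-mapped range does not extend the mapping and is conforming. A sketch of a passing variant, not part of the original test:

// arr[1:1] is fully contained in the mapped arr[0:2], so the inner construct
// reuses the existing device allocation instead of attempting the illegal
// partial-overlap extension that arr[1:2] triggers.
int main() {
  int arr[4] = {0, 1, 2, 3};
#pragma omp target data map(alloc: arr[0:2])
#pragma omp target data map(alloc: arr[1:1])
  ;
  return 0;
}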
tensor_cpu-inl.h
/*! * Copyright (c) 2014 by Contributors * \file tensor_cpu-inl.h * \brief implementation of CPU host code * \author Bing Xu, Tianqi Chen */ #ifndef MSHADOW_TENSOR_CPU_INL_H_ #define MSHADOW_TENSOR_CPU_INL_H_ #include <cstring> #include <functional> #include <utility> #include <vector> #include "./base.h" #include "./tensor.h" #include "./packet-inl.h" #include "./dot_engine-inl.h" namespace mshadow { template<> inline void InitTensorEngine<cpu>(int dev_id) { } template<> inline void ShutdownTensorEngine<cpu>(void) { } template<> inline void SetDevice<cpu>(int devid) { } template<> inline Stream<cpu> *NewStream<cpu>(bool create_blas_handle, bool create_dnn_handle, int dev_id) { return new Stream<cpu>(); } template<> inline void DeleteStream<cpu>(Stream<cpu> *stream) { delete stream; } template<int ndim> inline std::ostream &operator<<(std::ostream &os, const Shape<ndim> &shape) { // NOLINT(*) os << '('; for (int i = 0; i < ndim; ++i) { if (i != 0) os << ','; os << shape[i]; } // python style tuple if (ndim == 1) os << ','; os << ')'; return os; } template<typename xpu> inline void *AllocHost_(size_t size); template<typename xpu> inline void FreeHost_(void * dptr); #ifdef __CUDACC__ template<> inline void *AllocHost_<gpu>(size_t size) { void *dptr; MSHADOW_CUDA_CALL(cudaMallocHost(&dptr, size, cudaHostAllocPortable)); return dptr; } template<> inline void FreeHost_<gpu>(void *dptr) { MSHADOW_CUDA_CALL(cudaFreeHost(dptr)); } #endif template<> inline void *AllocHost_<cpu>(size_t size) { size_t pitch; return packet::AlignedMallocPitch(&pitch, size, 1); } template<> inline void FreeHost_<cpu>(void *dptr) { packet::AlignedFree(dptr); } template<typename xpu, int dim, typename DType> inline void AllocHost(Tensor<cpu, dim, DType> *obj) { obj->stride_ = obj->size(dim - 1); CHECK_EQ(obj->CheckContiguous(), true) << "AllocHost"; void *dptr = AllocHost_<xpu>(obj->MSize() * sizeof(DType)); obj->dptr_ = reinterpret_cast<DType*>(dptr); } template<typename xpu, int dim, typename DType> inline void FreeHost(Tensor<cpu, dim, DType> *obj) { if (obj->dptr_ == NULL) { LOG(FATAL) << "FreeHost:: double free"; } FreeHost_<xpu>(obj->dptr_); obj->dptr_ = NULL; } template<int dim, typename DType> inline void AllocSpace(Tensor<cpu, dim, DType> *obj, bool pad) { size_t pitch; void *dptr; if (pad) { dptr = packet::AlignedMallocPitch (&pitch, obj->size(dim - 1) * sizeof(DType), obj->shape_.FlatTo2D()[0]); obj->stride_ = static_cast<index_t>(pitch / sizeof(DType)); } else { obj->stride_ = obj->size(dim - 1); dptr = packet::AlignedMallocPitch (&pitch, obj->shape_.Size() * sizeof(DType), 1); } obj->dptr_ = reinterpret_cast<DType*>(dptr); } template<typename Device, typename DType, int dim> inline Tensor<Device, dim, DType> NewTensor(const Shape<dim> &shape, DType initv, bool pad, Stream<Device> *stream_) { Tensor<Device, dim, DType> obj(shape); obj.stream_ = stream_; AllocSpace(&obj, pad); MapExp<sv::saveto>(&obj, expr::ScalarExp<DType>(initv)); return obj; } template<int dim, typename DType> inline void FreeSpace(Tensor<cpu, dim, DType> *obj) { packet::AlignedFree(obj->dptr_); obj->dptr_ = NULL; } template<int dim, typename DType> inline void Copy(Tensor<cpu, dim, DType> _dst, const Tensor<cpu, dim, DType> &_src, Stream<cpu> *stream) { #pragma GCC diagnostic push #if __GNUC__ >= 8 #pragma GCC diagnostic ignored "-Wclass-memaccess" #endif CHECK_EQ(_dst.shape_, _src.shape_) << "Copy:shape mismatch:" << _dst.shape_ << " vs " << _src.shape_; if (_dst.CheckContiguous() && _src.CheckContiguous()) { memcpy(_dst.dptr_, 
_src.dptr_, sizeof(DType) * _dst.shape_.Size()); } else { Tensor<cpu, 2, DType> dst = _dst.FlatTo2D(); Tensor<cpu, 2, DType> src = _src.FlatTo2D(); for (index_t y = 0; y < dst.size(0); ++y) { memcpy(dst[y].dptr_, src[y].dptr_, sizeof(DType) * dst.size(1)); } } #pragma GCC diagnostic pop } template<typename Saver, typename R, int dim, typename DType, typename E> inline void MapPlan(TRValue<R, cpu, dim, DType> *dst, const expr::Plan<E, DType> &plan) { Shape<2> shape = expr::ShapeCheck<dim, R>::Check(dst->self()).FlatTo2D(); expr::Plan<R, DType> dplan = expr::MakePlan(dst->self()); #ifndef __CUDACC__ #pragma omp parallel for #endif // temp remove openmp, as default setting throttles CPU for (openmp_index_t y = 0; y < shape[0]; ++y) { for (index_t x = 0; x < shape[1]; ++x) { // trust your compiler! -_- they will optimize it Saver::template Save<DType>(dplan.REval(y, x), plan.Eval(y, x)); } } } // code to handle SSE optimization template<bool pass_check, typename Saver, typename R, int dim, typename DType, typename E, int etype> struct MapExpCPUEngine { inline static void Map(TRValue<R, cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { MapPlan<Saver>(dst, MakePlan(exp.self())); } }; template<typename SV, int dim, typename DType, typename E, int etype> struct MapExpCPUEngine<true, SV, Tensor<cpu, dim, DType>, dim, DType, E, etype> { inline static void Map(Tensor<cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { if (expr::PacketAlignCheck<dim, E, MSHADOW_DEFAULT_PACKET>::Check(exp.self()) && expr::PacketAlignCheck<dim, Tensor<cpu, dim, DType>, MSHADOW_DEFAULT_PACKET>::Check(*dst)) { expr::MapPacketPlan<SV>(dst->self(), expr::MakePacketPlan<MSHADOW_DEFAULT_PACKET>(exp.self())); } else { MapPlan<SV>(dst, MakePlan(exp.self())); } } }; template<typename Saver, typename R, int dim, typename DType, typename E, int etype> inline void MapExp(TRValue<R, cpu, dim, DType> *dst, const expr::Exp<E, DType, etype> &exp) { expr::TypeCheckPass<expr::TypeCheck<cpu, dim, DType, E>::kMapPass> ::Error_All_Tensor_in_Exp_Must_Have_Same_Type(); Shape<dim> eshape = expr::ShapeCheck<dim, E>::Check(exp.self()); Shape<dim> dshape = expr::ShapeCheck<dim, R>::Check(dst->self()); CHECK(eshape[0] == 0 || eshape == dshape) << "Assignment: Shape of Tensors are not consistent with target, " << "eshape: " << eshape << " dshape:" << dshape; MapExpCPUEngine<expr::PacketCheck<E, MSHADOW_DEFAULT_PACKET>::kPass, Saver, R, dim, DType, E, etype> ::Map(dst->ptrself(), exp); } template<typename Saver, typename Reducer, typename R, typename DType, typename E, int etype> inline void MapReduceKeepLowest(TRValue<R, cpu, 1, DType> *dst, const expr::Exp<E, DType, etype> &exp, DType scale) { expr::TypeCheckPass<expr::TypeCheck<cpu, 1, DType, E>::kRedPass> ::Error_TypeCheck_Not_Pass_For_Reduce_Exp(); Shape<2> eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E> ::Check(exp.self()).FlatTo2D(); Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self()); CHECK_EQ(eshape[1], dshape[0]) << "MapReduceKeepLowest::reduction dimension do not match"; CHECK_NE(eshape[0], 0U) << "can not reduce over empty tensor"; // execution expr::Plan<R, DType> dplan = MakePlan(dst->self()); expr::Plan<E, DType> splan = MakePlan(exp.self()); #ifndef __CUDACC__ #pragma omp parallel for #endif for (openmp_index_t x = 0; x < eshape[1]; ++x) { DType res = splan.Eval(0, x); for (index_t y = 1; y < eshape[0]; ++y) { Reducer::Reduce(res, splan.Eval(y, x)); } Saver::template Save<DType>(dplan.REval(0, x), res * scale); } } template<typename Saver, 
typename Reducer, int dimkeep, typename R, typename DType, typename E, int etype> inline void MapReduceKeepHighDim(TRValue<R, cpu, 1, DType> *dst, const expr::Exp<E, DType, etype> &exp, DType scale) { expr::TypeCheckPass<expr::TypeCheck<cpu, dimkeep, DType, E>::kRedPass> ::Error_TypeCheck_Not_Pass_For_Reduce_Exp(); typedef Shape<expr::ExpInfo<E>::kDim> EShape; EShape eshape = expr::ShapeCheck<expr::ExpInfo<E>::kDim, E> ::Check(exp.self()); Shape<1> dshape = expr::ShapeCheck<1, R>::Check(dst->self()); CHECK_EQ(eshape[dimkeep], dshape[0]) << "MapReduceKeepHighDim::reduction dimension do not match"; // use equvalent form Shape<4> pshape = Shape4(eshape.ProdShape(0, dimkeep), eshape[dimkeep], eshape.ProdShape(dimkeep + 1, EShape::kSubdim), eshape[EShape::kSubdim]); // execution expr::Plan<R, DType> dplan = MakePlan(dst->self()); expr::Plan<E, DType> splan = MakePlan(exp.self()); #ifndef __CUDACC__ #pragma omp parallel for #endif for (openmp_index_t c = 0; c < pshape[1]; ++c) { DType res; Reducer::SetInitValue(res); for (index_t n = 0; n < pshape[0]; ++n) { DType tres; Reducer::SetInitValue(tres); for (index_t y = 0; y < pshape[2]; ++y) { for (index_t x = 0; x < pshape[3]; ++x) { Reducer::Reduce(tres, splan.Eval((n * pshape[1] + c) * pshape[2] + y, x)); } } Reducer::Reduce(res, tres); } Saver::template Save<DType>(dplan.REval(0, c), DType(res * scale)); } } template<typename DType> inline void Softmax(Tensor<cpu, 1, DType> dst, const Tensor<cpu, 1, DType> &energy) { DType mmax = energy[0]; for (index_t x = 1; x < dst.size(0); ++x) { if (mmax < energy[x]) mmax = energy[x]; } DType sum = DType(0.0f); for (index_t x = 0; x < dst.size(0); ++x) { dst[x] = std::exp(energy[x] - mmax); sum += dst[x]; } for (index_t x = 0; x < dst.size(0); ++x) { dst[x] /= sum; } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &src, const Tensor<cpu, 1, DType> &label) { #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { const index_t k = static_cast<int>(label[y]); for (index_t x = 0; x < dst.size(1); ++x) { if (x == k) { dst[y][k] = src[y][k] - 1.0f; } else { dst[y][x] = src[y][x]; } } } } template<typename DType> inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &src, const Tensor<cpu, 1, DType> &label, const float alpha) { const float smooth_grad = (alpha / (dst.size(1) - 1)); #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { const index_t k = static_cast<int>(label[y]); for (index_t x = 0; x < dst.size(1); ++x) { if (x == k) { dst[y][k] = src[y][k] - 1.0f + alpha; } else { dst[y][x] = src[y][x] - smooth_grad; } } } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &src, const Tensor<cpu, 1, DType> &label, const DType &ignore_label) { #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y]); for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (static_cast<int>(ignore_label) == k) { dst[y][x] = 0.0f; } else { if (x == k) { dst[y][k] = src[y][k] - 1.0f; } else { dst[y][x] = src[y][x]; } } } } } template<typename DType> inline void SmoothSoftmaxGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &src, const Tensor<cpu, 1, DType> &label, const DType &ignore_label, const float alpha) { const float smooth_grad = (alpha / (dst.size(1) - 1)); #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { const int k = 
static_cast<int>(label[y]); for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (static_cast<int>(ignore_label) == k) { dst[y][x] = 0.0f; } else { if (x == k) { dst[y][k] = src[y][k] - 1.0f + alpha; } else { dst[y][x] = src[y][x] - smooth_grad; } } } } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &src, const Tensor<cpu, 2, DType> &label) { #pragma omp parallel for for (openmp_index_t n = 0; n < dst.size(2); ++n) { for (index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y][n]); for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (x == k) { dst[y][k][n] = src[y][k][n] - 1.0f; } else { dst[y][x][n] = src[y][x][n]; } } } } } template<typename DType> inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &src, const Tensor<cpu, 2, DType> &label, const float alpha) { const float smooth_grad = (alpha / (dst.size(1) - 1)); #pragma omp parallel for for (openmp_index_t n = 0; n < dst.size(2); ++n) { for (index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y][n]); for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (x == k) { dst[y][k][n] = src[y][k][n] - 1.0f + alpha; } else { dst[y][x][n] = src[y][x][n] - smooth_grad; } } } } } template<typename DType> inline void SoftmaxGrad(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &src, const Tensor<cpu, 2, DType> &label, const DType &ignore_label) { #pragma omp parallel for for (openmp_index_t n = 0; n < dst.size(2); ++n) { for (index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y][n]); if (k == static_cast<int>(ignore_label)) { for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { dst[y][x][n] = DType(0.0f); } } else { for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (x == k) { dst[y][k][n] = src[y][k][n] - 1.0f; } else { dst[y][x][n] = src[y][x][n]; } } } } } } template<typename DType> inline void SmoothSoftmaxGrad(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &src, const Tensor<cpu, 2, DType> &label, const DType &ignore_label, const float alpha) { const float smooth_grad = (alpha / (dst.size(1) - 1)); #pragma omp parallel for for (openmp_index_t n = 0; n < dst.size(2); ++n) { for (index_t y = 0; y < dst.size(0); ++y) { const int k = static_cast<int>(label[y][n]); if (k == static_cast<int>(ignore_label)) { for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { dst[y][x][n] = DType(0.0f); } } else { for (int x = 0; x < static_cast<int>(dst.size(1)); ++x) { if (x == k) { dst[y][k][n] = src[y][k][n] - 1.0f + alpha; } else { dst[y][x][n] = src[y][x][n] - smooth_grad; } } } } } } template<typename DType> inline void Softmax(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 2, DType> &energy) { CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch"; #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { Softmax(dst[y], energy[y]); } } template<typename DType> inline void Softmax(Tensor<cpu, 3, DType> dst, const Tensor<cpu, 3, DType> &energy) { CHECK_EQ(dst.shape_, energy.shape_) << "Softmax: shape mismatch"; #pragma omp parallel for for (openmp_index_t y = 0; y < dst.size(0); ++y) { for (index_t n = 0; n < dst.size(2); ++n) { DType mmax = energy[y][0][n]; for (index_t x = 1; x < dst.size(1); ++x) { if (mmax < energy[y][x][n]) mmax = energy[y][x][n]; } DType sum = DType(0.0f); for (index_t x = 0; x < dst.size(1); ++x) { dst[y][x][n] = std::exp(energy[y][x][n] - mmax); sum += dst[y][x][n]; } for (index_t x = 0; x 
< dst.size(1); ++x) { dst[y][x][n] /= sum; } } } } template<bool clip, typename IndexType, typename DType> inline void AddTakeGrad(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 1, IndexType>& index, const Tensor<cpu, 2, DType> &src) { const index_t K = dst.shape_[0]; const index_t C = dst.shape_[1]; for (index_t y = 0; y < index.size(0); ++y) { index_t j = index[y]; if (clip) { if (j <= 0) j = 0; else if (j >= K) j = K - 1; } else { j %= K; if (j < 0) j += K; } for (index_t i = 0; i < C; ++i) { dst[j][i] += src[y][i]; } } } template<typename IndexType, typename DType> inline void AddTakeGradLargeBatch(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 1, IndexType>& sorted, const Tensor<cpu, 1, IndexType>& index, const Tensor<cpu, 2, DType> &src) { for (index_t y = 0; y < sorted.size(0); ++y) { dst[sorted[y]] += src[index[y]]; } } template<typename IndexType, typename DType> inline void IndexFill(Tensor<cpu, 2, DType> dst, const Tensor<cpu, 1, IndexType>& index, const Tensor<cpu, 2, DType> &src) { for (index_t y = 0; y < index.size(0); ++y) { for (index_t j = 0; j < src.size(1); j++) { dst[index[y]][j] = src[y][j]; } } } template<typename KDType, typename VDType> inline void SortByKey(Tensor<cpu, 1, KDType> keys, Tensor<cpu, 1, VDType> values, bool is_ascend) { CHECK_EQ(keys.CheckContiguous(), true); CHECK_EQ(values.CheckContiguous(), true); CHECK_EQ(keys.size(0), values.size(0)) << "The sizes of key/value are not equal! keys_size: " << keys.size(0) << " values_size: " << values.size(0); std::vector<size_t> idx(keys.size(0)); std::vector<KDType> keys_vec(keys.size(0)); std::vector<VDType> values_vec(values.size(0)); for (int i = 0; i < keys.size(0); i++) { idx[i] = i; keys_vec[i] = keys[i]; values_vec[i] = values[i]; } if (is_ascend) { std::stable_sort(idx.begin(), idx.end(), [&keys_vec](size_t i1, size_t i2) {return keys_vec[i1] < keys_vec[i2]; }); } else { std::stable_sort(idx.begin(), idx.end(), [&keys_vec](size_t i1, size_t i2) {return keys_vec[i1] > keys_vec[i2]; }); } for (index_t i = 0; i < values.size(0); i++) { keys[i] = keys_vec[idx[i]]; values[i] = values_vec[idx[i]]; } } template<typename Device, typename VDType, typename SDType> inline void VectorizedSort(Tensor<Device, 1, VDType> values, Tensor<Device, 1, SDType> segments) { // We can sort each segment using two stable sorts SortByKey(values, segments, true); SortByKey(segments, values, true); } // blas related template<typename Device, typename DType> inline void VectorDot(Tensor<Device, 1, DType> dst, const Tensor<Device, 1, DType> &lhs, const Tensor<Device, 1, DType> &rhs) { CHECK_EQ(lhs.size(0), rhs.size(0)) << "VectorDot: Shape mismatch"; CHECK_EQ(dst.size(0), 1U) << "VectorDot: expect dst to be scalar"; expr::BLASEngine<Device, DType>::SetStream(lhs.stream_); mshadow::expr::BLASEngine<Device, DType>::dot( lhs.stream_, lhs.size(0), lhs.dptr_, 1, rhs.dptr_, 1, dst.dptr_); } template<bool transpose_left, bool transpose_right, typename Device, typename DType> inline void BatchGEMM(Tensor<Device, 3, DType> dst, const Tensor<Device, 3, DType> &lhs, const Tensor<Device, 3, DType> &rhs, DType alpha, DType beta, Tensor<Device, 1, DType*> workspace) { index_t batch_size = dst.shape_[0]; expr::BLASEngine<Device, DType>::SetStream(dst.stream_); Shape<3> sleft = transpose_left ? Shape3(lhs.shape_[0], lhs.shape_[2], lhs.shape_[1]) : lhs.shape_; Shape<3> sright = transpose_right ?
Shape3(rhs.shape_[0], rhs.shape_[2], rhs.shape_[1]) : rhs.shape_; CHECK_EQ(dst.CheckContiguous(), true); CHECK_EQ(lhs.CheckContiguous(), true); CHECK_EQ(rhs.CheckContiguous(), true); CHECK(sleft[0] == batch_size && sright[0] == batch_size) << "BatchGEMM: batch sizes must be equal.\n" << "dst: " << dst.shape_ << "\n" << "lhs: " << sleft << "\n" << "rhs: " << sright << "\n"; CHECK(dst.size(1) == sleft[1] && dst.size(2) == sright[2] && sleft[2] == sright[1]) << "BatchGEMM: matrix shape mismatch\n" << "dst: " << dst.shape_ << "\n" << "lhs: " << sleft << "\n" << "rhs: " << sright << "\n"; CHECK(workspace.size(0) >= 3 * batch_size) << "Workspace size must be at least " << 3 * batch_size; CHECK_EQ(workspace.CheckContiguous(), true); // use column-major arguments to be compatible with most BLAS: the row-major product C = A * B is computed as the column-major product C^T = B^T * A^T, which is why rhs is passed first expr::BLASEngine<Device, DType>::batched_gemm (dst.stream_, transpose_right, transpose_left, transpose_right ? rhs.size(1) : rhs.size(2), transpose_left ? lhs.size(2) : lhs.size(1), transpose_right ? rhs.size(2) : rhs.size(1), alpha, rhs.dptr_, rhs.stride_, lhs.dptr_, lhs.stride_, beta, dst.dptr_, dst.stride_, batch_size, workspace.dptr_); } } // namespace mshadow #endif // MSHADOW_TENSOR_CPU_INL_H_
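The VectorizedSort above performs a segmented sort with no per-segment bookkeeping: it stable-sorts by value first, then stable-sorts by segment id, and stability guarantees that the within-segment value order from the first pass survives the second. The sketch below is a minimal standalone illustration of that trick; it uses std::stable_sort on std::vector stand-ins rather than mshadow tensors, and StableSortByKey is a hypothetical helper mirroring the index-permutation approach of SortByKey above.

// Minimal sketch of the two-stable-sort segmented sort (illustrative only).
#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

// Stable-sort `keys` ascending, applying the same permutation to `values`.
template <typename K, typename V>
void StableSortByKey(std::vector<K>* keys, std::vector<V>* values) {
  std::vector<size_t> idx(keys->size());
  std::iota(idx.begin(), idx.end(), size_t(0));
  std::stable_sort(idx.begin(), idx.end(),
                   [&](size_t a, size_t b) { return (*keys)[a] < (*keys)[b]; });
  std::vector<K> k(keys->size());
  std::vector<V> v(values->size());
  for (size_t i = 0; i < idx.size(); ++i) {
    k[i] = (*keys)[idx[i]];
    v[i] = (*values)[idx[i]];
  }
  keys->swap(k);
  values->swap(v);
}

int main() {
  std::vector<float> values = {3.f, 1.f, 2.f, 9.f, 4.f};
  std::vector<int> segments = {1, 0, 1, 0, 0};
  StableSortByKey(&values, &segments);  // pass 1: order by value
  StableSortByKey(&segments, &values);  // pass 2: group by segment id, stably
  for (size_t i = 0; i < values.size(); ++i)
    std::printf("seg=%d val=%g\n", segments[i], values[i]);
  // Prints seg=0 with 1, 4, 9, then seg=1 with 2, 3: each segment is sorted.
  return 0;
}

Because the second pass is stable, equal segment ids keep the relative order established by the value sort, so values end up ascending within every segment; the same reasoning applies to the tensor version above.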
parser.c
/* -*- C++ -*- Parser. Copyright (C) 2000-2015 Free Software Foundation, Inc. Written by Mark Mitchell <[email protected]>. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "timevar.h" #include "cpplib.h" #include "hash-set.h" #include "machmode.h" #include "vec.h" #include "double-int.h" #include "input.h" #include "alias.h" #include "symtab.h" #include "wide-int.h" #include "inchash.h" #include "tree.h" #include "print-tree.h" #include "stringpool.h" #include "attribs.h" #include "trans-mem.h" #include "cp-tree.h" #include "intl.h" #include "c-family/c-pragma.h" #include "decl.h" #include "flags.h" #include "diagnostic-core.h" #include "target.h" #include "hash-map.h" #include "is-a.h" #include "plugin-api.h" #include "hard-reg-set.h" #include "input.h" #include "function.h" #include "ipa-ref.h" #include "cgraph.h" #include "c-family/c-common.h" #include "c-family/c-objc.h" #include "plugin.h" #include "tree-pretty-print.h" #include "parser.h" #include "type-utils.h" #include "omp-low.h" #include "gomp-constants.h" /* The lexer. */ /* The cp_lexer_* routines mediate between the lexer proper (in libcpp and c-lex.c) and the C++ parser. */ static cp_token eof_token = { CPP_EOF, RID_MAX, 0, PRAGMA_NONE, false, false, false, 0, { NULL } }; /* The various kinds of non-integral constant we encounter. */ typedef enum non_integral_constant { NIC_NONE, /* floating-point literal */ NIC_FLOAT, /* %<this%> */ NIC_THIS, /* %<__FUNCTION__%> */ NIC_FUNC_NAME, /* %<__PRETTY_FUNCTION__%> */ NIC_PRETTY_FUNC, /* %<__func__%> */ NIC_C99_FUNC, /* %<va_arg%> */ NIC_VA_ARG, /* a cast */ NIC_CAST, /* %<typeid%> operator */ NIC_TYPEID, /* non-constant compound literals */ NIC_NCC, /* a function call */ NIC_FUNC_CALL, /* an increment */ NIC_INC, /* a decrement */ NIC_DEC, /* an array reference */ NIC_ARRAY_REF, /* %<->%> */ NIC_ARROW, /* %<.%> */ NIC_POINT, /* the address of a label */ NIC_ADDR_LABEL, /* %<*%> */ NIC_STAR, /* %<&%> */ NIC_ADDR, /* %<++%> */ NIC_PREINCREMENT, /* %<--%> */ NIC_PREDECREMENT, /* %<new%> */ NIC_NEW, /* %<delete%> */ NIC_DEL, /* calls to overloaded operators */ NIC_OVERLOADED, /* an assignment */ NIC_ASSIGNMENT, /* a comma operator */ NIC_COMMA, /* a call to a constructor */ NIC_CONSTRUCTOR, /* a transaction expression */ NIC_TRANSACTION } non_integral_constant; /* The various kinds of errors about name-lookup failing.
*/ typedef enum name_lookup_error { /* NULL */ NLE_NULL, /* is not a type */ NLE_TYPE, /* is not a class or namespace */ NLE_CXX98, /* is not a class, namespace, or enumeration */ NLE_NOT_CXX98 } name_lookup_error; /* The various kinds of required token */ typedef enum required_token { RT_NONE, RT_SEMICOLON, /* ';' */ RT_OPEN_PAREN, /* '(' */ RT_CLOSE_BRACE, /* '}' */ RT_OPEN_BRACE, /* '{' */ RT_CLOSE_SQUARE, /* ']' */ RT_OPEN_SQUARE, /* '[' */ RT_COMMA, /* ',' */ RT_SCOPE, /* '::' */ RT_LESS, /* '<' */ RT_GREATER, /* '>' */ RT_EQ, /* '=' */ RT_ELLIPSIS, /* '...' */ RT_MULT, /* '*' */ RT_COMPL, /* '~' */ RT_COLON, /* ':' */ RT_COLON_SCOPE, /* ':' or '::' */ RT_CLOSE_PAREN, /* ')' */ RT_COMMA_CLOSE_PAREN, /* ',' or ')' */ RT_PRAGMA_EOL, /* end of line */ RT_NAME, /* identifier */ /* The type is CPP_KEYWORD */ RT_NEW, /* new */ RT_DELETE, /* delete */ RT_RETURN, /* return */ RT_WHILE, /* while */ RT_EXTERN, /* extern */ RT_STATIC_ASSERT, /* static_assert */ RT_DECLTYPE, /* decltype */ RT_OPERATOR, /* operator */ RT_CLASS, /* class */ RT_TEMPLATE, /* template */ RT_NAMESPACE, /* namespace */ RT_USING, /* using */ RT_ASM, /* asm */ RT_TRY, /* try */ RT_CATCH, /* catch */ RT_THROW, /* throw */ RT_LABEL, /* __label__ */ RT_AT_TRY, /* @try */ RT_AT_SYNCHRONIZED, /* @synchronized */ RT_AT_THROW, /* @throw */ RT_SELECT, /* selection-statement */ RT_INTERATION, /* iteration-statement */ RT_JUMP, /* jump-statement */ RT_CLASS_KEY, /* class-key */ RT_CLASS_TYPENAME_TEMPLATE, /* class, typename, or template */ RT_TRANSACTION_ATOMIC, /* __transaction_atomic */ RT_TRANSACTION_RELAXED, /* __transaction_relaxed */ RT_TRANSACTION_CANCEL /* __transaction_cancel */ } required_token; /* Prototypes. */ static cp_lexer *cp_lexer_new_main (void); static cp_lexer *cp_lexer_new_from_tokens (cp_token_cache *tokens); static void cp_lexer_destroy (cp_lexer *); static int cp_lexer_saving_tokens (const cp_lexer *); static cp_token *cp_lexer_token_at (cp_lexer *, cp_token_position); static void cp_lexer_get_preprocessor_token (cp_lexer *, cp_token *); static inline cp_token *cp_lexer_peek_token (cp_lexer *); static cp_token *cp_lexer_peek_nth_token (cp_lexer *, size_t); static inline bool cp_lexer_next_token_is (cp_lexer *, enum cpp_ttype); static bool cp_lexer_next_token_is_not (cp_lexer *, enum cpp_ttype); static bool cp_lexer_next_token_is_keyword (cp_lexer *, enum rid); static cp_token *cp_lexer_consume_token (cp_lexer *); static void cp_lexer_purge_token (cp_lexer *); static void cp_lexer_purge_tokens_after (cp_lexer *, cp_token_position); static void cp_lexer_save_tokens (cp_lexer *); static void cp_lexer_commit_tokens (cp_lexer *); static void cp_lexer_rollback_tokens (cp_lexer *); static void cp_lexer_print_token (FILE *, cp_token *); static inline bool cp_lexer_debugging_p (cp_lexer *); static void cp_lexer_start_debugging (cp_lexer *) ATTRIBUTE_UNUSED; static void cp_lexer_stop_debugging (cp_lexer *) ATTRIBUTE_UNUSED; static cp_token_cache *cp_token_cache_new (cp_token *, cp_token *); static void cp_parser_initial_pragma (cp_token *); static tree cp_literal_operator_id (const char *); static void cp_parser_cilk_simd (cp_parser *, cp_token *); static tree cp_parser_cilk_for (cp_parser *, tree); static bool cp_parser_omp_declare_reduction_exprs (tree, cp_parser *); static tree cp_parser_cilk_simd_vectorlength (cp_parser *, tree, bool); /* Manifest constants. */ #define CP_LEXER_BUFFER_SIZE ((256 * 1024) / sizeof (cp_token)) #define CP_SAVED_TOKEN_STACK 5 /* Variables. 
*/ /* The stream to which debugging output should be written. */ static FILE *cp_lexer_debug_stream; /* Nonzero if we are parsing an unevaluated operand: an operand to sizeof, typeof, or alignof. */ int cp_unevaluated_operand; /* Dump up to NUM tokens in BUFFER to FILE starting with token START_TOKEN. If START_TOKEN is NULL, the dump starts with the first token in BUFFER. If NUM is 0, dump all the tokens. If CURR_TOKEN is set and it is one of the tokens in BUFFER, it will be highlighted by surrounding it in [[ ]]. */ static void cp_lexer_dump_tokens (FILE *file, vec<cp_token, va_gc> *buffer, cp_token *start_token, unsigned num, cp_token *curr_token) { unsigned i, nprinted; cp_token *token; bool do_print; fprintf (file, "%u tokens\n", vec_safe_length (buffer)); if (buffer == NULL) return; if (num == 0) num = buffer->length (); if (start_token == NULL) start_token = buffer->address (); if (start_token > buffer->address ()) { cp_lexer_print_token (file, &(*buffer)[0]); fprintf (file, " ... "); } do_print = false; nprinted = 0; for (i = 0; buffer->iterate (i, &token) && nprinted < num; i++) { if (token == start_token) do_print = true; if (!do_print) continue; nprinted++; if (token == curr_token) fprintf (file, "[["); cp_lexer_print_token (file, token); if (token == curr_token) fprintf (file, "]]"); switch (token->type) { case CPP_SEMICOLON: case CPP_OPEN_BRACE: case CPP_CLOSE_BRACE: case CPP_EOF: fputc ('\n', file); break; default: fputc (' ', file); } } if (i == num && i < buffer->length ()) { fprintf (file, " ... "); cp_lexer_print_token (file, &buffer->last ()); } fprintf (file, "\n"); } /* Dump all tokens in BUFFER to stderr. */ void cp_lexer_debug_tokens (vec<cp_token, va_gc> *buffer) { cp_lexer_dump_tokens (stderr, buffer, NULL, 0, NULL); } DEBUG_FUNCTION void debug (vec<cp_token, va_gc> &ref) { cp_lexer_dump_tokens (stderr, &ref, NULL, 0, NULL); } DEBUG_FUNCTION void debug (vec<cp_token, va_gc> *ptr) { if (ptr) debug (*ptr); else fprintf (stderr, "<nil>\n"); } /* Dump the cp_parser tree field T to FILE if T is non-NULL. DESC is the description for T. */ static void cp_debug_print_tree_if_set (FILE *file, const char *desc, tree t) { if (t) { fprintf (file, "%s: ", desc); print_node_brief (file, "", t, 0); } } /* Dump parser context C to FILE. */ static void cp_debug_print_context (FILE *file, cp_parser_context *c) { const char *status_s[] = { "OK", "ERROR", "COMMITTED" }; fprintf (file, "{ status = %s, scope = ", status_s[c->status]); print_node_brief (file, "", c->object_type, 0); fprintf (file, "}\n"); } /* Print the stack of parsing contexts to FILE starting with FIRST. */ static void cp_debug_print_context_stack (FILE *file, cp_parser_context *first) { unsigned i; cp_parser_context *c; fprintf (file, "Parsing context stack:\n"); for (i = 0, c = first; c; c = c->next, i++) { fprintf (file, "\t#%u: ", i); cp_debug_print_context (file, c); } } /* Print the value of FLAG to FILE. DESC is a string describing the flag. */ static void cp_debug_print_flag (FILE *file, const char *desc, bool flag) { if (flag) fprintf (file, "%s: true\n", desc); } /* Print an unparsed function entry UF to FILE. 
*/ static void cp_debug_print_unparsed_function (FILE *file, cp_unparsed_functions_entry *uf) { unsigned i; cp_default_arg_entry *default_arg_fn; tree fn; fprintf (file, "\tFunctions with default args:\n"); for (i = 0; vec_safe_iterate (uf->funs_with_default_args, i, &default_arg_fn); i++) { fprintf (file, "\t\tClass type: "); print_node_brief (file, "", default_arg_fn->class_type, 0); fprintf (file, "\t\tDeclaration: "); print_node_brief (file, "", default_arg_fn->decl, 0); fprintf (file, "\n"); } fprintf (file, "\n\tFunctions with definitions that require " "post-processing\n\t\t"); for (i = 0; vec_safe_iterate (uf->funs_with_definitions, i, &fn); i++) { print_node_brief (file, "", fn, 0); fprintf (file, " "); } fprintf (file, "\n"); fprintf (file, "\n\tNon-static data members with initializers that require " "post-processing\n\t\t"); for (i = 0; vec_safe_iterate (uf->nsdmis, i, &fn); i++) { print_node_brief (file, "", fn, 0); fprintf (file, " "); } fprintf (file, "\n"); } /* Print the stack of unparsed member functions S to FILE. */ static void cp_debug_print_unparsed_queues (FILE *file, vec<cp_unparsed_functions_entry, va_gc> *s) { unsigned i; cp_unparsed_functions_entry *uf; fprintf (file, "Unparsed functions\n"); for (i = 0; vec_safe_iterate (s, i, &uf); i++) { fprintf (file, "#%u:\n", i); cp_debug_print_unparsed_function (file, uf); } } /* Dump the tokens in a window of size WINDOW_SIZE around the next_token for the given PARSER. If FILE is NULL, the output is printed on stderr. */ static void cp_debug_parser_tokens (FILE *file, cp_parser *parser, int window_size) { cp_token *next_token, *first_token, *start_token; if (file == NULL) file = stderr; next_token = parser->lexer->next_token; first_token = parser->lexer->buffer->address (); start_token = (next_token > first_token + window_size / 2) ? next_token - window_size / 2 : first_token; cp_lexer_dump_tokens (file, parser->lexer->buffer, start_token, window_size, next_token); } /* Dump debugging information for the given PARSER. If FILE is NULL, the output is printed on stderr. 
*/ void cp_debug_parser (FILE *file, cp_parser *parser) { const size_t window_size = 20; cp_token *token; expanded_location eloc; if (file == NULL) file = stderr; fprintf (file, "Parser state\n\n"); fprintf (file, "Number of tokens: %u\n", vec_safe_length (parser->lexer->buffer)); cp_debug_print_tree_if_set (file, "Lookup scope", parser->scope); cp_debug_print_tree_if_set (file, "Object scope", parser->object_scope); cp_debug_print_tree_if_set (file, "Qualifying scope", parser->qualifying_scope); cp_debug_print_context_stack (file, parser->context); cp_debug_print_flag (file, "Allow GNU extensions", parser->allow_gnu_extensions_p); cp_debug_print_flag (file, "'>' token is greater-than", parser->greater_than_is_operator_p); cp_debug_print_flag (file, "Default args allowed in current " "parameter list", parser->default_arg_ok_p); cp_debug_print_flag (file, "Parsing integral constant-expression", parser->integral_constant_expression_p); cp_debug_print_flag (file, "Allow non-constant expression in current " "constant-expression", parser->allow_non_integral_constant_expression_p); cp_debug_print_flag (file, "Seen non-constant expression", parser->non_integral_constant_expression_p); cp_debug_print_flag (file, "Local names and 'this' forbidden in " "current context", parser->local_variables_forbidden_p); cp_debug_print_flag (file, "In unbraced linkage specification", parser->in_unbraced_linkage_specification_p); cp_debug_print_flag (file, "Parsing a declarator", parser->in_declarator_p); cp_debug_print_flag (file, "In template argument list", parser->in_template_argument_list_p); cp_debug_print_flag (file, "Parsing an iteration statement", parser->in_statement & IN_ITERATION_STMT); cp_debug_print_flag (file, "Parsing a switch statement", parser->in_statement & IN_SWITCH_STMT); cp_debug_print_flag (file, "Parsing a structured OpenMP block", parser->in_statement & IN_OMP_BLOCK); cp_debug_print_flag (file, "Parsing a Cilk Plus for loop", parser->in_statement & IN_CILK_SIMD_FOR); cp_debug_print_flag (file, "Parsing an OpenMP loop", parser->in_statement & IN_OMP_FOR); cp_debug_print_flag (file, "Parsing an if statement", parser->in_statement & IN_IF_STMT); cp_debug_print_flag (file, "Parsing a type-id in an expression " "context", parser->in_type_id_in_expr_p); cp_debug_print_flag (file, "Declarations are implicitly extern \"C\"", parser->implicit_extern_c); cp_debug_print_flag (file, "String expressions should be translated " "to execution character set", parser->translate_strings_p); cp_debug_print_flag (file, "Parsing function body outside of a " "local class", parser->in_function_body); cp_debug_print_flag (file, "Auto correct a colon to a scope operator", parser->colon_corrects_to_scope_p); cp_debug_print_flag (file, "Colon doesn't start a class definition", parser->colon_doesnt_start_class_def_p); if (parser->type_definition_forbidden_message) fprintf (file, "Error message for forbidden type definitions: %s\n", parser->type_definition_forbidden_message); cp_debug_print_unparsed_queues (file, parser->unparsed_queues); fprintf (file, "Number of class definitions in progress: %u\n", parser->num_classes_being_defined); fprintf (file, "Number of template parameter lists for the current " "declaration: %u\n", parser->num_template_parameter_lists); cp_debug_parser_tokens (file, parser, window_size); token = parser->lexer->next_token; fprintf (file, "Next token to parse:\n"); fprintf (file, "\tToken: "); cp_lexer_print_token (file, token); eloc = expand_location (token->location); fprintf (file,
"\n\tFile: %s\n", eloc.file); fprintf (file, "\tLine: %d\n", eloc.line); fprintf (file, "\tColumn: %d\n", eloc.column); } DEBUG_FUNCTION void debug (cp_parser &ref) { cp_debug_parser (stderr, &ref); } DEBUG_FUNCTION void debug (cp_parser *ptr) { if (ptr) debug (*ptr); else fprintf (stderr, "<nil>\n"); } /* Allocate memory for a new lexer object and return it. */ static cp_lexer * cp_lexer_alloc (void) { cp_lexer *lexer; c_common_no_more_pch (); /* Allocate the memory. */ lexer = ggc_cleared_alloc<cp_lexer> (); /* Initially we are not debugging. */ lexer->debugging_p = false; lexer->saved_tokens.create (CP_SAVED_TOKEN_STACK); /* Create the buffer. */ vec_alloc (lexer->buffer, CP_LEXER_BUFFER_SIZE); return lexer; } /* Create a new main C++ lexer, the lexer that gets tokens from the preprocessor. */ static cp_lexer * cp_lexer_new_main (void) { cp_lexer *lexer; cp_token token; /* It's possible that parsing the first pragma will load a PCH file, which is a GC collection point. So we have to do that before allocating any memory. */ cp_parser_initial_pragma (&token); lexer = cp_lexer_alloc (); /* Put the first token in the buffer. */ lexer->buffer->quick_push (token); /* Get the remaining tokens from the preprocessor. */ while (token.type != CPP_EOF) { cp_lexer_get_preprocessor_token (lexer, &token); vec_safe_push (lexer->buffer, token); } lexer->last_token = lexer->buffer->address () + lexer->buffer->length () - 1; lexer->next_token = lexer->buffer->length () ? lexer->buffer->address () : &eof_token; /* Subsequent preprocessor diagnostics should use compiler diagnostic functions to get the compiler source location. */ done_lexing = true; gcc_assert (!lexer->next_token->purged_p); return lexer; } /* Create a new lexer whose token stream is primed with the tokens in CACHE. When these tokens are exhausted, no new tokens will be read. */ static cp_lexer * cp_lexer_new_from_tokens (cp_token_cache *cache) { cp_token *first = cache->first; cp_token *last = cache->last; cp_lexer *lexer = ggc_cleared_alloc<cp_lexer> (); /* We do not own the buffer. */ lexer->buffer = NULL; lexer->next_token = first == last ? &eof_token : first; lexer->last_token = last; lexer->saved_tokens.create (CP_SAVED_TOKEN_STACK); /* Initially we are not debugging. */ lexer->debugging_p = false; gcc_assert (!lexer->next_token->purged_p); return lexer; } /* Frees all resources associated with LEXER. */ static void cp_lexer_destroy (cp_lexer *lexer) { vec_free (lexer->buffer); lexer->saved_tokens.release (); ggc_free (lexer); } /* Returns nonzero if debugging information should be output. */ static inline bool cp_lexer_debugging_p (cp_lexer *lexer) { return lexer->debugging_p; } static inline cp_token_position cp_lexer_token_position (cp_lexer *lexer, bool previous_p) { gcc_assert (!previous_p || lexer->next_token != &eof_token); return lexer->next_token - previous_p; } static inline cp_token * cp_lexer_token_at (cp_lexer * /*lexer*/, cp_token_position pos) { return pos; } static inline void cp_lexer_set_token_position (cp_lexer *lexer, cp_token_position pos) { lexer->next_token = cp_lexer_token_at (lexer, pos); } static inline cp_token_position cp_lexer_previous_token_position (cp_lexer *lexer) { if (lexer->next_token == &eof_token) return lexer->last_token - 1; else return cp_lexer_token_position (lexer, true); } static inline cp_token * cp_lexer_previous_token (cp_lexer *lexer) { cp_token_position tp = cp_lexer_previous_token_position (lexer); return cp_lexer_token_at (lexer, tp); } /* nonzero if we are presently saving tokens. 
*/ static inline int cp_lexer_saving_tokens (const cp_lexer* lexer) { return lexer->saved_tokens.length () != 0; } /* Store the next token from the preprocessor in *TOKEN. If LEXER is NULL, assume we are handling an initial #pragma pch_preprocess, and thus want the lexer to return processed strings. */ static void cp_lexer_get_preprocessor_token (cp_lexer *lexer, cp_token *token) { static int is_extern_c = 0; /* Get a new token from the preprocessor. */ token->type = c_lex_with_flags (&token->u.value, &token->location, &token->flags, lexer == NULL ? 0 : C_LEX_STRING_NO_JOIN); token->keyword = RID_MAX; token->pragma_kind = PRAGMA_NONE; token->purged_p = false; token->error_reported = false; /* On some systems, some header files are surrounded by an implicit extern "C" block. Set a flag in the token if it comes from such a header. */ is_extern_c += pending_lang_change; pending_lang_change = 0; token->implicit_extern_c = is_extern_c > 0; /* Check to see if this token is a keyword. */ if (token->type == CPP_NAME) { if (C_IS_RESERVED_WORD (token->u.value)) { /* Mark this token as a keyword. */ token->type = CPP_KEYWORD; /* Record which keyword. */ token->keyword = C_RID_CODE (token->u.value); } else { if (warn_cxx0x_compat && C_RID_CODE (token->u.value) >= RID_FIRST_CXX0X && C_RID_CODE (token->u.value) <= RID_LAST_CXX0X) { /* Warn about the C++0x keyword (but still treat it as an identifier). */ warning (OPT_Wc__0x_compat, "identifier %qE is a keyword in C++11", token->u.value); /* Clear out the C_RID_CODE so we don't warn about this particular identifier-turned-keyword again. */ C_SET_RID_CODE (token->u.value, RID_MAX); } token->keyword = RID_MAX; } } else if (token->type == CPP_AT_NAME) { /* This only happens in Objective-C++; it must be a keyword. */ token->type = CPP_KEYWORD; switch (C_RID_CODE (token->u.value)) { /* Replace 'class' with '@class', 'private' with '@private', etc. This prevents confusion with the C++ keyword 'class', and makes the tokens consistent with other Objective-C 'AT' keywords. For example '@class' is reported as RID_AT_CLASS which is consistent with '@synchronized', which is reported as RID_AT_SYNCHRONIZED. */ case RID_CLASS: token->keyword = RID_AT_CLASS; break; case RID_PRIVATE: token->keyword = RID_AT_PRIVATE; break; case RID_PROTECTED: token->keyword = RID_AT_PROTECTED; break; case RID_PUBLIC: token->keyword = RID_AT_PUBLIC; break; case RID_THROW: token->keyword = RID_AT_THROW; break; case RID_TRY: token->keyword = RID_AT_TRY; break; case RID_CATCH: token->keyword = RID_AT_CATCH; break; default: token->keyword = C_RID_CODE (token->u.value); } } else if (token->type == CPP_PRAGMA) { /* We smuggled the cpp_token->u.pragma value in an INTEGER_CST. */ token->pragma_kind = ((enum pragma_kind) TREE_INT_CST_LOW (token->u.value)); token->u.value = NULL_TREE; } } /* Update the globals input_location and the input file stack from TOKEN. */ static inline void cp_lexer_set_source_position_from_token (cp_token *token) { if (token->type != CPP_EOF) { input_location = token->location; } } /* Update the globals input_location and the input file stack from LEXER. */ static inline void cp_lexer_set_source_position (cp_lexer *lexer) { cp_token *token = cp_lexer_peek_token (lexer); cp_lexer_set_source_position_from_token (token); } /* Return a pointer to the next token in the token stream, but do not consume it.
*/ static inline cp_token * cp_lexer_peek_token (cp_lexer *lexer) { if (cp_lexer_debugging_p (lexer)) { fputs ("cp_lexer: peeking at token: ", cp_lexer_debug_stream); cp_lexer_print_token (cp_lexer_debug_stream, lexer->next_token); putc ('\n', cp_lexer_debug_stream); } return lexer->next_token; } /* Return true if the next token has the indicated TYPE. */ static inline bool cp_lexer_next_token_is (cp_lexer* lexer, enum cpp_ttype type) { return cp_lexer_peek_token (lexer)->type == type; } /* Return true if the next token does not have the indicated TYPE. */ static inline bool cp_lexer_next_token_is_not (cp_lexer* lexer, enum cpp_ttype type) { return !cp_lexer_next_token_is (lexer, type); } /* Return true if the next token is the indicated KEYWORD. */ static inline bool cp_lexer_next_token_is_keyword (cp_lexer* lexer, enum rid keyword) { return cp_lexer_peek_token (lexer)->keyword == keyword; } static inline bool cp_lexer_nth_token_is (cp_lexer* lexer, size_t n, enum cpp_ttype type) { return cp_lexer_peek_nth_token (lexer, n)->type == type; } static inline bool cp_lexer_nth_token_is_keyword (cp_lexer* lexer, size_t n, enum rid keyword) { return cp_lexer_peek_nth_token (lexer, n)->keyword == keyword; } /* Return true if the next token is not the indicated KEYWORD. */ static inline bool cp_lexer_next_token_is_not_keyword (cp_lexer* lexer, enum rid keyword) { return cp_lexer_peek_token (lexer)->keyword != keyword; } /* Return true if the next token is a keyword for a decl-specifier. */ static bool cp_lexer_next_token_is_decl_specifier_keyword (cp_lexer *lexer) { cp_token *token; token = cp_lexer_peek_token (lexer); switch (token->keyword) { /* auto specifier: storage-class-specifier in C++, simple-type-specifier in C++0x. */ case RID_AUTO: /* Storage classes. */ case RID_REGISTER: case RID_STATIC: case RID_EXTERN: case RID_MUTABLE: case RID_THREAD: /* Elaborated type specifiers. */ case RID_ENUM: case RID_CLASS: case RID_STRUCT: case RID_UNION: case RID_TYPENAME: /* Simple type specifiers. */ case RID_CHAR: case RID_CHAR16: case RID_CHAR32: case RID_WCHAR: case RID_BOOL: case RID_SHORT: case RID_INT: case RID_LONG: case RID_SIGNED: case RID_UNSIGNED: case RID_FLOAT: case RID_DOUBLE: case RID_VOID: /* GNU extensions. */ case RID_ATTRIBUTE: case RID_TYPEOF: /* C++0x extensions. */ case RID_DECLTYPE: case RID_UNDERLYING_TYPE: return true; default: if (token->keyword >= RID_FIRST_INT_N && token->keyword < RID_FIRST_INT_N + NUM_INT_N_ENTS && int_n_enabled_p[token->keyword - RID_FIRST_INT_N]) return true; return false; } } /* Returns TRUE iff the token T begins a decltype type. */ static bool token_is_decltype (cp_token *t) { return (t->keyword == RID_DECLTYPE || t->type == CPP_DECLTYPE); } /* Returns TRUE iff the next token begins a decltype type. */ static bool cp_lexer_next_token_is_decltype (cp_lexer *lexer) { cp_token *t = cp_lexer_peek_token (lexer); return token_is_decltype (t); } /* Return a pointer to the Nth token in the token stream. If N is 1, then this is precisely equivalent to cp_lexer_peek_token (except that it is not inline). One would like to disallow that case, but there is one case (cp_parser_nth_token_starts_template_id) where the caller passes a variable for N and it might be 1. */ static cp_token * cp_lexer_peek_nth_token (cp_lexer* lexer, size_t n) { cp_token *token; /* N is 1-based, not zero-based. 
*/ gcc_assert (n > 0); if (cp_lexer_debugging_p (lexer)) fprintf (cp_lexer_debug_stream, "cp_lexer: peeking ahead %ld at token: ", (long)n); --n; token = lexer->next_token; gcc_assert (!n || token != &eof_token); while (n != 0) { ++token; if (token == lexer->last_token) { token = &eof_token; break; } if (!token->purged_p) --n; } if (cp_lexer_debugging_p (lexer)) { cp_lexer_print_token (cp_lexer_debug_stream, token); putc ('\n', cp_lexer_debug_stream); } return token; } /* Return the next token, and advance the lexer's next_token pointer to point to the next non-purged token. */ static cp_token * cp_lexer_consume_token (cp_lexer* lexer) { cp_token *token = lexer->next_token; gcc_assert (token != &eof_token); gcc_assert (!lexer->in_pragma || token->type != CPP_PRAGMA_EOL); do { lexer->next_token++; if (lexer->next_token == lexer->last_token) { lexer->next_token = &eof_token; break; } } while (lexer->next_token->purged_p); cp_lexer_set_source_position_from_token (token); /* Provide debugging output. */ if (cp_lexer_debugging_p (lexer)) { fputs ("cp_lexer: consuming token: ", cp_lexer_debug_stream); cp_lexer_print_token (cp_lexer_debug_stream, token); putc ('\n', cp_lexer_debug_stream); } return token; } /* Permanently remove the next token from the token stream, and advance the next_token pointer to refer to the next non-purged token. */ static void cp_lexer_purge_token (cp_lexer *lexer) { cp_token *tok = lexer->next_token; gcc_assert (tok != &eof_token); tok->purged_p = true; tok->location = UNKNOWN_LOCATION; tok->u.value = NULL_TREE; tok->keyword = RID_MAX; do { tok++; if (tok == lexer->last_token) { tok = &eof_token; break; } } while (tok->purged_p); lexer->next_token = tok; } /* Permanently remove all tokens after TOK, up to, but not including, the token that will be returned next by cp_lexer_peek_token. */ static void cp_lexer_purge_tokens_after (cp_lexer *lexer, cp_token *tok) { cp_token *peek = lexer->next_token; if (peek == &eof_token) peek = lexer->last_token; gcc_assert (tok < peek); for ( tok += 1; tok != peek; tok += 1) { tok->purged_p = true; tok->location = UNKNOWN_LOCATION; tok->u.value = NULL_TREE; tok->keyword = RID_MAX; } } /* Begin saving tokens. All tokens consumed after this point will be preserved. */ static void cp_lexer_save_tokens (cp_lexer* lexer) { /* Provide debugging output. */ if (cp_lexer_debugging_p (lexer)) fprintf (cp_lexer_debug_stream, "cp_lexer: saving tokens\n"); lexer->saved_tokens.safe_push (lexer->next_token); } /* Commit to the portion of the token stream most recently saved. */ static void cp_lexer_commit_tokens (cp_lexer* lexer) { /* Provide debugging output. */ if (cp_lexer_debugging_p (lexer)) fprintf (cp_lexer_debug_stream, "cp_lexer: committing tokens\n"); lexer->saved_tokens.pop (); } /* Return all tokens saved since the last call to cp_lexer_save_tokens to the token stream. Stop saving tokens. */ static void cp_lexer_rollback_tokens (cp_lexer* lexer) { /* Provide debugging output. */ if (cp_lexer_debugging_p (lexer)) fprintf (cp_lexer_debug_stream, "cp_lexer: restoring tokens\n"); lexer->next_token = lexer->saved_tokens.pop (); } /* RAII wrapper around the above functions, with sanity checking. Creating a variable saves tokens, which are committed when the variable is destroyed unless they are explicitly rolled back by calling the rollback member function. 
*/ struct saved_token_sentinel { cp_lexer *lexer; unsigned len; bool commit; saved_token_sentinel(cp_lexer *lexer): lexer(lexer), commit(true) { len = lexer->saved_tokens.length (); cp_lexer_save_tokens (lexer); } void rollback () { cp_lexer_rollback_tokens (lexer); commit = false; } ~saved_token_sentinel() { if (commit) cp_lexer_commit_tokens (lexer); gcc_assert (lexer->saved_tokens.length () == len); } }; /* Print a representation of the TOKEN on the STREAM. */ static void cp_lexer_print_token (FILE * stream, cp_token *token) { /* We don't use cpp_type2name here because the parser defines a few tokens of its own. */ static const char *const token_names[] = { /* cpplib-defined token types */ #define OP(e, s) #e, #define TK(e, s) #e, TTYPE_TABLE #undef OP #undef TK /* C++ parser token types - see "Manifest constants", above. */ "KEYWORD", "TEMPLATE_ID", "NESTED_NAME_SPECIFIER", }; /* For some tokens, print the associated data. */ switch (token->type) { case CPP_KEYWORD: /* Some keywords have a value that is not an IDENTIFIER_NODE. For example, `struct' is mapped to an INTEGER_CST. */ if (!identifier_p (token->u.value)) break; /* else fall through */ case CPP_NAME: fputs (IDENTIFIER_POINTER (token->u.value), stream); break; case CPP_STRING: case CPP_STRING16: case CPP_STRING32: case CPP_WSTRING: case CPP_UTF8STRING: fprintf (stream, " \"%s\"", TREE_STRING_POINTER (token->u.value)); break; case CPP_NUMBER: print_generic_expr (stream, token->u.value, 0); break; default: /* If we have a name for the token, print it out. Otherwise, we simply give the numeric code. */ if (token->type < ARRAY_SIZE(token_names)) fputs (token_names[token->type], stream); else fprintf (stream, "[%d]", token->type); break; } } DEBUG_FUNCTION void debug (cp_token &ref) { cp_lexer_print_token (stderr, &ref); fprintf (stderr, "\n"); } DEBUG_FUNCTION void debug (cp_token *ptr) { if (ptr) debug (*ptr); else fprintf (stderr, "<nil>\n"); } /* Start emitting debugging information. */ static void cp_lexer_start_debugging (cp_lexer* lexer) { lexer->debugging_p = true; cp_lexer_debug_stream = stderr; } /* Stop emitting debugging information. */ static void cp_lexer_stop_debugging (cp_lexer* lexer) { lexer->debugging_p = false; cp_lexer_debug_stream = NULL; } /* Create a new cp_token_cache, representing a range of tokens. */ static cp_token_cache * cp_token_cache_new (cp_token *first, cp_token *last) { cp_token_cache *cache = ggc_alloc<cp_token_cache> (); cache->first = first; cache->last = last; return cache; } /* Diagnose if #pragma omp declare simd isn't followed immediately by function declaration or definition. */ static inline void cp_ensure_no_omp_declare_simd (cp_parser *parser) { if (parser->omp_declare_simd && !parser->omp_declare_simd->error_seen) { error ("%<#pragma omp declare simd%> not immediately followed by " "function declaration or definition"); parser->omp_declare_simd = NULL; } } /* Finalize #pragma omp declare simd clauses after FNDECL has been parsed, and put that into "omp declare simd" attribute. */ static inline void cp_finalize_omp_declare_simd (cp_parser *parser, tree fndecl) { if (__builtin_expect (parser->omp_declare_simd != NULL, 0)) { if (fndecl == error_mark_node) { parser->omp_declare_simd = NULL; return; } if (TREE_CODE (fndecl) != FUNCTION_DECL) { cp_ensure_no_omp_declare_simd (parser); return; } } } /* Decl-specifiers. */ /* Set *DECL_SPECS to represent an empty decl-specifier-seq. 
*/ static void clear_decl_specs (cp_decl_specifier_seq *decl_specs) { memset (decl_specs, 0, sizeof (cp_decl_specifier_seq)); } /* Declarators. */ /* Nothing other than the parser should be creating declarators; declarators are a semi-syntactic representation of C++ entities. Other parts of the front end that need to create entities (like VAR_DECLs or FUNCTION_DECLs) should do that directly. */ static cp_declarator *make_call_declarator (cp_declarator *, tree, cp_cv_quals, cp_virt_specifiers, cp_ref_qualifier, tree, tree); static cp_declarator *make_array_declarator (cp_declarator *, tree); static cp_declarator *make_pointer_declarator (cp_cv_quals, cp_declarator *, tree); static cp_declarator *make_reference_declarator (cp_cv_quals, cp_declarator *, bool, tree); static cp_parameter_declarator *make_parameter_declarator (cp_decl_specifier_seq *, cp_declarator *, tree); static cp_declarator *make_ptrmem_declarator (cp_cv_quals, tree, cp_declarator *, tree); /* An erroneous declarator. */ static cp_declarator *cp_error_declarator; /* The obstack on which declarators and related data structures are allocated. */ static struct obstack declarator_obstack; /* Alloc BYTES from the declarator memory pool. */ static inline void * alloc_declarator (size_t bytes) { return obstack_alloc (&declarator_obstack, bytes); } /* Allocate a declarator of the indicated KIND. Clear fields that are common to all declarators. */ static cp_declarator * make_declarator (cp_declarator_kind kind) { cp_declarator *declarator; declarator = (cp_declarator *) alloc_declarator (sizeof (cp_declarator)); declarator->kind = kind; declarator->attributes = NULL_TREE; declarator->std_attributes = NULL_TREE; declarator->declarator = NULL; declarator->parameter_pack_p = false; declarator->id_loc = UNKNOWN_LOCATION; return declarator; } /* Make a declarator for a generalized identifier. If QUALIFYING_SCOPE is non-NULL, the identifier is QUALIFYING_SCOPE::UNQUALIFIED_NAME; otherwise, it is just UNQUALIFIED_NAME. SFK indicates the kind of special function this is, if any. */ static cp_declarator * make_id_declarator (tree qualifying_scope, tree unqualified_name, special_function_kind sfk) { cp_declarator *declarator; /* It is valid to write: class C { void f(); }; typedef C D; void D::f(); The standard is not clear about whether `typedef const C D' is legal; as of 2002-09-15 the committee is considering that question. EDG 3.0 allows that syntax. Therefore, we do as well. */ if (qualifying_scope && TYPE_P (qualifying_scope)) qualifying_scope = TYPE_MAIN_VARIANT (qualifying_scope); gcc_assert (identifier_p (unqualified_name) || TREE_CODE (unqualified_name) == BIT_NOT_EXPR || TREE_CODE (unqualified_name) == TEMPLATE_ID_EXPR); declarator = make_declarator (cdk_id); declarator->u.id.qualifying_scope = qualifying_scope; declarator->u.id.unqualified_name = unqualified_name; declarator->u.id.sfk = sfk; return declarator; } /* Make a declarator for a pointer to TARGET. CV_QUALIFIERS is a list of modifiers such as const or volatile to apply to the pointer type, represented as identifiers. ATTRIBUTES represent the attributes that appertain to the pointer or reference. 
cp_declarator * make_pointer_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target, tree attributes) { cp_declarator *declarator; declarator = make_declarator (cdk_pointer); declarator->declarator = target; declarator->u.pointer.qualifiers = cv_qualifiers; declarator->u.pointer.class_type = NULL_TREE; if (target) { declarator->id_loc = target->id_loc; declarator->parameter_pack_p = target->parameter_pack_p; target->parameter_pack_p = false; } else declarator->parameter_pack_p = false; declarator->std_attributes = attributes; return declarator; } /* Like make_pointer_declarator -- but for references. ATTRIBUTES represent the attributes that appertain to the pointer or reference. */ cp_declarator * make_reference_declarator (cp_cv_quals cv_qualifiers, cp_declarator *target, bool rvalue_ref, tree attributes) { cp_declarator *declarator; declarator = make_declarator (cdk_reference); declarator->declarator = target; declarator->u.reference.qualifiers = cv_qualifiers; declarator->u.reference.rvalue_ref = rvalue_ref; if (target) { declarator->id_loc = target->id_loc; declarator->parameter_pack_p = target->parameter_pack_p; target->parameter_pack_p = false; } else declarator->parameter_pack_p = false; declarator->std_attributes = attributes; return declarator; } /* Like make_pointer_declarator -- but for a pointer to a non-static member of CLASS_TYPE. ATTRIBUTES represent the attributes that appertain to the pointer or reference. */ cp_declarator * make_ptrmem_declarator (cp_cv_quals cv_qualifiers, tree class_type, cp_declarator *pointee, tree attributes) { cp_declarator *declarator; declarator = make_declarator (cdk_ptrmem); declarator->declarator = pointee; declarator->u.pointer.qualifiers = cv_qualifiers; declarator->u.pointer.class_type = class_type; if (pointee) { declarator->parameter_pack_p = pointee->parameter_pack_p; pointee->parameter_pack_p = false; } else declarator->parameter_pack_p = false; declarator->std_attributes = attributes; return declarator; } /* Make a declarator for the function given by TARGET, with the indicated PARMS. The CV_QUALIFIERS apply to the function, as in a "const"-qualified member function. The EXCEPTION_SPECIFICATION indicates what exceptions can be thrown. */ cp_declarator * make_call_declarator (cp_declarator *target, tree parms, cp_cv_quals cv_qualifiers, cp_virt_specifiers virt_specifiers, cp_ref_qualifier ref_qualifier, tree exception_specification, tree late_return_type) { cp_declarator *declarator; declarator = make_declarator (cdk_function); declarator->declarator = target; declarator->u.function.parameters = parms; declarator->u.function.qualifiers = cv_qualifiers; declarator->u.function.virt_specifiers = virt_specifiers; declarator->u.function.ref_qualifier = ref_qualifier; declarator->u.function.exception_specification = exception_specification; declarator->u.function.late_return_type = late_return_type; if (target) { declarator->id_loc = target->id_loc; declarator->parameter_pack_p = target->parameter_pack_p; target->parameter_pack_p = false; } else declarator->parameter_pack_p = false; return declarator; } /* Make a declarator for an array of BOUNDS elements, each of which is defined by ELEMENT.
*/ cp_declarator * make_array_declarator (cp_declarator *element, tree bounds) { cp_declarator *declarator; declarator = make_declarator (cdk_array); declarator->declarator = element; declarator->u.array.bounds = bounds; if (element) { declarator->id_loc = element->id_loc; declarator->parameter_pack_p = element->parameter_pack_p; element->parameter_pack_p = false; } else declarator->parameter_pack_p = false; return declarator; } /* Determine whether the declarator we've seen so far can be a parameter pack, when followed by an ellipsis. */ static bool declarator_can_be_parameter_pack (cp_declarator *declarator) { /* Search for a declarator name, or any other declarator that goes after the point where the ellipsis could appear in a parameter pack. If we find any of these, then this declarator can not be made into a parameter pack. */ bool found = false; while (declarator && !found) { switch ((int)declarator->kind) { case cdk_id: case cdk_array: found = true; break; case cdk_error: return true; default: declarator = declarator->declarator; break; } } return !found; } cp_parameter_declarator *no_parameters; /* Create a parameter declarator with the indicated DECL_SPECIFIERS, DECLARATOR and DEFAULT_ARGUMENT. */ cp_parameter_declarator * make_parameter_declarator (cp_decl_specifier_seq *decl_specifiers, cp_declarator *declarator, tree default_argument) { cp_parameter_declarator *parameter; parameter = ((cp_parameter_declarator *) alloc_declarator (sizeof (cp_parameter_declarator))); parameter->next = NULL; if (decl_specifiers) parameter->decl_specifiers = *decl_specifiers; else clear_decl_specs (&parameter->decl_specifiers); parameter->declarator = declarator; parameter->default_argument = default_argument; parameter->ellipsis_p = false; return parameter; } /* Returns true iff DECLARATOR is a declaration for a function. */ static bool function_declarator_p (const cp_declarator *declarator) { while (declarator) { if (declarator->kind == cdk_function && declarator->declarator->kind == cdk_id) return true; if (declarator->kind == cdk_id || declarator->kind == cdk_error) return false; declarator = declarator->declarator; } return false; } /* The parser. */ /* Overview -------- A cp_parser parses the token stream as specified by the C++ grammar. Its job is purely parsing, not semantic analysis. For example, the parser breaks the token stream into declarators, expressions, statements, and other similar syntactic constructs. It does not check that the types of the expressions on either side of an assignment-statement are compatible, or that a function is not declared with a parameter of type `void'. The parser invokes routines elsewhere in the compiler to perform semantic analysis and to build up the abstract syntax tree for the code processed. The parser (and the template instantiation code, which is, in a way, a close relative of parsing) are the only parts of the compiler that should be calling push_scope and pop_scope, or related functions. The parser (and template instantiation code) keeps track of what scope is presently active; everything else should simply honor that. (The code that generates static initializers may also need to set the scope, in order to check access control correctly when emitting the initializers.) Methodology ----------- The parser is of the standard recursive-descent variety. Upcoming tokens in the token stream are examined in order to determine which production to use when parsing a non-terminal. Some C++ constructs require arbitrary look ahead to disambiguate. 
For example, it is impossible, in the general case, to tell whether a statement is an expression or declaration without scanning the entire statement. Therefore, the parser is capable of "parsing tentatively." When the parser is not sure what construct comes next, it enters this mode. Then, while we attempt to parse the construct, the parser queues up error messages, rather than issuing them immediately, and saves the tokens it consumes. If the construct is parsed successfully, the parser "commits", i.e., it issues any queued error messages and the tokens that were being preserved are permanently discarded. If, however, the construct is not parsed successfully, the parser rolls back its state completely so that it can resume parsing using a different alternative. Future Improvements ------------------- The performance of the parser could probably be improved substantially. We could often eliminate the need to parse tentatively by looking ahead a little bit. In some places, this approach might not entirely eliminate the need to parse tentatively, but it might still speed up the average case. */ /* Flags that are passed to some parsing functions. These values can be bitwise-ored together. */ enum { /* No flags. */ CP_PARSER_FLAGS_NONE = 0x0, /* The construct is optional. If it is not present, then no error should be issued. */ CP_PARSER_FLAGS_OPTIONAL = 0x1, /* When parsing a type-specifier, treat user-defined type-names as non-type identifiers. */ CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES = 0x2, /* When parsing a type-specifier, do not try to parse a class-specifier or enum-specifier. */ CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS = 0x4, /* When parsing a decl-specifier-seq, only allow type-specifier or constexpr. */ CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR = 0x8 }; /* This type is used for parameters and variables which hold combinations of the above flags. */ typedef int cp_parser_flags; /* The different kinds of declarators we want to parse. */ typedef enum cp_parser_declarator_kind { /* We want an abstract declarator. */ CP_PARSER_DECLARATOR_ABSTRACT, /* We want a named declarator. */ CP_PARSER_DECLARATOR_NAMED, /* We don't mind, but the name must be an unqualified-id. */ CP_PARSER_DECLARATOR_EITHER } cp_parser_declarator_kind; /* The precedence values used to parse binary expressions. The minimum value of PREC must be 1, because zero is reserved to quickly discriminate binary operators from other tokens. */ enum cp_parser_prec { PREC_NOT_OPERATOR, PREC_LOGICAL_OR_EXPRESSION, PREC_LOGICAL_AND_EXPRESSION, PREC_INCLUSIVE_OR_EXPRESSION, PREC_EXCLUSIVE_OR_EXPRESSION, PREC_AND_EXPRESSION, PREC_EQUALITY_EXPRESSION, PREC_RELATIONAL_EXPRESSION, PREC_SHIFT_EXPRESSION, PREC_ADDITIVE_EXPRESSION, PREC_MULTIPLICATIVE_EXPRESSION, PREC_PM_EXPRESSION, NUM_PREC_VALUES = PREC_PM_EXPRESSION }; /* A mapping from a token type to a corresponding tree node type, with a precedence value. */ typedef struct cp_parser_binary_operations_map_node { /* The token type. */ enum cpp_ttype token_type; /* The corresponding tree code. */ enum tree_code tree_type; /* The precedence of this operator. */ enum cp_parser_prec prec; } cp_parser_binary_operations_map_node; typedef struct cp_parser_expression_stack_entry { /* Left hand side of the binary operation we are currently parsing. */ tree lhs; /* Original tree code for left hand side, if it was a binary expression itself (used for -Wparentheses). */ enum tree_code lhs_type; /* Tree code for the binary operation we are parsing. 
*/ enum tree_code tree_type; /* Precedence of the binary operation we are parsing. */ enum cp_parser_prec prec; /* Location of the binary operation we are parsing. */ location_t loc; } cp_parser_expression_stack_entry; /* The stack for storing partial expressions. We only need NUM_PREC_VALUES entries because precedence levels on the stack are monotonically increasing. */ typedef struct cp_parser_expression_stack_entry cp_parser_expression_stack[NUM_PREC_VALUES]; /* Prototypes. */ /* Constructors and destructors. */ static cp_parser_context *cp_parser_context_new (cp_parser_context *); /* Class variables. */ static GTY((deletable)) cp_parser_context* cp_parser_context_free_list; /* The operator-precedence table used by cp_parser_binary_expression. Transformed into an associative array (binops_by_token) by cp_parser_new. */ static const cp_parser_binary_operations_map_node binops[] = { { CPP_DEREF_STAR, MEMBER_REF, PREC_PM_EXPRESSION }, { CPP_DOT_STAR, DOTSTAR_EXPR, PREC_PM_EXPRESSION }, { CPP_MULT, MULT_EXPR, PREC_MULTIPLICATIVE_EXPRESSION }, { CPP_DIV, TRUNC_DIV_EXPR, PREC_MULTIPLICATIVE_EXPRESSION }, { CPP_MOD, TRUNC_MOD_EXPR, PREC_MULTIPLICATIVE_EXPRESSION }, { CPP_PLUS, PLUS_EXPR, PREC_ADDITIVE_EXPRESSION }, { CPP_MINUS, MINUS_EXPR, PREC_ADDITIVE_EXPRESSION }, { CPP_LSHIFT, LSHIFT_EXPR, PREC_SHIFT_EXPRESSION }, { CPP_RSHIFT, RSHIFT_EXPR, PREC_SHIFT_EXPRESSION }, { CPP_LESS, LT_EXPR, PREC_RELATIONAL_EXPRESSION }, { CPP_GREATER, GT_EXPR, PREC_RELATIONAL_EXPRESSION }, { CPP_LESS_EQ, LE_EXPR, PREC_RELATIONAL_EXPRESSION }, { CPP_GREATER_EQ, GE_EXPR, PREC_RELATIONAL_EXPRESSION }, { CPP_EQ_EQ, EQ_EXPR, PREC_EQUALITY_EXPRESSION }, { CPP_NOT_EQ, NE_EXPR, PREC_EQUALITY_EXPRESSION }, { CPP_AND, BIT_AND_EXPR, PREC_AND_EXPRESSION }, { CPP_XOR, BIT_XOR_EXPR, PREC_EXCLUSIVE_OR_EXPRESSION }, { CPP_OR, BIT_IOR_EXPR, PREC_INCLUSIVE_OR_EXPRESSION }, { CPP_AND_AND, TRUTH_ANDIF_EXPR, PREC_LOGICAL_AND_EXPRESSION }, { CPP_OR_OR, TRUTH_ORIF_EXPR, PREC_LOGICAL_OR_EXPRESSION } }; /* The same as binops, but initialized by cp_parser_new so that binops_by_token[N].token_type == N. Used in cp_parser_binary_expression for speed. */ static cp_parser_binary_operations_map_node binops_by_token[N_CP_TTYPES]; /* Constructors and destructors. */ /* Construct a new context. The context below this one on the stack is given by NEXT. */ static cp_parser_context * cp_parser_context_new (cp_parser_context* next) { cp_parser_context *context; /* Allocate the storage. */ if (cp_parser_context_free_list != NULL) { /* Pull the first entry from the free list. */ context = cp_parser_context_free_list; cp_parser_context_free_list = context->next; memset (context, 0, sizeof (*context)); } else context = ggc_cleared_alloc<cp_parser_context> (); /* No errors have occurred yet in this context. */ context->status = CP_PARSER_STATUS_KIND_NO_ERROR; /* If this is not the bottommost context, copy information that we need from the previous context. */ if (next) { /* If, in the NEXT context, we are parsing an `x->' or `x.' expression, then we are parsing one in this context, too. */ context->object_type = next->object_type; /* Thread the stack. */ context->next = next; } return context; } /* Managing the unparsed function queues. 
*/ #define unparsed_funs_with_default_args \ parser->unparsed_queues->last ().funs_with_default_args #define unparsed_funs_with_definitions \ parser->unparsed_queues->last ().funs_with_definitions #define unparsed_nsdmis \ parser->unparsed_queues->last ().nsdmis #define unparsed_classes \ parser->unparsed_queues->last ().classes static void push_unparsed_function_queues (cp_parser *parser) { cp_unparsed_functions_entry e = {NULL, make_tree_vector (), NULL, NULL}; vec_safe_push (parser->unparsed_queues, e); } static void pop_unparsed_function_queues (cp_parser *parser) { release_tree_vector (unparsed_funs_with_definitions); parser->unparsed_queues->pop (); } /* Prototypes. */ /* Constructors and destructors. */ static cp_parser *cp_parser_new (void); /* Routines to parse various constructs. Those that return `tree' will return the error_mark_node (rather than NULL_TREE) if a parse error occurs, unless otherwise noted. Sometimes, they will return an ordinary node if error-recovery was attempted, even though a parse error occurred. So, to check whether or not a parse error occurred, you should always use cp_parser_error_occurred. If the construct is optional (indicated either by an `_opt' in the name of the function that does the parsing or via a FLAGS parameter), then NULL_TREE is returned if the construct is not present. */ /* Lexical conventions [gram.lex] */ static tree cp_parser_identifier (cp_parser *); static tree cp_parser_string_literal (cp_parser *, bool, bool, bool); static tree cp_parser_userdef_char_literal (cp_parser *); static tree cp_parser_userdef_string_literal (tree); static tree cp_parser_userdef_numeric_literal (cp_parser *); /* Basic concepts [gram.basic] */ static bool cp_parser_translation_unit (cp_parser *); /* Expressions [gram.expr] */ static tree cp_parser_primary_expression (cp_parser *, bool, bool, bool, cp_id_kind *); static tree cp_parser_id_expression (cp_parser *, bool, bool, bool *, bool, bool); static tree cp_parser_unqualified_id (cp_parser *, bool, bool, bool, bool); static tree cp_parser_nested_name_specifier_opt (cp_parser *, bool, bool, bool, bool); static tree cp_parser_nested_name_specifier (cp_parser *, bool, bool, bool, bool); static tree cp_parser_qualifying_entity (cp_parser *, bool, bool, bool, bool, bool); static tree cp_parser_postfix_expression (cp_parser *, bool, bool, bool, bool, cp_id_kind *); static tree cp_parser_postfix_open_square_expression (cp_parser *, tree, bool, bool); static tree cp_parser_postfix_dot_deref_expression (cp_parser *, enum cpp_ttype, tree, bool, cp_id_kind *, location_t); static vec<tree, va_gc> *cp_parser_parenthesized_expression_list (cp_parser *, int, bool, bool, bool *, bool = false); /* Values for the second parameter of cp_parser_parenthesized_expression_list. 
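Roughly: non_attr means the parenthesized list is not an attribute
   argument list; normal_attr means it is; id_attr means it is and,
   additionally, that a leading identifier should be treated as a plain
   identifier rather than looked up, as in __attribute__ ((mode (DI))).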
*/ enum { non_attr = 0, normal_attr = 1, id_attr = 2 }; static void cp_parser_pseudo_destructor_name (cp_parser *, tree, tree *, tree *); static tree cp_parser_unary_expression (cp_parser *, cp_id_kind * = NULL, bool = false, bool = false, bool = false); static enum tree_code cp_parser_unary_operator (cp_token *); static tree cp_parser_new_expression (cp_parser *); static vec<tree, va_gc> *cp_parser_new_placement (cp_parser *); static tree cp_parser_new_type_id (cp_parser *, tree *); static cp_declarator *cp_parser_new_declarator_opt (cp_parser *); static cp_declarator *cp_parser_direct_new_declarator (cp_parser *); static vec<tree, va_gc> *cp_parser_new_initializer (cp_parser *); static tree cp_parser_delete_expression (cp_parser *); static tree cp_parser_cast_expression (cp_parser *, bool, bool, bool, cp_id_kind *); static tree cp_parser_binary_expression (cp_parser *, bool, bool, enum cp_parser_prec, cp_id_kind *); static tree cp_parser_question_colon_clause (cp_parser *, tree); static tree cp_parser_assignment_expression (cp_parser *, cp_id_kind * = NULL, bool = false, bool = false); static enum tree_code cp_parser_assignment_operator_opt (cp_parser *); static tree cp_parser_expression (cp_parser *, cp_id_kind * = NULL, bool = false, bool = false); static tree cp_parser_constant_expression (cp_parser *, bool = false, bool * = NULL); static tree cp_parser_builtin_offsetof (cp_parser *); static tree cp_parser_lambda_expression (cp_parser *); static void cp_parser_lambda_introducer (cp_parser *, tree); static bool cp_parser_lambda_declarator_opt (cp_parser *, tree); static void cp_parser_lambda_body (cp_parser *, tree); /* Statements [gram.stmt.stmt] */ static void cp_parser_statement (cp_parser *, tree, bool, bool *); static void cp_parser_label_for_labeled_statement (cp_parser *, tree); static tree cp_parser_expression_statement (cp_parser *, tree); static tree cp_parser_compound_statement (cp_parser *, tree, bool, bool); static void cp_parser_statement_seq_opt (cp_parser *, tree); static tree cp_parser_selection_statement (cp_parser *, bool *); static tree cp_parser_condition (cp_parser *); static tree cp_parser_iteration_statement (cp_parser *, bool); static bool cp_parser_for_init_statement (cp_parser *, tree *decl); static tree cp_parser_for (cp_parser *, bool); static tree cp_parser_c_for (cp_parser *, tree, tree, bool); static tree cp_parser_range_for (cp_parser *, tree, tree, tree, bool); static void do_range_for_auto_deduction (tree, tree); static tree cp_parser_perform_range_for_lookup (tree, tree *, tree *); static tree cp_parser_range_for_member_function (tree, tree); static tree cp_parser_jump_statement (cp_parser *); static void cp_parser_declaration_statement (cp_parser *); static tree cp_parser_implicitly_scoped_statement (cp_parser *, bool *); static void cp_parser_already_scoped_statement (cp_parser *); /* Declarations [gram.dcl.dcl] */ static void cp_parser_declaration_seq_opt (cp_parser *); static void cp_parser_declaration (cp_parser *); static void cp_parser_block_declaration (cp_parser *, bool); static void cp_parser_simple_declaration (cp_parser *, bool, tree *); static void cp_parser_decl_specifier_seq (cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, int *); static tree cp_parser_storage_class_specifier_opt (cp_parser *); static tree cp_parser_function_specifier_opt (cp_parser *, cp_decl_specifier_seq *); static tree cp_parser_type_specifier (cp_parser *, cp_parser_flags, cp_decl_specifier_seq *, bool, int *, bool *); static tree 
cp_parser_simple_type_specifier (cp_parser *, cp_decl_specifier_seq *, cp_parser_flags); static tree cp_parser_type_name (cp_parser *); static tree cp_parser_nonclass_name (cp_parser* parser); static tree cp_parser_elaborated_type_specifier (cp_parser *, bool, bool); static tree cp_parser_enum_specifier (cp_parser *); static void cp_parser_enumerator_list (cp_parser *, tree); static void cp_parser_enumerator_definition (cp_parser *, tree); static tree cp_parser_namespace_name (cp_parser *); static void cp_parser_namespace_definition (cp_parser *); static void cp_parser_namespace_body (cp_parser *); static tree cp_parser_qualified_namespace_specifier (cp_parser *); static void cp_parser_namespace_alias_definition (cp_parser *); static bool cp_parser_using_declaration (cp_parser *, bool); static void cp_parser_using_directive (cp_parser *); static tree cp_parser_alias_declaration (cp_parser *); static void cp_parser_asm_definition (cp_parser *); static void cp_parser_linkage_specification (cp_parser *); static void cp_parser_static_assert (cp_parser *, bool); static tree cp_parser_decltype (cp_parser *); /* Declarators [gram.dcl.decl] */ static tree cp_parser_init_declarator (cp_parser *, cp_decl_specifier_seq *, vec<deferred_access_check, va_gc> *, bool, bool, int, bool *, tree *, location_t *); static cp_declarator *cp_parser_declarator (cp_parser *, cp_parser_declarator_kind, int *, bool *, bool, bool); static cp_declarator *cp_parser_direct_declarator (cp_parser *, cp_parser_declarator_kind, int *, bool, bool); static enum tree_code cp_parser_ptr_operator (cp_parser *, tree *, cp_cv_quals *, tree *); static cp_cv_quals cp_parser_cv_qualifier_seq_opt (cp_parser *); static cp_virt_specifiers cp_parser_virt_specifier_seq_opt (cp_parser *); static cp_ref_qualifier cp_parser_ref_qualifier_opt (cp_parser *); static tree cp_parser_late_return_type_opt (cp_parser *, cp_declarator *, cp_cv_quals); static tree cp_parser_declarator_id (cp_parser *, bool); static tree cp_parser_type_id (cp_parser *); static tree cp_parser_template_type_arg (cp_parser *); static tree cp_parser_trailing_type_id (cp_parser *); static tree cp_parser_type_id_1 (cp_parser *, bool, bool); static void cp_parser_type_specifier_seq (cp_parser *, bool, bool, cp_decl_specifier_seq *); static tree cp_parser_parameter_declaration_clause (cp_parser *); static tree cp_parser_parameter_declaration_list (cp_parser *, bool *); static cp_parameter_declarator *cp_parser_parameter_declaration (cp_parser *, bool, bool *); static tree cp_parser_default_argument (cp_parser *, bool); static void cp_parser_function_body (cp_parser *, bool); static tree cp_parser_initializer (cp_parser *, bool *, bool *); static tree cp_parser_initializer_clause (cp_parser *, bool *); static tree cp_parser_braced_list (cp_parser*, bool*); static vec<constructor_elt, va_gc> *cp_parser_initializer_list (cp_parser *, bool *); static bool cp_parser_ctor_initializer_opt_and_function_body (cp_parser *, bool); static tree cp_parser_late_parsing_omp_declare_simd (cp_parser *, tree); static tree cp_parser_late_parsing_cilk_simd_fn_info (cp_parser *, tree); static tree synthesize_implicit_template_parm (cp_parser *); static tree finish_fully_implicit_template (cp_parser *, tree); /* Classes [gram.class] */ static tree cp_parser_class_name (cp_parser *, bool, bool, enum tag_types, bool, bool, bool); static tree cp_parser_class_specifier (cp_parser *); static tree cp_parser_class_head (cp_parser *, bool *); static enum tag_types cp_parser_class_key (cp_parser *); static 
void cp_parser_type_parameter_key (cp_parser* parser); static void cp_parser_member_specification_opt (cp_parser *); static void cp_parser_member_declaration (cp_parser *); static tree cp_parser_pure_specifier (cp_parser *); static tree cp_parser_constant_initializer (cp_parser *); /* Derived classes [gram.class.derived] */ static tree cp_parser_base_clause (cp_parser *); static tree cp_parser_base_specifier (cp_parser *); /* Special member functions [gram.special] */ static tree cp_parser_conversion_function_id (cp_parser *); static tree cp_parser_conversion_type_id (cp_parser *); static cp_declarator *cp_parser_conversion_declarator_opt (cp_parser *); static bool cp_parser_ctor_initializer_opt (cp_parser *); static void cp_parser_mem_initializer_list (cp_parser *); static tree cp_parser_mem_initializer (cp_parser *); static tree cp_parser_mem_initializer_id (cp_parser *); /* Overloading [gram.over] */ static tree cp_parser_operator_function_id (cp_parser *); static tree cp_parser_operator (cp_parser *); /* Templates [gram.temp] */ static void cp_parser_template_declaration (cp_parser *, bool); static tree cp_parser_template_parameter_list (cp_parser *); static tree cp_parser_template_parameter (cp_parser *, bool *, bool *); static tree cp_parser_type_parameter (cp_parser *, bool *); static tree cp_parser_template_id (cp_parser *, bool, bool, enum tag_types, bool); static tree cp_parser_template_name (cp_parser *, bool, bool, bool, enum tag_types, bool *); static tree cp_parser_template_argument_list (cp_parser *); static tree cp_parser_template_argument (cp_parser *); static void cp_parser_explicit_instantiation (cp_parser *); static void cp_parser_explicit_specialization (cp_parser *); /* Exception handling [gram.exception] */ static tree cp_parser_try_block (cp_parser *); static bool cp_parser_function_try_block (cp_parser *); static void cp_parser_handler_seq (cp_parser *); static void cp_parser_handler (cp_parser *); static tree cp_parser_exception_declaration (cp_parser *); static tree cp_parser_throw_expression (cp_parser *); static tree cp_parser_exception_specification_opt (cp_parser *); static tree cp_parser_type_id_list (cp_parser *); /* GNU Extensions */ static tree cp_parser_asm_specification_opt (cp_parser *); static tree cp_parser_asm_operand_list (cp_parser *); static tree cp_parser_asm_clobber_list (cp_parser *); static tree cp_parser_asm_label_list (cp_parser *); static bool cp_next_tokens_can_be_attribute_p (cp_parser *); static bool cp_next_tokens_can_be_gnu_attribute_p (cp_parser *); static bool cp_next_tokens_can_be_std_attribute_p (cp_parser *); static bool cp_nth_tokens_can_be_std_attribute_p (cp_parser *, size_t); static bool cp_nth_tokens_can_be_gnu_attribute_p (cp_parser *, size_t); static bool cp_nth_tokens_can_be_attribute_p (cp_parser *, size_t); static tree cp_parser_attributes_opt (cp_parser *); static tree cp_parser_gnu_attributes_opt (cp_parser *); static tree cp_parser_gnu_attribute_list (cp_parser *); static tree cp_parser_std_attribute (cp_parser *); static tree cp_parser_std_attribute_spec (cp_parser *); static tree cp_parser_std_attribute_spec_seq (cp_parser *); static bool cp_parser_extension_opt (cp_parser *, int *); static void cp_parser_label_declaration (cp_parser *); /* Transactional Memory Extensions */ static tree cp_parser_transaction (cp_parser *, enum rid); static tree cp_parser_transaction_expression (cp_parser *, enum rid); static bool cp_parser_function_transaction (cp_parser *, enum rid); static tree cp_parser_transaction_cancel 
(cp_parser *); enum pragma_context { pragma_external, pragma_member, pragma_objc_icode, pragma_stmt, pragma_compound }; static bool cp_parser_pragma (cp_parser *, enum pragma_context); /* Objective-C++ Productions */ static tree cp_parser_objc_message_receiver (cp_parser *); static tree cp_parser_objc_message_args (cp_parser *); static tree cp_parser_objc_message_expression (cp_parser *); static tree cp_parser_objc_encode_expression (cp_parser *); static tree cp_parser_objc_defs_expression (cp_parser *); static tree cp_parser_objc_protocol_expression (cp_parser *); static tree cp_parser_objc_selector_expression (cp_parser *); static tree cp_parser_objc_expression (cp_parser *); static bool cp_parser_objc_selector_p (enum cpp_ttype); static tree cp_parser_objc_selector (cp_parser *); static tree cp_parser_objc_protocol_refs_opt (cp_parser *); static void cp_parser_objc_declaration (cp_parser *, tree); static tree cp_parser_objc_statement (cp_parser *); static bool cp_parser_objc_valid_prefix_attributes (cp_parser *, tree *); static void cp_parser_objc_at_property_declaration (cp_parser *) ; static void cp_parser_objc_at_synthesize_declaration (cp_parser *) ; static void cp_parser_objc_at_dynamic_declaration (cp_parser *) ; static tree cp_parser_objc_struct_declaration (cp_parser *) ; /* Utility Routines */ static tree cp_parser_lookup_name (cp_parser *, tree, enum tag_types, bool, bool, bool, tree *, location_t); static tree cp_parser_lookup_name_simple (cp_parser *, tree, location_t); static tree cp_parser_maybe_treat_template_as_class (tree, bool); static bool cp_parser_check_declarator_template_parameters (cp_parser *, cp_declarator *, location_t); static bool cp_parser_check_template_parameters (cp_parser *, unsigned, location_t, cp_declarator *); static tree cp_parser_simple_cast_expression (cp_parser *); static tree cp_parser_global_scope_opt (cp_parser *, bool); static bool cp_parser_constructor_declarator_p (cp_parser *, bool); static tree cp_parser_function_definition_from_specifiers_and_declarator (cp_parser *, cp_decl_specifier_seq *, tree, const cp_declarator *); static tree cp_parser_function_definition_after_declarator (cp_parser *, bool); static void cp_parser_template_declaration_after_export (cp_parser *, bool); static void cp_parser_perform_template_parameter_access_checks (vec<deferred_access_check, va_gc> *); static tree cp_parser_single_declaration (cp_parser *, vec<deferred_access_check, va_gc> *, bool, bool, bool *); static tree cp_parser_functional_cast (cp_parser *, tree); static tree cp_parser_save_member_function_body (cp_parser *, cp_decl_specifier_seq *, cp_declarator *, tree); static tree cp_parser_save_nsdmi (cp_parser *); static tree cp_parser_enclosed_template_argument_list (cp_parser *); static void cp_parser_save_default_args (cp_parser *, tree); static void cp_parser_late_parsing_for_member (cp_parser *, tree); static tree cp_parser_late_parse_one_default_arg (cp_parser *, tree, tree, tree); static void cp_parser_late_parsing_nsdmi (cp_parser *, tree); static void cp_parser_late_parsing_default_args (cp_parser *, tree); static tree cp_parser_sizeof_operand (cp_parser *, enum rid); static tree cp_parser_trait_expr (cp_parser *, enum rid); static bool cp_parser_declares_only_class_p (cp_parser *); static void cp_parser_set_storage_class (cp_parser *, cp_decl_specifier_seq *, enum rid, cp_token *); static void cp_parser_set_decl_spec_type (cp_decl_specifier_seq *, tree, cp_token *, bool); static void set_and_check_decl_spec_loc (cp_decl_specifier_seq 
*decl_specs, cp_decl_spec ds, cp_token *); static bool cp_parser_friend_p (const cp_decl_specifier_seq *); static void cp_parser_required_error (cp_parser *, required_token, bool); static cp_token *cp_parser_require (cp_parser *, enum cpp_ttype, required_token); static cp_token *cp_parser_require_keyword (cp_parser *, enum rid, required_token); static bool cp_parser_token_starts_function_definition_p (cp_token *); static bool cp_parser_next_token_starts_class_definition_p (cp_parser *); static bool cp_parser_next_token_ends_template_argument_p (cp_parser *); static bool cp_parser_nth_token_starts_template_argument_list_p (cp_parser *, size_t); static enum tag_types cp_parser_token_is_class_key (cp_token *); static enum tag_types cp_parser_token_is_type_parameter_key (cp_token *); static void cp_parser_check_class_key (enum tag_types, tree type); static void cp_parser_check_access_in_redeclaration (tree type, location_t location); static bool cp_parser_optional_template_keyword (cp_parser *); static void cp_parser_pre_parsed_nested_name_specifier (cp_parser *); static bool cp_parser_cache_group (cp_parser *, enum cpp_ttype, unsigned); static tree cp_parser_cache_defarg (cp_parser *parser, bool nsdmi); static void cp_parser_parse_tentatively (cp_parser *); static void cp_parser_commit_to_tentative_parse (cp_parser *); static void cp_parser_commit_to_topmost_tentative_parse (cp_parser *); static void cp_parser_abort_tentative_parse (cp_parser *); static bool cp_parser_parse_definitely (cp_parser *); static inline bool cp_parser_parsing_tentatively (cp_parser *); static bool cp_parser_uncommitted_to_tentative_parse_p (cp_parser *); static void cp_parser_error (cp_parser *, const char *); static void cp_parser_name_lookup_error (cp_parser *, tree, tree, name_lookup_error, location_t); static bool cp_parser_simulate_error (cp_parser *); static bool cp_parser_check_type_definition (cp_parser *); static void cp_parser_check_for_definition_in_return_type (cp_declarator *, tree, location_t type_location); static void cp_parser_check_for_invalid_template_id (cp_parser *, tree, enum tag_types, location_t location); static bool cp_parser_non_integral_constant_expression (cp_parser *, non_integral_constant); static void cp_parser_diagnose_invalid_type_name (cp_parser *, tree, location_t); static bool cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *); static int cp_parser_skip_to_closing_parenthesis (cp_parser *, bool, bool, bool); static void cp_parser_skip_to_end_of_statement (cp_parser *); static void cp_parser_consume_semicolon_at_end_of_statement (cp_parser *); static void cp_parser_skip_to_end_of_block_or_statement (cp_parser *); static bool cp_parser_skip_to_closing_brace (cp_parser *); static void cp_parser_skip_to_end_of_template_parameter_list (cp_parser *); static void cp_parser_skip_to_pragma_eol (cp_parser*, cp_token *); static bool cp_parser_error_occurred (cp_parser *); static bool cp_parser_allow_gnu_extensions_p (cp_parser *); static bool cp_parser_is_pure_string_literal (cp_token *); static bool cp_parser_is_string_literal (cp_token *); static bool cp_parser_is_keyword (cp_token *, enum rid); static tree cp_parser_make_typename_type (cp_parser *, tree, location_t location); static cp_declarator * cp_parser_make_indirect_declarator (enum tree_code, tree, cp_cv_quals, cp_declarator *, tree); static bool cp_parser_compound_literal_p (cp_parser *); static bool cp_parser_array_designator_p (cp_parser *); static bool cp_parser_skip_to_closing_square_bracket (cp_parser *); /* 
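We are parsing tentatively whenever there is a parser context above
   the bottommost one.  A classic trigger is a statement such as

      T (x);

   which must be treated as a declaration of `x' if that parse
   succeeds; tentative parsing lets the parser back out and reparse it
   as an expression otherwise.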
Returns nonzero if we are parsing tentatively.  */

static inline bool
cp_parser_parsing_tentatively (cp_parser* parser)
{
  return parser->context->next != NULL;
}

/* Returns nonzero if TOKEN is a string literal.  */

static bool
cp_parser_is_pure_string_literal (cp_token* token)
{
  return (token->type == CPP_STRING ||
          token->type == CPP_STRING16 ||
          token->type == CPP_STRING32 ||
          token->type == CPP_WSTRING ||
          token->type == CPP_UTF8STRING);
}

/* Returns nonzero if TOKEN is a string literal or a user-defined
   string literal.  */

static bool
cp_parser_is_string_literal (cp_token* token)
{
  return (cp_parser_is_pure_string_literal (token) ||
          token->type == CPP_STRING_USERDEF ||
          token->type == CPP_STRING16_USERDEF ||
          token->type == CPP_STRING32_USERDEF ||
          token->type == CPP_WSTRING_USERDEF ||
          token->type == CPP_UTF8STRING_USERDEF);
}

/* Returns nonzero if TOKEN is the indicated KEYWORD.  */

static bool
cp_parser_is_keyword (cp_token* token, enum rid keyword)
{
  return token->keyword == keyword;
}

/* If not parsing tentatively, issue a diagnostic of the form

      FILE:LINE: MESSAGE before TOKEN

   where TOKEN is the next token in the input stream.  MESSAGE
   (specified by the caller) is usually of the form "expected
   OTHER-TOKEN".  */

static void
cp_parser_error (cp_parser* parser, const char* gmsgid)
{
  if (!cp_parser_simulate_error (parser))
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);
      /* This diagnostic makes more sense if it is tagged to the line
         of the token we just peeked at.  */
      cp_lexer_set_source_position_from_token (token);

      if (token->type == CPP_PRAGMA)
        {
          error_at (token->location, "%<#pragma%> is not allowed here");
          cp_parser_skip_to_pragma_eol (parser, token);
          return;
        }

      c_parse_error (gmsgid,
                     /* Because c_parse_error does not understand
                        CPP_KEYWORD, keywords are treated like
                        identifiers.  */
                     (token->type == CPP_KEYWORD ? CPP_NAME : token->type),
                     token->u.value, token->flags);
    }
}

/* Issue an error about name-lookup failing.  NAME is the
   IDENTIFIER_NODE; DECL is the result of the lookup (as returned from
   cp_parser_lookup_name).  DESIRED is the thing that we hoped to
   find.  */

static void
cp_parser_name_lookup_error (cp_parser* parser,
                             tree name,
                             tree decl,
                             name_lookup_error desired,
                             location_t location)
{
  /* If name lookup completely failed, tell the user that NAME was not
     declared.
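For example, if the user writes `some_ns::frob x;' and `frob' has
     not been declared in namespace `some_ns', we report that
     `some_ns::frob' has not been declared.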
*/ if (decl == error_mark_node) { if (parser->scope && parser->scope != global_namespace) error_at (location, "%<%E::%E%> has not been declared", parser->scope, name); else if (parser->scope == global_namespace) error_at (location, "%<::%E%> has not been declared", name); else if (parser->object_scope && !CLASS_TYPE_P (parser->object_scope)) error_at (location, "request for member %qE in non-class type %qT", name, parser->object_scope); else if (parser->object_scope) error_at (location, "%<%T::%E%> has not been declared", parser->object_scope, name); else error_at (location, "%qE has not been declared", name); } else if (parser->scope && parser->scope != global_namespace) { switch (desired) { case NLE_TYPE: error_at (location, "%<%E::%E%> is not a type", parser->scope, name); break; case NLE_CXX98: error_at (location, "%<%E::%E%> is not a class or namespace", parser->scope, name); break; case NLE_NOT_CXX98: error_at (location, "%<%E::%E%> is not a class, namespace, or enumeration", parser->scope, name); break; default: gcc_unreachable (); } } else if (parser->scope == global_namespace) { switch (desired) { case NLE_TYPE: error_at (location, "%<::%E%> is not a type", name); break; case NLE_CXX98: error_at (location, "%<::%E%> is not a class or namespace", name); break; case NLE_NOT_CXX98: error_at (location, "%<::%E%> is not a class, namespace, or enumeration", name); break; default: gcc_unreachable (); } } else { switch (desired) { case NLE_TYPE: error_at (location, "%qE is not a type", name); break; case NLE_CXX98: error_at (location, "%qE is not a class or namespace", name); break; case NLE_NOT_CXX98: error_at (location, "%qE is not a class, namespace, or enumeration", name); break; default: gcc_unreachable (); } } } /* If we are parsing tentatively, remember that an error has occurred during this tentative parse. Returns true if the error was simulated; false if a message should be issued by the caller. */ static bool cp_parser_simulate_error (cp_parser* parser) { if (cp_parser_uncommitted_to_tentative_parse_p (parser)) { parser->context->status = CP_PARSER_STATUS_KIND_ERROR; return true; } return false; } /* This function is called when a type is defined. If type definitions are forbidden at this point, an error message is issued. */ static bool cp_parser_check_type_definition (cp_parser* parser) { /* If types are forbidden here, issue a message. */ if (parser->type_definition_forbidden_message) { /* Don't use `%s' to print the string, because quotations (`%<', `%>') in the message need to be interpreted. */ error (parser->type_definition_forbidden_message); return false; } return true; } /* This function is called when the DECLARATOR is processed. The TYPE was a type defined in the decl-specifiers. If it is invalid to define a type in the decl-specifiers for DECLARATOR, an error is issued. TYPE_LOCATION is the location of TYPE and is used for error reporting. */ static void cp_parser_check_for_definition_in_return_type (cp_declarator *declarator, tree type, location_t type_location) { /* [dcl.fct] forbids type definitions in return types. Unfortunately, it's not easy to know whether or not we are processing a return type until after the fact. 
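For example, in the (invalid) declaration

        struct S { int i; } f ();

     we only know that `S' was defined in a return type once the
     function declarator has been seen; at that point we report that
     new types may not be defined in a return type.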
*/ while (declarator && (declarator->kind == cdk_pointer || declarator->kind == cdk_reference || declarator->kind == cdk_ptrmem)) declarator = declarator->declarator; if (declarator && declarator->kind == cdk_function) { error_at (type_location, "new types may not be defined in a return type"); inform (type_location, "(perhaps a semicolon is missing after the definition of %qT)", type); } } /* A type-specifier (TYPE) has been parsed which cannot be followed by "<" in any valid C++ program. If the next token is indeed "<", issue a message warning the user about what appears to be an invalid attempt to form a template-id. LOCATION is the location of the type-specifier (TYPE) */ static void cp_parser_check_for_invalid_template_id (cp_parser* parser, tree type, enum tag_types tag_type, location_t location) { cp_token_position start = 0; if (cp_lexer_next_token_is (parser->lexer, CPP_LESS)) { if (TYPE_P (type)) error_at (location, "%qT is not a template", type); else if (identifier_p (type)) { if (tag_type != none_type) error_at (location, "%qE is not a class template", type); else error_at (location, "%qE is not a template", type); } else error_at (location, "invalid template-id"); /* Remember the location of the invalid "<". */ if (cp_parser_uncommitted_to_tentative_parse_p (parser)) start = cp_lexer_token_position (parser->lexer, true); /* Consume the "<". */ cp_lexer_consume_token (parser->lexer); /* Parse the template arguments. */ cp_parser_enclosed_template_argument_list (parser); /* Permanently remove the invalid template arguments so that this error message is not issued again. */ if (start) cp_lexer_purge_tokens_after (parser->lexer, start); } } /* If parsing an integral constant-expression, issue an error message about the fact that THING appeared and return true. Otherwise, return false. In either case, set PARSER->NON_INTEGRAL_CONSTANT_EXPRESSION_P. 
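For example, in C++98 code such as

        int n = 1;
        char buf[n++];   // error

   the increment in the array bound reaches this function with
   THING == NIC_INC, producing "an increment cannot appear in a
   constant-expression".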
*/

static bool
cp_parser_non_integral_constant_expression (cp_parser *parser,
                                            non_integral_constant thing)
{
  parser->non_integral_constant_expression_p = true;
  if (parser->integral_constant_expression_p)
    {
      if (!parser->allow_non_integral_constant_expression_p)
        {
          const char *msg = NULL;
          switch (thing)
            {
            case NIC_FLOAT:
              error ("floating-point literal "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_CAST:
              error ("a cast to a type other than an integral or "
                     "enumeration type cannot appear in a "
                     "constant-expression");
              return true;
            case NIC_TYPEID:
              error ("%<typeid%> operator "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_NCC:
              error ("non-constant compound literals "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_FUNC_CALL:
              error ("a function call "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_INC:
              error ("an increment "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_DEC:
              error ("a decrement "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_ARRAY_REF:
              error ("an array reference "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_ADDR_LABEL:
              error ("the address of a label "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_OVERLOADED:
              error ("calls to overloaded operators "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_ASSIGNMENT:
              error ("an assignment cannot appear in a constant-expression");
              return true;
            case NIC_COMMA:
              error ("a comma operator "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_CONSTRUCTOR:
              error ("a call to a constructor "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_TRANSACTION:
              error ("a transaction expression "
                     "cannot appear in a constant-expression");
              return true;
            case NIC_THIS:
              msg = "this";
              break;
            case NIC_FUNC_NAME:
              msg = "__FUNCTION__";
              break;
            case NIC_PRETTY_FUNC:
              msg = "__PRETTY_FUNCTION__";
              break;
            case NIC_C99_FUNC:
              msg = "__func__";
              break;
            case NIC_VA_ARG:
              msg = "va_arg";
              break;
            case NIC_ARROW:
              msg = "->";
              break;
            case NIC_POINT:
              msg = ".";
              break;
            case NIC_STAR:
              msg = "*";
              break;
            case NIC_ADDR:
              msg = "&";
              break;
            case NIC_PREINCREMENT:
              msg = "++";
              break;
            case NIC_PREDECREMENT:
              msg = "--";
              break;
            case NIC_NEW:
              msg = "new";
              break;
            case NIC_DEL:
              msg = "delete";
              break;
            default:
              gcc_unreachable ();
            }
          if (msg)
            error ("%qs cannot appear in a constant-expression", msg);
          return true;
        }
    }
  return false;
}

/* Emit a diagnostic for an invalid type name.  This function commits
   to the current active tentative parse, if any.  (Otherwise, the
   problematic construct might be encountered again later, resulting in
   duplicate error messages.)  LOCATION is the location of ID.  */

static void
cp_parser_diagnose_invalid_type_name (cp_parser *parser, tree id,
                                      location_t location)
{
  tree decl, ambiguous_decls;
  cp_parser_commit_to_tentative_parse (parser);
  /* Try to look up the identifier.  */
  decl = cp_parser_lookup_name (parser, id, none_type,
                                /*is_template=*/false,
                                /*is_namespace=*/false,
                                /*check_dependency=*/true,
                                &ambiguous_decls, location);
  if (ambiguous_decls)
    /* If the lookup was ambiguous, an error will already have
       been issued.  */
    return;
  /* If the lookup found a template-name, it means that the user forgot
     to specify an argument list.  Emit a useful error message.
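For example, given

        template <typename T> struct vec { };
        vec v;   // error

   we report "invalid use of template-name 'vec' without an argument
   list".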
*/ if (TREE_CODE (decl) == TEMPLATE_DECL) error_at (location, "invalid use of template-name %qE without an argument list", decl); else if (TREE_CODE (id) == BIT_NOT_EXPR) error_at (location, "invalid use of destructor %qD as a type", id); else if (TREE_CODE (decl) == TYPE_DECL) /* Something like 'unsigned A a;' */ error_at (location, "invalid combination of multiple type-specifiers"); else if (!parser->scope) { /* Issue an error message. */ error_at (location, "%qE does not name a type", id); /* If we're in a template class, it's possible that the user was referring to a type from a base class. For example: template <typename T> struct A { typedef T X; }; template <typename T> struct B : public A<T> { X x; }; The user should have said "typename A<T>::X". */ if (cxx_dialect < cxx11 && id == ridpointers[(int)RID_CONSTEXPR]) inform (location, "C++11 %<constexpr%> only available with " "-std=c++11 or -std=gnu++11"); else if (cxx_dialect < cxx11 && id == ridpointers[(int)RID_NOEXCEPT]) inform (location, "C++11 %<noexcept%> only available with " "-std=c++11 or -std=gnu++11"); else if (cxx_dialect < cxx11 && TREE_CODE (id) == IDENTIFIER_NODE && !strcmp (IDENTIFIER_POINTER (id), "thread_local")) inform (location, "C++11 %<thread_local%> only available with " "-std=c++11 or -std=gnu++11"); else if (processing_template_decl && current_class_type && TYPE_BINFO (current_class_type)) { tree b; for (b = TREE_CHAIN (TYPE_BINFO (current_class_type)); b; b = TREE_CHAIN (b)) { tree base_type = BINFO_TYPE (b); if (CLASS_TYPE_P (base_type) && dependent_type_p (base_type)) { tree field; /* Go from a particular instantiation of the template (which will have an empty TYPE_FIELDs), to the main version. */ base_type = CLASSTYPE_PRIMARY_TEMPLATE_TYPE (base_type); for (field = TYPE_FIELDS (base_type); field; field = DECL_CHAIN (field)) if (TREE_CODE (field) == TYPE_DECL && DECL_NAME (field) == id) { inform (location, "(perhaps %<typename %T::%E%> was intended)", BINFO_TYPE (b), id); break; } if (field) break; } } } } /* Here we diagnose qualified-ids where the scope is actually correct, but the identifier does not resolve to a valid type name. */ else if (parser->scope != error_mark_node) { if (TREE_CODE (parser->scope) == NAMESPACE_DECL) { if (cp_lexer_next_token_is (parser->lexer, CPP_LESS)) error_at (location_of (id), "%qE in namespace %qE does not name a template type", id, parser->scope); else error_at (location_of (id), "%qE in namespace %qE does not name a type", id, parser->scope); } else if (CLASS_TYPE_P (parser->scope) && constructor_name_p (id, parser->scope)) { /* A<T>::A<T>() */ error_at (location, "%<%T::%E%> names the constructor, not" " the type", parser->scope, id); if (cp_lexer_next_token_is (parser->lexer, CPP_LESS)) error_at (location, "and %qT has no template constructors", parser->scope); } else if (TYPE_P (parser->scope) && dependent_scope_p (parser->scope)) error_at (location, "need %<typename%> before %<%T::%E%> because " "%qT is a dependent scope", parser->scope, id, parser->scope); else if (TYPE_P (parser->scope)) { if (cp_lexer_next_token_is (parser->lexer, CPP_LESS)) error_at (location_of (id), "%qE in %q#T does not name a template type", id, parser->scope); else error_at (location_of (id), "%qE in %q#T does not name a type", id, parser->scope); } else gcc_unreachable (); } } /* Check for a common situation where a type-name should be present, but is not, and issue a sensible error message. Returns true if an invalid type-name was detected. 
The situations handled by this function are variable declarations of
   the form `ID a', where `ID' is an id-expression and `a' is a plain
   identifier.  Usually, `ID' should name a type, but if we got here it
   means that it does not.  We try to emit the best possible error
   message depending on what exactly the id-expression looks like.  */

static bool
cp_parser_parse_and_diagnose_invalid_type_name (cp_parser *parser)
{
  tree id;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  /* Avoid duplicate error about ambiguous lookup.  */
  if (token->type == CPP_NESTED_NAME_SPECIFIER)
    {
      cp_token *next = cp_lexer_peek_nth_token (parser->lexer, 2);
      if (next->type == CPP_NAME && next->error_reported)
        goto out;
    }

  cp_parser_parse_tentatively (parser);
  id = cp_parser_id_expression (parser,
                                /*template_keyword_p=*/false,
                                /*check_dependency_p=*/true,
                                /*template_p=*/NULL,
                                /*declarator_p=*/true,
                                /*optional_p=*/false);
  /* If the next token is a (, this is a function with no explicit
     return type, i.e. constructor, destructor or conversion op.  */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)
      || TREE_CODE (id) == TYPE_DECL)
    {
      cp_parser_abort_tentative_parse (parser);
      return false;
    }
  if (!cp_parser_parse_definitely (parser))
    return false;

  /* Emit a diagnostic for the invalid type.  */
  cp_parser_diagnose_invalid_type_name (parser, id, token->location);
 out:
  /* If we aren't in the middle of a declarator (i.e. in a
     parameter-declaration-clause), skip to the end of the declaration;
     there's no point in trying to process it.  */
  if (!parser->in_declarator_p)
    cp_parser_skip_to_end_of_block_or_statement (parser);
  return true;
}

/* Consume tokens up to, and including, the next non-nested closing
   `)'.  Returns 1 iff we found a closing `)'.  RECOVERING is true if
   we are doing error recovery.  Returns -1 if OR_COMMA is true and we
   found an unnested comma.  */

static int
cp_parser_skip_to_closing_parenthesis (cp_parser *parser,
                                       bool recovering,
                                       bool or_comma,
                                       bool consume_paren)
{
  unsigned paren_depth = 0;
  unsigned brace_depth = 0;
  unsigned square_depth = 0;

  if (recovering && !or_comma
      && cp_parser_uncommitted_to_tentative_parse_p (parser))
    return 0;

  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, then there is no closing `)'.  */
          return 0;

        /* This is good for lambda expression capture-lists.  */
        case CPP_OPEN_SQUARE:
          ++square_depth;
          break;
        case CPP_CLOSE_SQUARE:
          if (!square_depth--)
            return 0;
          break;

        case CPP_SEMICOLON:
          /* This matches the processing in skip_to_end_of_statement.  */
          if (!brace_depth)
            return 0;
          break;

        case CPP_OPEN_BRACE:
          ++brace_depth;
          break;
        case CPP_CLOSE_BRACE:
          if (!brace_depth--)
            return 0;
          break;

        case CPP_COMMA:
          if (recovering && or_comma && !brace_depth && !paren_depth
              && !square_depth)
            return -1;
          break;

        case CPP_OPEN_PAREN:
          if (!brace_depth)
            ++paren_depth;
          break;

        case CPP_CLOSE_PAREN:
          if (!brace_depth && !paren_depth--)
            {
              if (consume_paren)
                cp_lexer_consume_token (parser->lexer);
              return 1;
            }
          break;

        default:
          break;
        }

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}

/* Consume tokens until we reach the end of the current statement.
   Normally, that will be just before consuming a `;'.  However, if a
   non-nested `}' comes first, then we stop before consuming that.  */

static void
cp_parser_skip_to_end_of_statement (cp_parser* parser)
{
  unsigned nesting_depth = 0;

  /* Unwind generic function template scope if necessary.
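This happens, for example, when error recovery begins inside a
     generic lambda whose `auto' parameters opened an implicit template
     parameter scope.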
*/
  if (parser->fully_implicit_function_template_p)
    finish_fully_implicit_template (parser, /*member_decl_opt=*/0);

  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, stop.  */
          return;

        case CPP_SEMICOLON:
          /* If the next token is a `;', we have reached the end of the
             statement.  */
          if (!nesting_depth)
            return;
          break;

        case CPP_CLOSE_BRACE:
          /* If this is a non-nested '}', stop before consuming it.
             That way, when confronted with something like:

               { 3 + }

             we stop before consuming the closing '}', even though we
             have not yet reached a `;'.  */
          if (nesting_depth == 0)
            return;

          /* If it is the closing '}' for a block that we have scanned,
             stop -- but only after consuming the token.  That way
             given:

                void f g () { ... }
                typedef int I;

             we will stop after the body of the erroneously declared
             function, but before consuming the following `typedef'
             declaration.  */
          if (--nesting_depth == 0)
            {
              cp_lexer_consume_token (parser->lexer);
              return;
            }
          break;

        case CPP_OPEN_BRACE:
          ++nesting_depth;
          break;

        default:
          break;
        }

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}

/* This function is called at the end of a statement or declaration.
   If the next token is a semicolon, it is consumed; otherwise, error
   recovery is attempted.  */

static void
cp_parser_consume_semicolon_at_end_of_statement (cp_parser *parser)
{
  /* Look for the trailing `;'.  */
  if (!cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))
    {
      /* If there is additional (erroneous) input, skip to the end of
         the statement.  */
      cp_parser_skip_to_end_of_statement (parser);
      /* If the next token is now a `;', consume it.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
        cp_lexer_consume_token (parser->lexer);
    }
}

/* Skip tokens until we have consumed an entire block, or until we
   have consumed a non-nested `;'.  */

static void
cp_parser_skip_to_end_of_block_or_statement (cp_parser* parser)
{
  int nesting_depth = 0;

  /* Unwind generic function template scope if necessary.  */
  if (parser->fully_implicit_function_template_p)
    finish_fully_implicit_template (parser, /*member_decl_opt=*/0);

  while (nesting_depth >= 0)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, stop.  */
          return;

        case CPP_SEMICOLON:
          /* Stop if this is an unnested ';'.  */
          if (!nesting_depth)
            nesting_depth = -1;
          break;

        case CPP_CLOSE_BRACE:
          /* Stop if this is an unnested '}', or closes the outermost
             nesting level.  */
          nesting_depth--;
          if (nesting_depth < 0)
            return;
          if (!nesting_depth)
            nesting_depth = -1;
          break;

        case CPP_OPEN_BRACE:
          /* Nest.  */
          nesting_depth++;
          break;

        default:
          break;
        }

      /* Consume the token.  */
      cp_lexer_consume_token (parser->lexer);
    }
}

/* Skip tokens until a non-nested closing curly brace is the next
   token, or there are no more tokens.  Return true in the first case,
   false otherwise.  */

static bool
cp_parser_skip_to_closing_brace (cp_parser *parser)
{
  unsigned nesting_depth = 0;

  while (true)
    {
      cp_token *token = cp_lexer_peek_token (parser->lexer);

      switch (token->type)
        {
        case CPP_EOF:
        case CPP_PRAGMA_EOL:
          /* If we've run out of tokens, stop.  */
          return false;

        case CPP_CLOSE_BRACE:
          /* If the next token is a non-nested `}', then we have
             reached the end of the current block.  */
          if (nesting_depth-- == 0)
            return true;
          break;

        case CPP_OPEN_BRACE:
          /* If the next token is a `{', then we are entering a new
             block.  Consume the entire block.
*/ ++nesting_depth; break; default: break; } /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } } /* Consume tokens until we reach the end of the pragma. The PRAGMA_TOK parameter is the PRAGMA token, allowing us to purge the entire pragma sequence. */ static void cp_parser_skip_to_pragma_eol (cp_parser* parser, cp_token *pragma_tok) { cp_token *token; parser->lexer->in_pragma = false; do token = cp_lexer_consume_token (parser->lexer); while (token->type != CPP_PRAGMA_EOL && token->type != CPP_EOF); /* Ensure that the pragma is not parsed again. */ cp_lexer_purge_tokens_after (parser->lexer, pragma_tok); } /* Require pragma end of line, resyncing with it as necessary. The arguments are as for cp_parser_skip_to_pragma_eol. */ static void cp_parser_require_pragma_eol (cp_parser *parser, cp_token *pragma_tok) { parser->lexer->in_pragma = false; if (!cp_parser_require (parser, CPP_PRAGMA_EOL, RT_PRAGMA_EOL)) cp_parser_skip_to_pragma_eol (parser, pragma_tok); } /* This is a simple wrapper around make_typename_type. When the id is an unresolved identifier node, we can provide a superior diagnostic using cp_parser_diagnose_invalid_type_name. */ static tree cp_parser_make_typename_type (cp_parser *parser, tree id, location_t id_location) { tree result; if (identifier_p (id)) { result = make_typename_type (parser->scope, id, typename_type, /*complain=*/tf_none); if (result == error_mark_node) cp_parser_diagnose_invalid_type_name (parser, id, id_location); return result; } return make_typename_type (parser->scope, id, typename_type, tf_error); } /* This is a wrapper around the make_{pointer,ptrmem,reference}_declarator functions that decides which one to call based on the CODE and CLASS_TYPE arguments. The CODE argument should be one of the values returned by cp_parser_ptr_operator. ATTRIBUTES represent the attributes that appertain to the pointer or reference. */ static cp_declarator * cp_parser_make_indirect_declarator (enum tree_code code, tree class_type, cp_cv_quals cv_qualifiers, cp_declarator *target, tree attributes) { if (code == ERROR_MARK) return cp_error_declarator; if (code == INDIRECT_REF) if (class_type == NULL_TREE) return make_pointer_declarator (cv_qualifiers, target, attributes); else return make_ptrmem_declarator (cv_qualifiers, class_type, target, attributes); else if (code == ADDR_EXPR && class_type == NULL_TREE) return make_reference_declarator (cv_qualifiers, target, false, attributes); else if (code == NON_LVALUE_EXPR && class_type == NULL_TREE) return make_reference_declarator (cv_qualifiers, target, true, attributes); gcc_unreachable (); } /* Create a new C++ parser. */ static cp_parser * cp_parser_new (void) { cp_parser *parser; cp_lexer *lexer; unsigned i; /* cp_lexer_new_main is called before doing GC allocation because cp_lexer_new_main might load a PCH file. */ lexer = cp_lexer_new_main (); /* Initialize the binops_by_token so that we can get the tree directly from the token. */ for (i = 0; i < sizeof (binops) / sizeof (binops[0]); i++) binops_by_token[binops[i].token_type] = binops[i]; parser = ggc_cleared_alloc<cp_parser> (); parser->lexer = lexer; parser->context = cp_parser_context_new (NULL); /* For now, we always accept GNU extensions. */ parser->allow_gnu_extensions_p = 1; /* The `>' token is a greater-than operator, not the end of a template-id. */ parser->greater_than_is_operator_p = true; parser->default_arg_ok_p = true; /* We are not parsing a constant-expression. 
parser->integral_constant_expression_p = false;
  parser->allow_non_integral_constant_expression_p = false;
  parser->non_integral_constant_expression_p = false;
  /* Local variable names are not forbidden.  */
  parser->local_variables_forbidden_p = false;
  /* We are not processing an `extern "C"' declaration.  */
  parser->in_unbraced_linkage_specification_p = false;
  /* We are not processing a declarator.  */
  parser->in_declarator_p = false;
  /* We are not processing a template-argument-list.  */
  parser->in_template_argument_list_p = false;
  /* We are not in an iteration statement.  */
  parser->in_statement = 0;
  /* We are not in a switch statement.  */
  parser->in_switch_statement_p = false;
  /* We are not parsing a type-id inside an expression.  */
  parser->in_type_id_in_expr_p = false;
  /* Declarations aren't implicitly extern "C".  */
  parser->implicit_extern_c = false;
  /* String literals should be translated to the execution character
     set.  */
  parser->translate_strings_p = true;
  /* We are not parsing a function body.  */
  parser->in_function_body = false;
  /* `:' may be corrected to `::' until told otherwise.  */
  parser->colon_corrects_to_scope_p = true;
  /* The unparsed function queue is empty.  */
  push_unparsed_function_queues (parser);
  /* There are no classes being defined.  */
  parser->num_classes_being_defined = 0;
  /* No template parameters apply.  */
  parser->num_template_parameter_lists = 0;
  /* Not declaring an implicit function template.  */
  parser->auto_is_implicit_function_template_parm_p = false;
  parser->fully_implicit_function_template_p = false;
  parser->implicit_template_parms = 0;
  parser->implicit_template_scope = 0;

  return parser;
}

/* Create a cp_lexer structure which will emit the tokens in CACHE and
   push it onto the parser's lexer stack.  This is used for delayed
   parsing of in-class method bodies and default arguments, and should
   not be confused with tentative parsing.  */

static void
cp_parser_push_lexer_for_tokens (cp_parser *parser, cp_token_cache *cache)
{
  cp_lexer *lexer = cp_lexer_new_from_tokens (cache);
  lexer->next = parser->lexer;
  parser->lexer = lexer;

  /* Move the current source position to that of the first token in the
     new lexer.  */
  cp_lexer_set_source_position_from_token (lexer->next_token);
}

/* Pop the top lexer off the parser stack.  This is never used for the
   "main" lexer, only for those pushed by
   cp_parser_push_lexer_for_tokens.  */

static void
cp_parser_pop_lexer (cp_parser *parser)
{
  cp_lexer *lexer = parser->lexer;
  parser->lexer = lexer->next;
  cp_lexer_destroy (lexer);

  /* Put the current source position back where it was before this
     lexer was pushed.  */
  cp_lexer_set_source_position_from_token (parser->lexer->next_token);
}

/* Lexical conventions [gram.lex]  */

/* Parse an identifier.  Returns an IDENTIFIER_NODE representing the
   identifier.  */

static tree
cp_parser_identifier (cp_parser* parser)
{
  cp_token *token;

  /* Look for the identifier.  */
  token = cp_parser_require (parser, CPP_NAME, RT_NAME);
  /* Return the value.  */
  return token ? token->u.value : error_mark_node;
}

/* Parse a sequence of adjacent string constants.  Returns a
   TREE_STRING representing the combined, nul-terminated string
   constant.  If TRANSLATE is true, translate the string to the
   execution character set.  If WIDE_OK is true, a wide string is
   valid here.

   C++98 [lex.string] says that if a narrow string literal token is
   adjacent to a wide string literal token, the behavior is undefined.
   However, C99 6.4.5p4 says that this results in a wide string
   literal.  We follow C99 here, for consistency with the C front end.
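Thus, for example, the adjacent tokens "ab" L"cd" concatenate to the
   wide string literal L"abcd".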
This code is largely lifted from lex_string() in c-lex.c. FUTURE: ObjC++ will need to handle @-strings here. */ static tree cp_parser_string_literal (cp_parser *parser, bool translate, bool wide_ok, bool lookup_udlit = true) { tree value; size_t count; struct obstack str_ob; cpp_string str, istr, *strs; cp_token *tok; enum cpp_ttype type, curr_type; int have_suffix_p = 0; tree string_tree; tree suffix_id = NULL_TREE; bool curr_tok_is_userdef_p = false; tok = cp_lexer_peek_token (parser->lexer); if (!cp_parser_is_string_literal (tok)) { cp_parser_error (parser, "expected string-literal"); return error_mark_node; } if (cpp_userdef_string_p (tok->type)) { string_tree = USERDEF_LITERAL_VALUE (tok->u.value); curr_type = cpp_userdef_string_remove_type (tok->type); curr_tok_is_userdef_p = true; } else { string_tree = tok->u.value; curr_type = tok->type; } type = curr_type; /* Try to avoid the overhead of creating and destroying an obstack for the common case of just one string. */ if (!cp_parser_is_string_literal (cp_lexer_peek_nth_token (parser->lexer, 2))) { cp_lexer_consume_token (parser->lexer); str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree); str.len = TREE_STRING_LENGTH (string_tree); count = 1; if (curr_tok_is_userdef_p) { suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value); have_suffix_p = 1; curr_type = cpp_userdef_string_remove_type (tok->type); } else curr_type = tok->type; strs = &str; } else { gcc_obstack_init (&str_ob); count = 0; do { cp_lexer_consume_token (parser->lexer); count++; str.text = (const unsigned char *)TREE_STRING_POINTER (string_tree); str.len = TREE_STRING_LENGTH (string_tree); if (curr_tok_is_userdef_p) { tree curr_suffix_id = USERDEF_LITERAL_SUFFIX_ID (tok->u.value); if (have_suffix_p == 0) { suffix_id = curr_suffix_id; have_suffix_p = 1; } else if (have_suffix_p == 1 && curr_suffix_id != suffix_id) { error ("inconsistent user-defined literal suffixes" " %qD and %qD in string literal", suffix_id, curr_suffix_id); have_suffix_p = -1; } curr_type = cpp_userdef_string_remove_type (tok->type); } else curr_type = tok->type; if (type != curr_type) { if (type == CPP_STRING) type = curr_type; else if (curr_type != CPP_STRING) error_at (tok->location, "unsupported non-standard concatenation " "of string literals"); } obstack_grow (&str_ob, &str, sizeof (cpp_string)); tok = cp_lexer_peek_token (parser->lexer); if (cpp_userdef_string_p (tok->type)) { string_tree = USERDEF_LITERAL_VALUE (tok->u.value); curr_type = cpp_userdef_string_remove_type (tok->type); curr_tok_is_userdef_p = true; } else { string_tree = tok->u.value; curr_type = tok->type; curr_tok_is_userdef_p = false; } } while (cp_parser_is_string_literal (tok)); strs = (cpp_string *) obstack_finish (&str_ob); } if (type != CPP_STRING && !wide_ok) { cp_parser_error (parser, "a wide string is invalid in this context"); type = CPP_STRING; } if ((translate ? 
cpp_interpret_string : cpp_interpret_string_notranslate)
      (parse_in, strs, count, &istr, type))
    {
      value = build_string (istr.len, (const char *)istr.text);
      free (CONST_CAST (unsigned char *, istr.text));

      switch (type)
        {
        default:
        case CPP_STRING:
        case CPP_UTF8STRING:
          TREE_TYPE (value) = char_array_type_node;
          break;
        case CPP_STRING16:
          TREE_TYPE (value) = char16_array_type_node;
          break;
        case CPP_STRING32:
          TREE_TYPE (value) = char32_array_type_node;
          break;
        case CPP_WSTRING:
          TREE_TYPE (value) = wchar_array_type_node;
          break;
        }

      value = fix_string_type (value);

      if (have_suffix_p)
        {
          tree literal = build_userdef_literal (suffix_id, value,
                                                OT_NONE, NULL_TREE);
          if (lookup_udlit)
            value = cp_parser_userdef_string_literal (literal);
          else
            value = literal;
        }
    }
  else
    /* cpp_interpret_string has issued an error.  */
    value = error_mark_node;

  if (count > 1)
    obstack_free (&str_ob, 0);

  return value;
}

/* Look up a literal operator with the name and the exact arguments.  */

static tree
lookup_literal_operator (tree name, vec<tree, va_gc> *args)
{
  tree decl, fns;
  decl = lookup_name (name);
  if (!decl || !is_overloaded_fn (decl))
    return error_mark_node;

  for (fns = decl; fns; fns = OVL_NEXT (fns))
    {
      unsigned int ix;
      bool found = true;
      tree fn = OVL_CURRENT (fns);
      tree parmtypes = TYPE_ARG_TYPES (TREE_TYPE (fn));
      if (parmtypes != NULL_TREE)
        {
          for (ix = 0; ix < vec_safe_length (args) && parmtypes != NULL_TREE;
               ++ix, parmtypes = TREE_CHAIN (parmtypes))
            {
              tree tparm = TREE_VALUE (parmtypes);
              tree targ = TREE_TYPE ((*args)[ix]);
              bool ptr = TYPE_PTR_P (tparm);
              bool arr = TREE_CODE (targ) == ARRAY_TYPE;
              if ((ptr || arr || !same_type_p (tparm, targ))
                  && (!ptr || !arr
                      || !same_type_p (TREE_TYPE (tparm),
                                       TREE_TYPE (targ))))
                found = false;
            }
          if (found
              && ix == vec_safe_length (args)
              /* Maybe this should be sufficient_parms_p instead,
                 depending on how exactly user-defined literals should
                 work in the presence of default arguments on the
                 literal operator parameters.  */
              && parmtypes == void_list_node)
            return decl;
        }
    }

  return error_mark_node;
}

/* Parse a user-defined char constant.  Returns a call to a
   user-defined literal operator taking the character as an
   argument.  */

static tree
cp_parser_userdef_char_literal (cp_parser *parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);
  tree literal = token->u.value;
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree value = USERDEF_LITERAL_VALUE (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree decl, result;

  /* Build up a call to the user-defined operator.  */
  /* Lookup the name we got back from the id-expression.  */
  vec<tree, va_gc> *args = make_tree_vector ();
  vec_safe_push (args, value);
  decl = lookup_literal_operator (name, args);
  if (!decl || decl == error_mark_node)
    {
      error ("unable to find character literal operator %qD with %qT argument",
             name, TREE_TYPE (value));
      release_tree_vector (args);
      return error_mark_node;
    }
  result = finish_call_expr (decl, &args, false, true, tf_warning_or_error);
  release_tree_vector (args);
  return result;
}

/* A subroutine of cp_parser_userdef_numeric_literal to create a
   char... template parameter pack from a string node.  */

static tree
make_char_string_pack (tree value)
{
  tree charvec;
  tree argpack = make_node (NONTYPE_ARGUMENT_PACK);
  const char *str = TREE_STRING_POINTER (value);
  int i, len = TREE_STRING_LENGTH (value) - 1;
  tree argvec = make_tree_vec (1);

  /* Fill in CHARVEC with all of the parameters.
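For example, for the literal 123_x, VALUE holds the string "123" and
     CHARVEC becomes {'1', '2', '3'}.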
*/
  charvec = make_tree_vec (len);
  for (i = 0; i < len; ++i)
    TREE_VEC_ELT (charvec, i) = build_int_cst (char_type_node, str[i]);

  /* Build the argument packs.  */
  SET_ARGUMENT_PACK_ARGS (argpack, charvec);
  TREE_TYPE (argpack) = char_type_node;

  TREE_VEC_ELT (argvec, 0) = argpack;

  return argvec;
}

/* A subroutine of cp_parser_userdef_string_literal to create a
   CharT... template parameter pack, preceded by its character type,
   from a string node.  */

static tree
make_string_pack (tree value)
{
  tree charvec;
  tree argpack = make_node (NONTYPE_ARGUMENT_PACK);
  const unsigned char *str
    = (const unsigned char *) TREE_STRING_POINTER (value);
  int sz = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value))));
  int len = TREE_STRING_LENGTH (value) / sz - 1;
  tree argvec = make_tree_vec (2);

  tree str_char_type_node = TREE_TYPE (TREE_TYPE (value));
  str_char_type_node = TYPE_MAIN_VARIANT (str_char_type_node);

  /* First template parm is character type.  */
  TREE_VEC_ELT (argvec, 0) = str_char_type_node;

  /* Fill in CHARVEC with all of the parameters.  */
  charvec = make_tree_vec (len);
  for (int i = 0; i < len; ++i)
    TREE_VEC_ELT (charvec, i)
      = double_int_to_tree (str_char_type_node,
                            double_int::from_buffer (str + i * sz, sz));

  /* Build the argument packs.  */
  SET_ARGUMENT_PACK_ARGS (argpack, charvec);
  TREE_TYPE (argpack) = str_char_type_node;

  TREE_VEC_ELT (argvec, 1) = argpack;

  return argvec;
}

/* Parse a user-defined numeric constant.  Returns a call to a
   user-defined literal operator.  */

static tree
cp_parser_userdef_numeric_literal (cp_parser *parser)
{
  cp_token *token = cp_lexer_consume_token (parser->lexer);
  tree literal = token->u.value;
  tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal);
  tree value = USERDEF_LITERAL_VALUE (literal);
  int overflow = USERDEF_LITERAL_OVERFLOW (literal);
  tree num_string = USERDEF_LITERAL_NUM_STRING (literal);
  tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id));
  tree decl, result;
  vec<tree, va_gc> *args;

  /* Look for a literal operator taking the exact type of numeric
     argument as the literal value.  */
  args = make_tree_vector ();
  vec_safe_push (args, value);
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      result = finish_call_expr (decl, &args, false, true,
                                 tf_warning_or_error);

      if (TREE_CODE (TREE_TYPE (value)) == INTEGER_TYPE && overflow > 0)
        {
          warning_at (token->location, OPT_Woverflow,
                      "integer literal exceeds range of %qT type",
                      long_long_unsigned_type_node);
        }
      else
        {
          if (overflow > 0)
            warning_at (token->location, OPT_Woverflow,
                        "floating literal exceeds range of %qT type",
                        long_double_type_node);
          else if (overflow < 0)
            warning_at (token->location, OPT_Woverflow,
                        "floating literal truncated to zero");
        }

      release_tree_vector (args);
      return result;
    }
  release_tree_vector (args);

  /* If the numeric argument didn't work, look for a raw literal
     operator taking a const char* argument consisting of the number
     in string format.  */
  args = make_tree_vector ();
  vec_safe_push (args, num_string);
  decl = lookup_literal_operator (name, args);
  if (decl && decl != error_mark_node)
    {
      result = finish_call_expr (decl, &args, false, true,
                                 tf_warning_or_error);
      release_tree_vector (args);
      return result;
    }
  release_tree_vector (args);

  /* If the raw literal didn't work, look for a non-type template
     function with parameter pack char....  Call the function with
     template parameter characters representing the number.
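For example, 123_x may resolve to a call to
     operator"" _x<'1', '2', '3'> ().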
*/ args = make_tree_vector (); decl = lookup_literal_operator (name, args); if (decl && decl != error_mark_node) { tree tmpl_args = make_char_string_pack (num_string); decl = lookup_template_function (decl, tmpl_args); result = finish_call_expr (decl, &args, false, true, tf_warning_or_error); release_tree_vector (args); return result; } release_tree_vector (args); error ("unable to find numeric literal operator %qD", name); if (!cpp_get_options (parse_in)->ext_numeric_literals) inform (token->location, "use -std=gnu++11 or -fext-numeric-literals " "to enable more built-in suffixes"); return error_mark_node; } /* Parse a user-defined string constant. Returns a call to a user-defined literal operator taking a character pointer and the length of the string as arguments. */ static tree cp_parser_userdef_string_literal (tree literal) { tree suffix_id = USERDEF_LITERAL_SUFFIX_ID (literal); tree name = cp_literal_operator_id (IDENTIFIER_POINTER (suffix_id)); tree value = USERDEF_LITERAL_VALUE (literal); int len = TREE_STRING_LENGTH (value) / TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (value)))) - 1; tree decl, result; vec<tree, va_gc> *args; /* Build up a call to the user-defined operator. */ /* Lookup the name we got back from the id-expression. */ args = make_tree_vector (); vec_safe_push (args, value); vec_safe_push (args, build_int_cst (size_type_node, len)); decl = lookup_literal_operator (name, args); if (decl && decl != error_mark_node) { result = finish_call_expr (decl, &args, false, true, tf_warning_or_error); release_tree_vector (args); return result; } release_tree_vector (args); /* Look for a template function with typename parameter CharT and parameter pack CharT... Call the function with template parameter characters representing the string. */ args = make_tree_vector (); decl = lookup_literal_operator (name, args); if (decl && decl != error_mark_node) { tree tmpl_args = make_string_pack (value); decl = lookup_template_function (decl, tmpl_args); result = finish_call_expr (decl, &args, false, true, tf_warning_or_error); release_tree_vector (args); return result; } release_tree_vector (args); error ("unable to find string literal operator %qD with %qT, %qT arguments", name, TREE_TYPE (value), size_type_node); return error_mark_node; } /* Basic concepts [gram.basic] */ /* Parse a translation-unit. translation-unit: declaration-seq [opt] Returns TRUE if all went well. */ static bool cp_parser_translation_unit (cp_parser* parser) { /* The address of the first non-permanent object on the declarator obstack. */ static void *declarator_obstack_base; bool success; /* Create the declarator obstack, if necessary. */ if (!cp_error_declarator) { gcc_obstack_init (&declarator_obstack); /* Create the error declarator. */ cp_error_declarator = make_declarator (cdk_error); /* Create the empty parameter list. */ no_parameters = make_parameter_declarator (NULL, NULL, NULL_TREE); /* Remember where the base of the declarator obstack lies. */ declarator_obstack_base = obstack_next_free (&declarator_obstack); } cp_parser_declaration_seq_opt (parser); /* If there are no tokens left then all went well. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EOF)) { /* Get rid of the token array; we don't need it any more. */ cp_lexer_destroy (parser->lexer); parser->lexer = NULL; /* This file might have been a context that's implicitly extern "C". If so, pop the lang context. (Only relevant for PCH.) 
*/ if (parser->implicit_extern_c) { pop_lang_context (); parser->implicit_extern_c = false; } /* Finish up. */ finish_translation_unit (); success = true; } else { cp_parser_error (parser, "expected declaration"); success = false; } /* Make sure the declarator obstack was fully cleaned up. */ gcc_assert (obstack_next_free (&declarator_obstack) == declarator_obstack_base); /* All went well. */ return success; } /* Return the appropriate tsubst flags for parsing, possibly in N3276 decltype context. */ static inline tsubst_flags_t complain_flags (bool decltype_p) { tsubst_flags_t complain = tf_warning_or_error; if (decltype_p) complain |= tf_decltype; return complain; } /* We're about to parse a collection of statements. If we're currently parsing tentatively, set up a firewall so that any nested cp_parser_commit_to_tentative_parse won't affect the current context. */ static cp_token_position cp_parser_start_tentative_firewall (cp_parser *parser) { if (!cp_parser_uncommitted_to_tentative_parse_p (parser)) return 0; cp_parser_parse_tentatively (parser); cp_parser_commit_to_topmost_tentative_parse (parser); return cp_lexer_token_position (parser->lexer, false); } /* We've finished parsing the collection of statements. Wrap up the firewall and replace the relevant tokens with the parsed form. */ static void cp_parser_end_tentative_firewall (cp_parser *parser, cp_token_position start, tree expr) { if (!start) return; /* Finish the firewall level. */ cp_parser_parse_definitely (parser); /* And remember the result of the parse for when we try again. */ cp_token *token = cp_lexer_token_at (parser->lexer, start); token->type = CPP_PREPARSED_EXPR; token->u.value = expr; token->keyword = RID_MAX; cp_lexer_purge_tokens_after (parser->lexer, start); } /* Parse a GNU statement-expression, i.e. ({ stmts }), except for the enclosing parentheses. */ static tree cp_parser_statement_expr (cp_parser *parser) { cp_token_position start = cp_parser_start_tentative_firewall (parser); /* Consume the '('. */ cp_lexer_consume_token (parser->lexer); /* Start the statement-expression. */ tree expr = begin_stmt_expr (); /* Parse the compound-statement. */ cp_parser_compound_statement (parser, expr, false, false); /* Finish up. */ expr = finish_stmt_expr (expr, false); /* Consume the ')'. */ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_end_of_statement (parser); cp_parser_end_tentative_firewall (parser, start, expr); return expr; } /* Expressions [gram.expr] */ /* Parse a primary-expression. 
primary-expression: literal this ( expression ) id-expression lambda-expression (C++11) GNU Extensions: primary-expression: ( compound-statement ) __builtin_va_arg ( assignment-expression , type-id ) __builtin_offsetof ( type-id , offsetof-expression ) C++ Extensions: __has_nothrow_assign ( type-id ) __has_nothrow_constructor ( type-id ) __has_nothrow_copy ( type-id ) __has_trivial_assign ( type-id ) __has_trivial_constructor ( type-id ) __has_trivial_copy ( type-id ) __has_trivial_destructor ( type-id ) __has_virtual_destructor ( type-id ) __is_abstract ( type-id ) __is_base_of ( type-id , type-id ) __is_class ( type-id ) __is_empty ( type-id ) __is_enum ( type-id ) __is_final ( type-id ) __is_literal_type ( type-id ) __is_pod ( type-id ) __is_polymorphic ( type-id ) __is_std_layout ( type-id ) __is_trivial ( type-id ) __is_union ( type-id ) Objective-C++ Extension: primary-expression: objc-expression literal: __null ADDRESS_P is true iff this expression was immediately preceded by "&" and therefore might denote a pointer-to-member. CAST_P is true iff this expression is the target of a cast. TEMPLATE_ARG_P is true iff this expression is a template argument. Returns a representation of the expression. Upon return, *IDK indicates what kind of id-expression (if any) was present. */ static tree cp_parser_primary_expression (cp_parser *parser, bool address_p, bool cast_p, bool template_arg_p, bool decltype_p, cp_id_kind *idk) { cp_token *token = NULL; /* Assume the primary expression is not an id-expression. */ *idk = CP_ID_KIND_NONE; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); switch ((int) token->type) { /* literal: integer-literal character-literal floating-literal string-literal boolean-literal pointer-literal user-defined-literal */ case CPP_CHAR: case CPP_CHAR16: case CPP_CHAR32: case CPP_WCHAR: case CPP_NUMBER: case CPP_PREPARSED_EXPR: if (TREE_CODE (token->u.value) == USERDEF_LITERAL) return cp_parser_userdef_numeric_literal (parser); token = cp_lexer_consume_token (parser->lexer); if (TREE_CODE (token->u.value) == FIXED_CST) { error_at (token->location, "fixed-point types not supported in C++"); return error_mark_node; } /* Floating-point literals are only allowed in an integral constant expression if they are cast to an integral or enumeration type. */ if (TREE_CODE (token->u.value) == REAL_CST && parser->integral_constant_expression_p && pedantic) { /* CAST_P will be set even in invalid code like "int(2.7 + ...)". Therefore, we have to check that the next token is sure to end the cast. */ if (cast_p) { cp_token *next_token; next_token = cp_lexer_peek_token (parser->lexer); if (/* The comma at the end of an enumerator-definition. */ next_token->type != CPP_COMMA /* The curly brace at the end of an enum-specifier. */ && next_token->type != CPP_CLOSE_BRACE /* The end of a statement. */ && next_token->type != CPP_SEMICOLON /* The end of the cast-expression. */ && next_token->type != CPP_CLOSE_PAREN /* The end of an array bound. */ && next_token->type != CPP_CLOSE_SQUARE /* The closing ">" in a template-argument-list. */ && (next_token->type != CPP_GREATER || parser->greater_than_is_operator_p) /* C++0x only: A ">>" treated like two ">" tokens, in a template-argument-list. */ && (next_token->type != CPP_RSHIFT || (cxx_dialect == cxx98) || parser->greater_than_is_operator_p)) cast_p = false; } /* If we are within a cast, then the constraint that the cast is to an integral or enumeration type will be checked at that point. 
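(For instance, `int (2.7)' may appear in an integral constant expression, because the floating literal is immediately converted to an integral type, whereas a bare `2.7' may not.)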
If we are not within a cast, then this code is invalid. */ if (!cast_p) cp_parser_non_integral_constant_expression (parser, NIC_FLOAT); } return token->u.value; case CPP_CHAR_USERDEF: case CPP_CHAR16_USERDEF: case CPP_CHAR32_USERDEF: case CPP_WCHAR_USERDEF: return cp_parser_userdef_char_literal (parser); case CPP_STRING: case CPP_STRING16: case CPP_STRING32: case CPP_WSTRING: case CPP_UTF8STRING: case CPP_STRING_USERDEF: case CPP_STRING16_USERDEF: case CPP_STRING32_USERDEF: case CPP_WSTRING_USERDEF: case CPP_UTF8STRING_USERDEF: /* ??? Should wide strings be allowed when parser->translate_strings_p is false (i.e. in attributes)? If not, we can kill the third argument to cp_parser_string_literal. */ return cp_parser_string_literal (parser, parser->translate_strings_p, true); case CPP_OPEN_PAREN: /* If we see `( { ' then we are looking at the beginning of a GNU statement-expression. */ if (cp_parser_allow_gnu_extensions_p (parser) && cp_lexer_nth_token_is (parser->lexer, 2, CPP_OPEN_BRACE)) { /* Statement-expressions are not allowed by the standard. */ pedwarn (token->location, OPT_Wpedantic, "ISO C++ forbids braced-groups within expressions"); /* And they're not allowed outside of a function-body; you cannot, for example, write: int i = ({ int j = 3; j + 1; }); at class or namespace scope. */ if (!parser->in_function_body || parser->in_template_argument_list_p) { error_at (token->location, "statement-expressions are not allowed outside " "functions nor in template-argument lists"); cp_parser_skip_to_end_of_block_or_statement (parser); if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)) cp_lexer_consume_token (parser->lexer); return error_mark_node; } else return cp_parser_statement_expr (parser); } /* Otherwise it's a normal parenthesized expression. */ { tree expr; bool saved_greater_than_is_operator_p; /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); /* Within a parenthesized expression, a `>' token is always the greater-than operator. */ saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = true; /* Parse the parenthesized expression. */ expr = cp_parser_expression (parser, idk, cast_p, decltype_p); /* Let the front end know that this expression was enclosed in parentheses. This matters in case, for example, the expression is of the form `A::B', since `&A::B' might be a pointer-to-member, but `&(A::B)' is not. */ expr = finish_parenthesized_expr (expr); /* DR 705: Wrapping an unqualified name in parentheses suppresses arg-dependent lookup. We want to pass back CP_ID_KIND_QUALIFIED for suppressing vtable lookup (c++/37862), but none of the others. */ if (*idk != CP_ID_KIND_QUALIFIED) *idk = CP_ID_KIND_NONE; /* The `>' token might be the end of a template-id or template-parameter-list now. */ parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; /* Consume the `)'. */ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_end_of_statement (parser); return expr; } case CPP_OPEN_SQUARE: { if (c_dialect_objc ()) { /* We might have an Objective-C++ message. */ cp_parser_parse_tentatively (parser); tree msg = cp_parser_objc_message_expression (parser); /* If that works out, we're done ... */ if (cp_parser_parse_definitely (parser)) return msg; /* ... else, fall through to see if it's a lambda. */ } tree lam = cp_parser_lambda_expression (parser); /* Don't warn about a failed tentative parse.
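(In Objective-C++ a leading `[' is ambiguous: `[receiver message]' is a message expression, while, illustratively, `[=] () { return 0; }' begins a lambda; the tentative parse above distinguishes the two.)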
*/ if (cp_parser_error_occurred (parser)) return error_mark_node; maybe_warn_cpp0x (CPP0X_LAMBDA_EXPR); return lam; } case CPP_OBJC_STRING: if (c_dialect_objc ()) /* We have an Objective-C++ string literal. */ return cp_parser_objc_expression (parser); cp_parser_error (parser, "expected primary-expression"); return error_mark_node; case CPP_KEYWORD: switch (token->keyword) { /* These two are the boolean literals. */ case RID_TRUE: cp_lexer_consume_token (parser->lexer); return boolean_true_node; case RID_FALSE: cp_lexer_consume_token (parser->lexer); return boolean_false_node; /* The `__null' literal. */ case RID_NULL: cp_lexer_consume_token (parser->lexer); return null_node; /* The `nullptr' literal. */ case RID_NULLPTR: cp_lexer_consume_token (parser->lexer); return nullptr_node; /* Recognize the `this' keyword. */ case RID_THIS: cp_lexer_consume_token (parser->lexer); if (parser->local_variables_forbidden_p) { error_at (token->location, "%<this%> may not be used in this context"); return error_mark_node; } /* Pointers cannot appear in constant-expressions. */ if (cp_parser_non_integral_constant_expression (parser, NIC_THIS)) return error_mark_node; return finish_this_expr (); /* The `operator' keyword can be the beginning of an id-expression. */ case RID_OPERATOR: goto id_expression; case RID_FUNCTION_NAME: case RID_PRETTY_FUNCTION_NAME: case RID_C99_FUNCTION_NAME: { non_integral_constant name; /* The symbols __FUNCTION__, __PRETTY_FUNCTION__, and __func__ are the names of variables -- but they are treated specially. Therefore, they are handled here, rather than relying on the generic id-expression logic below. Grammatically, these names are id-expressions. Consume the token. */ token = cp_lexer_consume_token (parser->lexer); switch (token->keyword) { case RID_FUNCTION_NAME: name = NIC_FUNC_NAME; break; case RID_PRETTY_FUNCTION_NAME: name = NIC_PRETTY_FUNC; break; case RID_C99_FUNCTION_NAME: name = NIC_C99_FUNC; break; default: gcc_unreachable (); } if (cp_parser_non_integral_constant_expression (parser, name)) return error_mark_node; /* Look up the name. */ return finish_fname (token->u.value); } case RID_VA_ARG: { tree expression; tree type; source_location type_location; /* The `__builtin_va_arg' construct is used to handle `va_arg'. Consume the `__builtin_va_arg' token. */ cp_lexer_consume_token (parser->lexer); /* Look for the opening `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Now, parse the assignment-expression. */ expression = cp_parser_assignment_expression (parser); /* Look for the `,'. */ cp_parser_require (parser, CPP_COMMA, RT_COMMA); type_location = cp_lexer_peek_token (parser->lexer)->location; /* Parse the type-id. */ type = cp_parser_type_id (parser); /* Look for the closing `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* Using `va_arg' in a constant-expression is not allowed. 
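(The construct parsed here looks like `__builtin_va_arg (ap, int)', which is what the `va_arg' macro expands to.)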
*/ if (cp_parser_non_integral_constant_expression (parser, NIC_VA_ARG)) return error_mark_node; return build_x_va_arg (type_location, expression, type); } case RID_OFFSETOF: return cp_parser_builtin_offsetof (parser); case RID_HAS_NOTHROW_ASSIGN: case RID_HAS_NOTHROW_CONSTRUCTOR: case RID_HAS_NOTHROW_COPY: case RID_HAS_TRIVIAL_ASSIGN: case RID_HAS_TRIVIAL_CONSTRUCTOR: case RID_HAS_TRIVIAL_COPY: case RID_HAS_TRIVIAL_DESTRUCTOR: case RID_HAS_VIRTUAL_DESTRUCTOR: case RID_IS_ABSTRACT: case RID_IS_BASE_OF: case RID_IS_CLASS: case RID_IS_EMPTY: case RID_IS_ENUM: case RID_IS_FINAL: case RID_IS_LITERAL_TYPE: case RID_IS_POD: case RID_IS_POLYMORPHIC: case RID_IS_STD_LAYOUT: case RID_IS_TRIVIAL: case RID_IS_TRIVIALLY_ASSIGNABLE: case RID_IS_TRIVIALLY_CONSTRUCTIBLE: case RID_IS_TRIVIALLY_COPYABLE: case RID_IS_UNION: return cp_parser_trait_expr (parser, token->keyword); /* Objective-C++ expressions. */ case RID_AT_ENCODE: case RID_AT_PROTOCOL: case RID_AT_SELECTOR: return cp_parser_objc_expression (parser); case RID_TEMPLATE: if (parser->in_function_body && (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS)) { error_at (token->location, "a template declaration cannot appear at block scope"); cp_parser_skip_to_end_of_block_or_statement (parser); return error_mark_node; } default: cp_parser_error (parser, "expected primary-expression"); return error_mark_node; } /* An id-expression can start with either an identifier, a `::' as the beginning of a qualified-id, or the "operator" keyword. */ case CPP_NAME: case CPP_SCOPE: case CPP_TEMPLATE_ID: case CPP_NESTED_NAME_SPECIFIER: { tree id_expression; tree decl; const char *error_msg; bool template_p; bool done; cp_token *id_expr_token; id_expression: /* Parse the id-expression. */ id_expression = cp_parser_id_expression (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, &template_p, /*declarator_p=*/false, /*optional_p=*/false); if (id_expression == error_mark_node) return error_mark_node; id_expr_token = token; token = cp_lexer_peek_token (parser->lexer); done = (token->type != CPP_OPEN_SQUARE && token->type != CPP_OPEN_PAREN && token->type != CPP_DOT && token->type != CPP_DEREF && token->type != CPP_PLUS_PLUS && token->type != CPP_MINUS_MINUS); /* If we have a template-id, then no further lookup is required. If the template-id was for a template-class, we will sometimes have a TYPE_DECL at this point. */ if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR || TREE_CODE (id_expression) == TYPE_DECL) decl = id_expression; /* Look up the name. */ else { tree ambiguous_decls; /* If we already know that this lookup is ambiguous, then we've already issued an error message; there's no reason to check again. */ if (id_expr_token->type == CPP_NAME && id_expr_token->error_reported) { cp_parser_simulate_error (parser); return error_mark_node; } decl = cp_parser_lookup_name (parser, id_expression, none_type, template_p, /*is_namespace=*/false, /*check_dependency=*/true, &ambiguous_decls, id_expr_token->location); /* If the lookup was ambiguous, an error will already have been issued. */ if (ambiguous_decls) return error_mark_node; /* In Objective-C++, we may have an Objective-C 2.0 dot-syntax for classes here. 
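E.g. (illustrative) `MyClass.someProperty', which objc_build_class_component_ref below turns into the equivalent of `[MyClass someProperty]'.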
*/ if (c_dialect_objc () && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT && TREE_CODE (decl) == TYPE_DECL && objc_is_class_name (decl)) { tree component; cp_lexer_consume_token (parser->lexer); component = cp_parser_identifier (parser); if (component == error_mark_node) return error_mark_node; return objc_build_class_component_ref (id_expression, component); } /* In Objective-C++, an instance variable (ivar) may be preferred to whatever cp_parser_lookup_name() found. */ decl = objc_lookup_ivar (decl, id_expression); /* If name lookup gives us a SCOPE_REF, then the qualifying scope was dependent. */ if (TREE_CODE (decl) == SCOPE_REF) { /* At this point, we do not know if DECL is a valid integral constant expression. We assume that it is in fact such an expression, so that code like: template <int N> struct A { int a[B<N>::i]; }; is accepted. At template-instantiation time, we will check that B<N>::i is actually a constant. */ return decl; } /* Check to see if DECL is a local variable in a context where that is forbidden. */ if (parser->local_variables_forbidden_p && local_variable_p (decl)) { /* It might be that we only found DECL because we are trying to be generous with pre-ISO scoping rules. For example, consider: int i; void g() { for (int i = 0; i < 10; ++i) {} extern void f(int j = i); } Here, name look up will originally find the out of scope `i'. We need to issue a warning message, but then use the global `i'. */ decl = check_for_out_of_scope_variable (decl); if (local_variable_p (decl)) { error_at (id_expr_token->location, "local variable %qD may not appear in this context", decl); return error_mark_node; } } } decl = (finish_id_expression (id_expression, decl, parser->scope, idk, parser->integral_constant_expression_p, parser->allow_non_integral_constant_expression_p, &parser->non_integral_constant_expression_p, template_p, done, address_p, template_arg_p, &error_msg, id_expr_token->location)); if (error_msg) cp_parser_error (parser, error_msg); return decl; } /* Anything else is an error. */ default: cp_parser_error (parser, "expected primary-expression"); return error_mark_node; } } static inline tree cp_parser_primary_expression (cp_parser *parser, bool address_p, bool cast_p, bool template_arg_p, cp_id_kind *idk) { return cp_parser_primary_expression (parser, address_p, cast_p, template_arg_p, /*decltype*/false, idk); } /* Parse an id-expression. id-expression: unqualified-id qualified-id qualified-id: :: [opt] nested-name-specifier template [opt] unqualified-id :: identifier :: operator-function-id :: template-id Return a representation of the unqualified portion of the identifier. Sets PARSER->SCOPE to the qualifying scope if there is a `::' or nested-name-specifier. Often, if the id-expression was a qualified-id, the caller will want to make a SCOPE_REF to represent the qualified-id. This function does not do this in order to avoid wastefully creating SCOPE_REFs when they are not required. If TEMPLATE_KEYWORD_P is true, then we have just seen the `template' keyword. If CHECK_DEPENDENCY_P is false, then names are looked up inside uninstantiated templates. If *TEMPLATE_P is non-NULL, it is set to true iff the `template' keyword is used to explicitly indicate that the entity named is a template. If DECLARATOR_P is true, the id-expression is appearing as part of a declarator, rather than as part of an expression. 
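Illustrative examples of what this function accepts: `i' (an unqualified-id), `S::i' and `::N::f' (qualified-ids), `operator +' (an operator-function-id), and `S<int>::f' (a qualified-id whose unqualified portion follows a nested-name-specifier).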
*/ static tree cp_parser_id_expression (cp_parser *parser, bool template_keyword_p, bool check_dependency_p, bool *template_p, bool declarator_p, bool optional_p) { bool global_scope_p; bool nested_name_specifier_p; /* Assume the `template' keyword was not used. */ if (template_p) *template_p = template_keyword_p; /* Look for the optional `::' operator. */ global_scope_p = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false) != NULL_TREE); /* Look for the optional nested-name-specifier. */ nested_name_specifier_p = (cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/false, check_dependency_p, /*type_p=*/false, declarator_p) != NULL_TREE); /* If there is a nested-name-specifier, then we are looking at the first qualified-id production. */ if (nested_name_specifier_p) { tree saved_scope; tree saved_object_scope; tree saved_qualifying_scope; tree unqualified_id; bool is_template; /* See if the next token is the `template' keyword. */ if (!template_p) template_p = &is_template; *template_p = cp_parser_optional_template_keyword (parser); /* Name lookup we do during the processing of the unqualified-id might obliterate SCOPE. */ saved_scope = parser->scope; saved_object_scope = parser->object_scope; saved_qualifying_scope = parser->qualifying_scope; /* Process the final unqualified-id. */ unqualified_id = cp_parser_unqualified_id (parser, *template_p, check_dependency_p, declarator_p, /*optional_p=*/false); /* Restore the SAVED_SCOPE for our caller. */ parser->scope = saved_scope; parser->object_scope = saved_object_scope; parser->qualifying_scope = saved_qualifying_scope; return unqualified_id; } /* Otherwise, if we are in global scope, then we are looking at one of the other qualified-id productions. */ else if (global_scope_p) { cp_token *token; tree id; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's an identifier, and the next token is not a "<", then we can avoid the template-id case. This is an optimization for this common case. */ if (token->type == CPP_NAME && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2)) return cp_parser_identifier (parser); cp_parser_parse_tentatively (parser); /* Try a template-id. */ id = cp_parser_template_id (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, none_type, declarator_p); /* If that worked, we're done. */ if (cp_parser_parse_definitely (parser)) return id; /* Peek at the next token. (Changes in the token buffer may have invalidated the pointer obtained above.) */ token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_NAME: return cp_parser_identifier (parser); case CPP_KEYWORD: if (token->keyword == RID_OPERATOR) return cp_parser_operator_function_id (parser); /* Fall through. */ default: cp_parser_error (parser, "expected id-expression"); return error_mark_node; } } else return cp_parser_unqualified_id (parser, template_keyword_p, /*check_dependency_p=*/true, declarator_p, optional_p); } /* Parse an unqualified-id. unqualified-id: identifier operator-function-id conversion-function-id ~ class-name template-id If TEMPLATE_KEYWORD_P is TRUE, we have just seen the `template' keyword, in a construct like `A::template ...'. Returns a representation of unqualified-id. For the `identifier' production, an IDENTIFIER_NODE is returned. For the `~ class-name' production a BIT_NOT_EXPR is returned; the operand of the BIT_NOT_EXPR is an IDENTIFIER_NODE for the class-name. 
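(So `~S' is returned as a BIT_NOT_EXPR whose operand is the IDENTIFIER_NODE for `S'.)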
For the other productions, see the documentation accompanying the corresponding parsing functions. If CHECK_DEPENDENCY_P is false, names are looked up in uninstantiated templates. If DECLARATOR_P is true, the unqualified-id is appearing as part of a declarator, rather than as part of an expression. */ static tree cp_parser_unqualified_id (cp_parser* parser, bool template_keyword_p, bool check_dependency_p, bool declarator_p, bool optional_p) { cp_token *token; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); switch ((int) token->type) { case CPP_NAME: { tree id; /* We don't know yet whether or not this will be a template-id. */ cp_parser_parse_tentatively (parser); /* Try a template-id. */ id = cp_parser_template_id (parser, template_keyword_p, check_dependency_p, none_type, declarator_p); /* If it worked, we're done. */ if (cp_parser_parse_definitely (parser)) return id; /* Otherwise, it's an ordinary identifier. */ return cp_parser_identifier (parser); } case CPP_TEMPLATE_ID: return cp_parser_template_id (parser, template_keyword_p, check_dependency_p, none_type, declarator_p); case CPP_COMPL: { tree type_decl; tree qualifying_scope; tree object_scope; tree scope; bool done; /* Consume the `~' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the class-name. The standard, as written, seems to say that: template <typename T> struct S { ~S (); }; template <typename T> S<T>::~S() {} is invalid, since `~' must be followed by a class-name, but `S<T>' is dependent, and so not known to be a class. That's not right; we need to look in uninstantiated templates. A further complication arises from: template <typename T> void f(T t) { t.T::~T(); } Here, it is not possible to look up `T' in the scope of `T' itself. We must look in both the current scope, and the scope of the containing complete expression. Yet another issue is: struct S { int S; ~S(); }; S::~S() {} The standard does not seem to say that the `S' in `~S' should refer to the type `S' and not the data member `S::S'. */ /* DR 244 says that we look up the name after the "~" in the same scope as we looked up the qualifying name. That idea isn't fully worked out; it's more complicated than that. */ scope = parser->scope; object_scope = parser->object_scope; qualifying_scope = parser->qualifying_scope; /* Check for invalid scopes. */ if (scope == error_mark_node) { if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) cp_lexer_consume_token (parser->lexer); return error_mark_node; } if (scope && TREE_CODE (scope) == NAMESPACE_DECL) { if (!cp_parser_uncommitted_to_tentative_parse_p (parser)) error_at (token->location, "scope %qT before %<~%> is not a class-name", scope); cp_parser_simulate_error (parser); if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) cp_lexer_consume_token (parser->lexer); return error_mark_node; } gcc_assert (!scope || TYPE_P (scope)); /* If the name is of the form "X::~X" it's OK even if X is a typedef. */ token = cp_lexer_peek_token (parser->lexer); if (scope && token->type == CPP_NAME && (cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_LESS) && (token->u.value == TYPE_IDENTIFIER (scope) || (CLASS_TYPE_P (scope) && constructor_name_p (token->u.value, scope)))) { cp_lexer_consume_token (parser->lexer); return build_nt (BIT_NOT_EXPR, scope); } /* ~auto means the destructor of whatever the object is. 
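For example (C++14, illustrative): in `template <typename T> void f (T *p) { p->~auto (); }' the destructor of whatever type *p actually has is invoked.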
*/ if (cp_parser_is_keyword (token, RID_AUTO)) { if (cxx_dialect < cxx14) pedwarn (input_location, 0, "%<~auto%> only available with " "-std=c++14 or -std=gnu++14"); cp_lexer_consume_token (parser->lexer); return build_nt (BIT_NOT_EXPR, make_auto ()); } /* If there was an explicit qualification (S::~T), first look in the scope given by the qualification (i.e., S). Note: in the calls to cp_parser_class_name below we pass typename_type so that lookup finds the injected-class-name rather than the constructor. */ done = false; type_decl = NULL_TREE; if (scope) { cp_parser_parse_tentatively (parser); type_decl = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, typename_type, /*check_dependency=*/false, /*class_head_p=*/false, declarator_p); if (cp_parser_parse_definitely (parser)) done = true; } /* In "N::S::~S", look in "N" as well. */ if (!done && scope && qualifying_scope) { cp_parser_parse_tentatively (parser); parser->scope = qualifying_scope; parser->object_scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; type_decl = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, typename_type, /*check_dependency=*/false, /*class_head_p=*/false, declarator_p); if (cp_parser_parse_definitely (parser)) done = true; } /* In "p->S::~T", look in the scope given by "*p" as well. */ else if (!done && object_scope) { cp_parser_parse_tentatively (parser); parser->scope = object_scope; parser->object_scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; type_decl = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, typename_type, /*check_dependency=*/false, /*class_head_p=*/false, declarator_p); if (cp_parser_parse_definitely (parser)) done = true; } /* Look in the surrounding context. */ if (!done) { parser->scope = NULL_TREE; parser->object_scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; if (processing_template_decl) cp_parser_parse_tentatively (parser); type_decl = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, typename_type, /*check_dependency=*/false, /*class_head_p=*/false, declarator_p); if (processing_template_decl && ! cp_parser_parse_definitely (parser)) { /* We couldn't find a type with this name, so just accept it and check for a match at instantiation time. */ type_decl = cp_parser_identifier (parser); if (type_decl != error_mark_node) type_decl = build_nt (BIT_NOT_EXPR, type_decl); return type_decl; } } /* If an error occurred, assume that the name of the destructor is the same as the name of the qualifying class. That allows us to keep parsing after running into ill-formed destructor names. */ if (type_decl == error_mark_node && scope) return build_nt (BIT_NOT_EXPR, scope); else if (type_decl == error_mark_node) return error_mark_node; /* Check that destructor name and scope match. */ if (declarator_p && scope && !check_dtor_name (scope, type_decl)) { if (!cp_parser_uncommitted_to_tentative_parse_p (parser)) error_at (token->location, "declaration of %<~%T%> as member of %qT", type_decl, scope); cp_parser_simulate_error (parser); return error_mark_node; } /* [class.dtor] A typedef-name that names a class shall not be used as the identifier in the declarator for a destructor declaration. 
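For example (illustrative): given `struct S { ~S (); }; typedef S T;', the definition `T::~T () { }' is ill-formed; it must be written `S::~S () { }'.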
*/ if (declarator_p && !DECL_IMPLICIT_TYPEDEF_P (type_decl) && !DECL_SELF_REFERENCE_P (type_decl) && !cp_parser_uncommitted_to_tentative_parse_p (parser)) error_at (token->location, "typedef-name %qD used as destructor declarator", type_decl); return build_nt (BIT_NOT_EXPR, TREE_TYPE (type_decl)); } case CPP_KEYWORD: if (token->keyword == RID_OPERATOR) { tree id; /* This could be a template-id, so we try that first. */ cp_parser_parse_tentatively (parser); /* Try a template-id. */ id = cp_parser_template_id (parser, template_keyword_p, /*check_dependency_p=*/true, none_type, declarator_p); /* If that worked, we're done. */ if (cp_parser_parse_definitely (parser)) return id; /* We still don't know whether we're looking at an operator-function-id or a conversion-function-id. */ cp_parser_parse_tentatively (parser); /* Try an operator-function-id. */ id = cp_parser_operator_function_id (parser); /* If that didn't work, try a conversion-function-id. */ if (!cp_parser_parse_definitely (parser)) id = cp_parser_conversion_function_id (parser); else if (UDLIT_OPER_P (id)) { /* 17.6.3.3.5 */ const char *name = UDLIT_OP_SUFFIX (id); if (name[0] != '_' && !in_system_header_at (input_location) && declarator_p) warning (0, "literal operator suffixes not preceded by %<_%>" " are reserved for future standardization"); } return id; } /* Fall through. */ default: if (optional_p) return NULL_TREE; cp_parser_error (parser, "expected unqualified-id"); return error_mark_node; } } /* Parse an (optional) nested-name-specifier. nested-name-specifier: [C++98] class-or-namespace-name :: nested-name-specifier [opt] class-or-namespace-name :: template nested-name-specifier [opt] nested-name-specifier: [C++0x] type-name :: namespace-name :: nested-name-specifier identifier :: nested-name-specifier template [opt] simple-template-id :: PARSER->SCOPE should be set appropriately before this function is called. TYPENAME_KEYWORD_P is TRUE if the `typename' keyword is in effect. TYPE_P is TRUE if non-type bindings should be ignored in name lookups. Sets PARSER->SCOPE to the class (TYPE) or namespace (NAMESPACE_DECL) specified by the nested-name-specifier, or leaves it unchanged if there is no nested-name-specifier. Returns the new scope iff there is a nested-name-specifier, or NULL_TREE otherwise. If IS_DECLARATION is TRUE, the nested-name-specifier is known to be part of a declaration and/or decl-specifier. */ static tree cp_parser_nested_name_specifier_opt (cp_parser *parser, bool typename_keyword_p, bool check_dependency_p, bool type_p, bool is_declaration) { bool success = false; cp_token_position start = 0; cp_token *token; /* Remember where the nested-name-specifier starts. */ if (cp_parser_uncommitted_to_tentative_parse_p (parser)) { start = cp_lexer_token_position (parser->lexer, false); push_deferring_access_checks (dk_deferred); } while (true) { tree new_scope; tree old_scope; tree saved_qualifying_scope; bool template_keyword_p; /* Spot cases that cannot be the beginning of a nested-name-specifier. */ token = cp_lexer_peek_token (parser->lexer); /* If the next token is CPP_NESTED_NAME_SPECIFIER, just process the already parsed nested-name-specifier. */ if (token->type == CPP_NESTED_NAME_SPECIFIER) { /* Grab the nested-name-specifier and continue the loop. */ cp_parser_pre_parsed_nested_name_specifier (parser); /* If we originally encountered this nested-name-specifier with IS_DECLARATION set to false, we will not have resolved TYPENAME_TYPEs, so we must do so here.
*/ if (is_declaration && TREE_CODE (parser->scope) == TYPENAME_TYPE) { new_scope = resolve_typename_type (parser->scope, /*only_current_p=*/false); if (TREE_CODE (new_scope) != TYPENAME_TYPE) parser->scope = new_scope; } success = true; continue; } /* Spot cases that cannot be the beginning of a nested-name-specifier. On the second and subsequent times through the loop, we look for the `template' keyword. */ if (success && token->keyword == RID_TEMPLATE) ; /* A template-id can start a nested-name-specifier. */ else if (token->type == CPP_TEMPLATE_ID) ; /* DR 743: decltype can be used in a nested-name-specifier. */ else if (token_is_decltype (token)) ; else { /* If the next token is not an identifier, then it is definitely not a type-name or namespace-name. */ if (token->type != CPP_NAME) break; /* If the following token is neither a `<' (to begin a template-id), nor a `::', then we are not looking at a nested-name-specifier. */ token = cp_lexer_peek_nth_token (parser->lexer, 2); if (token->type == CPP_COLON && parser->colon_corrects_to_scope_p && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_NAME) { error_at (token->location, "found %<:%> in nested-name-specifier, expected %<::%>"); token->type = CPP_SCOPE; } if (token->type != CPP_SCOPE && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2)) break; } /* The nested-name-specifier is optional, so we parse tentatively. */ cp_parser_parse_tentatively (parser); /* Look for the optional `template' keyword, if this isn't the first time through the loop. */ if (success) template_keyword_p = cp_parser_optional_template_keyword (parser); else template_keyword_p = false; /* Save the old scope since the name lookup we are about to do might destroy it. */ old_scope = parser->scope; saved_qualifying_scope = parser->qualifying_scope; /* In a declarator-id like "X<T>::I::Y<T>" we must be able to look up names in "X<T>::I" in order to determine that "Y" is a template. So, if we have a typename at this point, we make an effort to look through it. */ if (is_declaration && !typename_keyword_p && parser->scope && TREE_CODE (parser->scope) == TYPENAME_TYPE) parser->scope = resolve_typename_type (parser->scope, /*only_current_p=*/false); /* Parse the qualifying entity. */ new_scope = cp_parser_qualifying_entity (parser, typename_keyword_p, template_keyword_p, check_dependency_p, type_p, is_declaration); /* Look for the `::' token. */ cp_parser_require (parser, CPP_SCOPE, RT_SCOPE); /* If we found what we wanted, we keep going; otherwise, we're done. */ if (!cp_parser_parse_definitely (parser)) { bool error_p = false; /* Restore the OLD_SCOPE since it was valid before the failed attempt at finding the last class-or-namespace-name. */ parser->scope = old_scope; parser->qualifying_scope = saved_qualifying_scope; /* If the next token is a decltype, and the one after that is a `::', then the decltype has failed to resolve to a class or enumeration type. Give this error even when parsing tentatively since it can't possibly be valid--and we're going to replace it with a CPP_NESTED_NAME_SPECIFIER below, so we won't get another chance.*/ if (cp_lexer_next_token_is (parser->lexer, CPP_DECLTYPE) && (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_SCOPE)) { token = cp_lexer_consume_token (parser->lexer); error_at (token->location, "decltype evaluates to %qT, " "which is not a class or enumeration type", token->u.value); parser->scope = error_mark_node; error_p = true; /* As below. 
*/ success = true; cp_lexer_consume_token (parser->lexer); } if (cp_lexer_next_token_is (parser->lexer, CPP_TEMPLATE_ID) && cp_lexer_nth_token_is (parser->lexer, 2, CPP_SCOPE)) { /* If we have a non-type template-id followed by ::, it can't possibly be valid. */ token = cp_lexer_peek_token (parser->lexer); tree tid = token->u.tree_check_value->value; if (TREE_CODE (tid) == TEMPLATE_ID_EXPR && TREE_CODE (TREE_OPERAND (tid, 0)) != IDENTIFIER_NODE) { tree tmpl = NULL_TREE; if (is_overloaded_fn (tid)) { tree fns = get_fns (tid); if (!OVL_CHAIN (fns)) tmpl = OVL_CURRENT (fns); error_at (token->location, "function template-id %qD " "in nested-name-specifier", tid); } else { /* Variable template. */ tmpl = TREE_OPERAND (tid, 0); gcc_assert (variable_template_p (tmpl)); error_at (token->location, "variable template-id %qD " "in nested-name-specifier", tid); } if (tmpl) inform (DECL_SOURCE_LOCATION (tmpl), "%qD declared here", tmpl); parser->scope = error_mark_node; error_p = true; /* As below. */ success = true; cp_lexer_consume_token (parser->lexer); cp_lexer_consume_token (parser->lexer); } } if (cp_parser_uncommitted_to_tentative_parse_p (parser)) break; /* If the next token is an identifier, and the one after that is a `::', then any valid interpretation would have found a class-or-namespace-name. */ while (cp_lexer_next_token_is (parser->lexer, CPP_NAME) && (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_SCOPE) && (cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_COMPL)) { token = cp_lexer_consume_token (parser->lexer); if (!error_p) { if (!token->error_reported) { tree decl; tree ambiguous_decls; decl = cp_parser_lookup_name (parser, token->u.value, none_type, /*is_template=*/false, /*is_namespace=*/false, /*check_dependency=*/true, &ambiguous_decls, token->location); if (TREE_CODE (decl) == TEMPLATE_DECL) error_at (token->location, "%qD used without template parameters", decl); else if (ambiguous_decls) { // cp_parser_lookup_name has the same diagnostic, // thus make sure to emit it at most once. if (cp_parser_uncommitted_to_tentative_parse_p (parser)) { error_at (token->location, "reference to %qD is ambiguous", token->u.value); print_candidates (ambiguous_decls); } decl = error_mark_node; } else { if (cxx_dialect != cxx98) cp_parser_name_lookup_error (parser, token->u.value, decl, NLE_NOT_CXX98, token->location); else cp_parser_name_lookup_error (parser, token->u.value, decl, NLE_CXX98, token->location); } } parser->scope = error_mark_node; error_p = true; /* Treat this as a successful nested-name-specifier due to: [basic.lookup.qual] If the name found is not a class-name (clause _class_) or namespace-name (_namespace.def_), the program is ill-formed. */ success = true; } cp_lexer_consume_token (parser->lexer); } break; } /* We've found one valid nested-name-specifier. */ success = true; /* Name lookup always gives us a DECL. */ if (TREE_CODE (new_scope) == TYPE_DECL) new_scope = TREE_TYPE (new_scope); /* Uses of "template" must be followed by actual templates. */ if (template_keyword_p && !(CLASS_TYPE_P (new_scope) && ((CLASSTYPE_USE_TEMPLATE (new_scope) && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (new_scope))) || CLASSTYPE_IS_TEMPLATE (new_scope))) && !(TREE_CODE (new_scope) == TYPENAME_TYPE && (TREE_CODE (TYPENAME_TYPE_FULLNAME (new_scope)) == TEMPLATE_ID_EXPR))) permerror (input_location, TYPE_P (new_scope) ? 
G_("%qT is not a template") : G_("%qD is not a template"), new_scope); /* If it is a class scope, try to complete it; we are about to be looking up names inside the class. */ if (TYPE_P (new_scope) /* Since checking types for dependency can be expensive, avoid doing it if the type is already complete. */ && !COMPLETE_TYPE_P (new_scope) /* Do not try to complete dependent types. */ && !dependent_type_p (new_scope)) { new_scope = complete_type (new_scope); /* If it is a typedef to current class, use the current class instead, as the typedef won't have any names inside it yet. */ if (!COMPLETE_TYPE_P (new_scope) && currently_open_class (new_scope)) new_scope = TYPE_MAIN_VARIANT (new_scope); } /* Make sure we look in the right scope the next time through the loop. */ parser->scope = new_scope; } /* If parsing tentatively, replace the sequence of tokens that makes up the nested-name-specifier with a CPP_NESTED_NAME_SPECIFIER token. That way, should we re-parse the token stream, we will not have to repeat the effort required to do the parse, nor will we issue duplicate error messages. */ if (success && start) { cp_token *token; token = cp_lexer_token_at (parser->lexer, start); /* Reset the contents of the START token. */ token->type = CPP_NESTED_NAME_SPECIFIER; /* Retrieve any deferred checks. Do not pop this access checks yet so the memory will not be reclaimed during token replacing below. */ token->u.tree_check_value = ggc_cleared_alloc<struct tree_check> (); token->u.tree_check_value->value = parser->scope; token->u.tree_check_value->checks = get_deferred_access_checks (); token->u.tree_check_value->qualifying_scope = parser->qualifying_scope; token->keyword = RID_MAX; /* Purge all subsequent tokens. */ cp_lexer_purge_tokens_after (parser->lexer, start); } if (start) pop_to_parent_deferring_access_checks (); return success ? parser->scope : NULL_TREE; } /* Parse a nested-name-specifier. See cp_parser_nested_name_specifier_opt for details. This function behaves identically, except that it will an issue an error if no nested-name-specifier is present. */ static tree cp_parser_nested_name_specifier (cp_parser *parser, bool typename_keyword_p, bool check_dependency_p, bool type_p, bool is_declaration) { tree scope; /* Look for the nested-name-specifier. */ scope = cp_parser_nested_name_specifier_opt (parser, typename_keyword_p, check_dependency_p, type_p, is_declaration); /* If it was not present, issue an error message. */ if (!scope) { cp_parser_error (parser, "expected nested-name-specifier"); parser->scope = NULL_TREE; } return scope; } /* Parse the qualifying entity in a nested-name-specifier. For C++98, this is either a class-name or a namespace-name (which corresponds to the class-or-namespace-name production in the grammar). For C++0x, it can also be a type-name that refers to an enumeration type or a simple-template-id. TYPENAME_KEYWORD_P is TRUE iff the `typename' keyword is in effect. TEMPLATE_KEYWORD_P is TRUE iff the `template' keyword is in effect. CHECK_DEPENDENCY_P is FALSE iff dependent names should be looked up. TYPE_P is TRUE iff the next name should be taken as a class-name, even the same name is declared to be another entity in the same scope. Returns the class (TYPE_DECL) or namespace (NAMESPACE_DECL) specified by the class-or-namespace-name. If neither is found the ERROR_MARK_NODE is returned. 
*/ static tree cp_parser_qualifying_entity (cp_parser *parser, bool typename_keyword_p, bool template_keyword_p, bool check_dependency_p, bool type_p, bool is_declaration) { tree saved_scope; tree saved_qualifying_scope; tree saved_object_scope; tree scope; bool only_class_p; bool successful_parse_p; /* DR 743: decltype can appear in a nested-name-specifier. */ if (cp_lexer_next_token_is_decltype (parser->lexer)) { scope = cp_parser_decltype (parser); if (TREE_CODE (scope) != ENUMERAL_TYPE && !MAYBE_CLASS_TYPE_P (scope)) { cp_parser_simulate_error (parser); return error_mark_node; } if (TYPE_NAME (scope)) scope = TYPE_NAME (scope); return scope; } /* Before we try to parse the class-name, we must save away the current PARSER->SCOPE since cp_parser_class_name will destroy it. */ saved_scope = parser->scope; saved_qualifying_scope = parser->qualifying_scope; saved_object_scope = parser->object_scope; /* Try for a class-name first. If the SAVED_SCOPE is a type, then there is no need to look for a namespace-name. */ only_class_p = template_keyword_p || (saved_scope && TYPE_P (saved_scope) && cxx_dialect == cxx98); if (!only_class_p) cp_parser_parse_tentatively (parser); scope = cp_parser_class_name (parser, typename_keyword_p, template_keyword_p, type_p ? class_type : none_type, check_dependency_p, /*class_head_p=*/false, is_declaration); successful_parse_p = only_class_p || cp_parser_parse_definitely (parser); /* If that didn't work and we're in C++0x mode, try for a type-name. */ if (!only_class_p && cxx_dialect != cxx98 && !successful_parse_p) { /* Restore the saved scope. */ parser->scope = saved_scope; parser->qualifying_scope = saved_qualifying_scope; parser->object_scope = saved_object_scope; /* Parse tentatively. */ cp_parser_parse_tentatively (parser); /* Parse a type-name */ scope = cp_parser_type_name (parser); /* "If the name found does not designate a namespace or a class, enumeration, or dependent type, the program is ill-formed." We cover classes and dependent types above and namespaces below, so this code is only looking for enums. */ if (!scope || TREE_CODE (scope) != TYPE_DECL || TREE_CODE (TREE_TYPE (scope)) != ENUMERAL_TYPE) cp_parser_simulate_error (parser); successful_parse_p = cp_parser_parse_definitely (parser); } /* If that didn't work, try for a namespace-name. */ if (!only_class_p && !successful_parse_p) { /* Restore the saved scope. */ parser->scope = saved_scope; parser->qualifying_scope = saved_qualifying_scope; parser->object_scope = saved_object_scope; /* If we are not looking at an identifier followed by the scope resolution operator, then this is not part of a nested-name-specifier. (Note that this function is only used to parse the components of a nested-name-specifier.) */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME) || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE) return error_mark_node; scope = cp_parser_namespace_name (parser); } return scope; } /* Return true if we are looking at a compound-literal, false otherwise. */ static bool cp_parser_compound_literal_p (cp_parser *parser) { /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); cp_lexer_save_tokens (parser->lexer); /* Skip tokens until the next token is a closing parenthesis. If we find the closing `)', and the next token is a `{', then we are looking at a compound-literal. 
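E.g. (illustrative) the GNU compound-literal `(struct point) { 1, 2 }': after the `)' is skipped, the `{' identifies the construct.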
*/ bool compound_literal_p = (cp_parser_skip_to_closing_parenthesis (parser, false, false, /*consume_paren=*/true) && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)); /* Roll back the tokens we skipped. */ cp_lexer_rollback_tokens (parser->lexer); return compound_literal_p; } /* Parse a postfix-expression. postfix-expression: primary-expression postfix-expression [ expression ] postfix-expression ( expression-list [opt] ) simple-type-specifier ( expression-list [opt] ) typename :: [opt] nested-name-specifier identifier ( expression-list [opt] ) typename :: [opt] nested-name-specifier template [opt] template-id ( expression-list [opt] ) postfix-expression . template [opt] id-expression postfix-expression -> template [opt] id-expression postfix-expression . pseudo-destructor-name postfix-expression -> pseudo-destructor-name postfix-expression ++ postfix-expression -- dynamic_cast < type-id > ( expression ) static_cast < type-id > ( expression ) reinterpret_cast < type-id > ( expression ) const_cast < type-id > ( expression ) typeid ( expression ) typeid ( type-id ) GNU Extension: postfix-expression: ( type-id ) { initializer-list , [opt] } This extension is a GNU version of the C99 compound-literal construct. (The C99 grammar uses `type-name' instead of `type-id', but they are essentially the same concept.) If ADDRESS_P is true, the postfix expression is the operand of the `&' operator. CAST_P is true if this expression is the target of a cast. If MEMBER_ACCESS_ONLY_P, we only allow postfix expressions that are class member access expressions [expr.ref]. Returns a representation of the expression. */ static tree cp_parser_postfix_expression (cp_parser *parser, bool address_p, bool cast_p, bool member_access_only_p, bool decltype_p, cp_id_kind * pidk_return) { cp_token *token; location_t loc; enum rid keyword; cp_id_kind idk = CP_ID_KIND_NONE; tree postfix_expression = NULL_TREE; bool is_member_access = false; int saved_in_statement = -1; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); loc = token->location; /* Some of the productions are determined by keywords. */ keyword = token->keyword; switch (keyword) { case RID_DYNCAST: case RID_STATCAST: case RID_REINTCAST: case RID_CONSTCAST: { tree type; tree expression; const char *saved_message; bool saved_in_type_id_in_expr_p; /* All of these can be handled in the same way from the point of view of parsing. Begin by consuming the token identifying the cast. */ cp_lexer_consume_token (parser->lexer); /* New types cannot be defined in the cast. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in casts"); /* Look for the opening `<'. */ cp_parser_require (parser, CPP_LESS, RT_LESS); /* Parse the type to which we are casting. */ saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; type = cp_parser_type_id (parser); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; /* Look for the closing `>'. */ cp_parser_require (parser, CPP_GREATER, RT_GREATER); /* Restore the old message. */ parser->type_definition_forbidden_message = saved_message; bool saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = true; /* And the expression which is being cast. 
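E.g. (illustrative) the parenthesized `(d)' in `static_cast<int> (d)'.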
*/ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); expression = cp_parser_expression (parser, & idk, /*cast_p=*/true); cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; /* Only type conversions to integral or enumeration types can be used in constant-expressions. */ if (!cast_valid_in_integral_constant_expression_p (type) && cp_parser_non_integral_constant_expression (parser, NIC_CAST)) return error_mark_node; switch (keyword) { case RID_DYNCAST: postfix_expression = build_dynamic_cast (type, expression, tf_warning_or_error); break; case RID_STATCAST: postfix_expression = build_static_cast (type, expression, tf_warning_or_error); break; case RID_REINTCAST: postfix_expression = build_reinterpret_cast (type, expression, tf_warning_or_error); break; case RID_CONSTCAST: postfix_expression = build_const_cast (type, expression, tf_warning_or_error); break; default: gcc_unreachable (); } } break; case RID_TYPEID: { tree type; const char *saved_message; bool saved_in_type_id_in_expr_p; /* Consume the `typeid' token. */ cp_lexer_consume_token (parser->lexer); /* Look for the `(' token. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Types cannot be defined in a `typeid' expression. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in a %<typeid%> expression"); /* We can't be sure yet whether we're looking at a type-id or an expression. */ cp_parser_parse_tentatively (parser); /* Try a type-id first. */ saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; type = cp_parser_type_id (parser); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; /* Look for the `)' token. Otherwise, we can't be sure that we're not looking at an expression: consider `typeid (int (3))', for example. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* If all went well, simply lookup the type-id. */ if (cp_parser_parse_definitely (parser)) postfix_expression = get_typeid (type, tf_warning_or_error); /* Otherwise, fall back to the expression variant. */ else { tree expression; /* Look for an expression. */ expression = cp_parser_expression (parser, & idk); /* Compute its typeid. */ postfix_expression = build_typeid (expression, tf_warning_or_error); /* Look for the `)' token. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); } /* Restore the saved message. */ parser->type_definition_forbidden_message = saved_message; /* `typeid' may not appear in an integral constant expression. */ if (cp_parser_non_integral_constant_expression (parser, NIC_TYPEID)) return error_mark_node; } break; case RID_TYPENAME: { tree type; /* The syntax permitted here is the same permitted for an elaborated-type-specifier. 
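For example (illustrative): `typename T::type (arg)', a functional cast to the dependent type named by the elaborated-type-specifier.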
*/ type = cp_parser_elaborated_type_specifier (parser, /*is_friend=*/false, /*is_declaration=*/false); postfix_expression = cp_parser_functional_cast (parser, type); } break; case RID_CILK_SPAWN: { cp_lexer_consume_token (parser->lexer); token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_SEMICOLON) { error_at (token->location, "%<_Cilk_spawn%> must be followed by " "an expression"); postfix_expression = error_mark_node; break; } else if (!current_function_decl) { error_at (token->location, "%<_Cilk_spawn%> may only be used " "inside a function"); postfix_expression = error_mark_node; break; } else { /* Consecutive _Cilk_spawns are not allowed in a statement. */ saved_in_statement = parser->in_statement; parser->in_statement |= IN_CILK_SPAWN; } cfun->calls_cilk_spawn = 1; postfix_expression = cp_parser_postfix_expression (parser, false, false, false, false, &idk); if (!flag_cilkplus) { error_at (token->location, "-fcilkplus must be enabled to use" " %<_Cilk_spawn%>"); cfun->calls_cilk_spawn = 0; } else if (saved_in_statement & IN_CILK_SPAWN) { error_at (token->location, "consecutive %<_Cilk_spawn%> keywords " "are not permitted"); postfix_expression = error_mark_node; cfun->calls_cilk_spawn = 0; } else { postfix_expression = build_cilk_spawn (token->location, postfix_expression); if (postfix_expression != error_mark_node) SET_EXPR_LOCATION (postfix_expression, input_location); parser->in_statement = parser->in_statement & ~IN_CILK_SPAWN; } break; } case RID_BUILTIN_SHUFFLE: { vec<tree, va_gc> *vec; unsigned int i; tree p; cp_lexer_consume_token (parser->lexer); vec = cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL); if (vec == NULL) return error_mark_node; FOR_EACH_VEC_ELT (*vec, i, p) mark_exp_read (p); if (vec->length () == 2) return build_x_vec_perm_expr (loc, (*vec)[0], NULL_TREE, (*vec)[1], tf_warning_or_error); else if (vec->length () == 3) return build_x_vec_perm_expr (loc, (*vec)[0], (*vec)[1], (*vec)[2], tf_warning_or_error); else { error_at (loc, "wrong number of arguments to " "%<__builtin_shuffle%>"); return error_mark_node; } break; } default: { tree type; /* If the next thing is a simple-type-specifier, we may be looking at a functional cast. We could also be looking at an id-expression. So, we try the functional cast, and if that doesn't work we fall back to the primary-expression. */ cp_parser_parse_tentatively (parser); /* Look for the simple-type-specifier. */ type = cp_parser_simple_type_specifier (parser, /*decl_specs=*/NULL, CP_PARSER_FLAGS_NONE); /* Parse the cast itself. */ if (!cp_parser_error_occurred (parser)) postfix_expression = cp_parser_functional_cast (parser, type); /* If that worked, we're done. */ if (cp_parser_parse_definitely (parser)) break; /* If the functional-cast didn't work out, try a compound-literal. */ if (cp_parser_allow_gnu_extensions_p (parser) && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { tree initializer = NULL_TREE; cp_parser_parse_tentatively (parser); /* Avoid calling cp_parser_type_id pointlessly, see comment in cp_parser_cast_expression about c++/29234. */ if (!cp_parser_compound_literal_p (parser)) cp_parser_simulate_error (parser); else { /* Parse the type. */ bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; type = cp_parser_type_id (parser); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; /* Look for the `)'. 
*/ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); } /* If things aren't going well, there's no need to keep going. */ if (!cp_parser_error_occurred (parser)) { bool non_constant_p; /* Parse the brace-enclosed initializer list. */ initializer = cp_parser_braced_list (parser, &non_constant_p); } /* If that worked, we're definitely looking at a compound-literal expression. */ if (cp_parser_parse_definitely (parser)) { /* Warn the user that a compound literal is not allowed in standard C++. */ pedwarn (input_location, OPT_Wpedantic, "ISO C++ forbids compound-literals"); /* For simplicity, we disallow compound literals in constant-expressions. We could allow compound literals of integer type, whose initializer was a constant, in constant expressions. Permitting that usage, as a further extension, would not change the meaning of any currently accepted programs. (Of course, as compound literals are not part of ISO C++, the standard has nothing to say.) */ if (cp_parser_non_integral_constant_expression (parser, NIC_NCC)) { postfix_expression = error_mark_node; break; } /* Form the representation of the compound-literal. */ postfix_expression = finish_compound_literal (type, initializer, tf_warning_or_error); break; } } /* It must be a primary-expression. */ postfix_expression = cp_parser_primary_expression (parser, address_p, cast_p, /*template_arg_p=*/false, decltype_p, &idk); } break; } /* Note that we don't need to worry about calling build_cplus_new on a class-valued CALL_EXPR in decltype when it isn't the end of the postfix-expression; unary_complex_lvalue will take care of that for all these cases. */ /* Keep looping until the postfix-expression is complete. */ while (true) { if (idk == CP_ID_KIND_UNQUALIFIED && identifier_p (postfix_expression) && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN)) /* It is not a Koenig lookup function call. */ postfix_expression = unqualified_name_lookup_error (postfix_expression); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_OPEN_SQUARE: if (cp_next_tokens_can_be_std_attribute_p (parser)) { cp_parser_error (parser, "two consecutive %<[%> shall " "only introduce an attribute"); return error_mark_node; } postfix_expression = cp_parser_postfix_open_square_expression (parser, postfix_expression, false, decltype_p); idk = CP_ID_KIND_NONE; is_member_access = false; break; case CPP_OPEN_PAREN: /* postfix-expression ( expression-list [opt] ) */ { bool koenig_p; bool is_builtin_constant_p; bool saved_integral_constant_expression_p = false; bool saved_non_integral_constant_expression_p = false; tsubst_flags_t complain = complain_flags (decltype_p); vec<tree, va_gc> *args; is_member_access = false; is_builtin_constant_p = DECL_IS_BUILTIN_CONSTANT_P (postfix_expression); if (is_builtin_constant_p) { /* The whole point of __builtin_constant_p is to allow non-constant expressions to appear as arguments. 
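     For instance, in a hypothetical

         int f (int x) { return __builtin_constant_p (x) ? x : 0; }

     the argument `x' is not a constant expression, yet the call must be
     accepted, so the constant-expression flags are cleared around the
     argument-list parse that follows.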
*/ saved_integral_constant_expression_p = parser->integral_constant_expression_p; saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p; parser->integral_constant_expression_p = false; } args = (cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL, /*want_literal_zero_p=*/warn_memset_transposed_args)); if (is_builtin_constant_p) { parser->integral_constant_expression_p = saved_integral_constant_expression_p; parser->non_integral_constant_expression_p = saved_non_integral_constant_expression_p; } if (args == NULL) { postfix_expression = error_mark_node; break; } /* Function calls are not permitted in constant-expressions. */ if (! builtin_valid_in_constant_expr_p (postfix_expression) && cp_parser_non_integral_constant_expression (parser, NIC_FUNC_CALL)) { postfix_expression = error_mark_node; release_tree_vector (args); break; } koenig_p = false; if (idk == CP_ID_KIND_UNQUALIFIED || idk == CP_ID_KIND_TEMPLATE_ID) { if (identifier_p (postfix_expression)) { if (!args->is_empty ()) { koenig_p = true; if (!any_type_dependent_arguments_p (args)) postfix_expression = perform_koenig_lookup (postfix_expression, args, complain); } else postfix_expression = unqualified_fn_lookup_error (postfix_expression); } /* We do not perform argument-dependent lookup if normal lookup finds a non-function, in accordance with the expected resolution of DR 218. */ else if (!args->is_empty () && is_overloaded_fn (postfix_expression)) { tree fn = get_first_fn (postfix_expression); fn = STRIP_TEMPLATE (fn); /* Do not do argument dependent lookup if regular lookup finds a member function or a block-scope function declaration. [basic.lookup.argdep]/3 */ if (!DECL_FUNCTION_MEMBER_P (fn) && !DECL_LOCAL_FUNCTION_P (fn)) { koenig_p = true; if (!any_type_dependent_arguments_p (args)) postfix_expression = perform_koenig_lookup (postfix_expression, args, complain); } } } if (warn_memset_transposed_args) { if (TREE_CODE (postfix_expression) == FUNCTION_DECL && DECL_BUILT_IN_CLASS (postfix_expression) == BUILT_IN_NORMAL && DECL_FUNCTION_CODE (postfix_expression) == BUILT_IN_MEMSET && vec_safe_length (args) == 3 && integer_zerop ((*args)[2]) && LITERAL_ZERO_P ((*args)[2]) && !(integer_zerop ((*args)[1]) && LITERAL_ZERO_P ((*args)[1]))) warning (OPT_Wmemset_transposed_args, "%<memset%> used with constant zero length " "parameter; this could be due to transposed " "parameters"); /* Replace LITERAL_ZERO_P INTEGER_CSTs with normal ones to avoid leaking those into folder and middle-end. */ unsigned int i; tree arg; FOR_EACH_VEC_SAFE_ELT (args, i, arg) if (TREE_CODE (arg) == INTEGER_CST && LITERAL_ZERO_P (arg)) (*args)[i] = build_int_cst (TREE_TYPE (arg), 0); } if (TREE_CODE (postfix_expression) == COMPONENT_REF) { tree instance = TREE_OPERAND (postfix_expression, 0); tree fn = TREE_OPERAND (postfix_expression, 1); if (processing_template_decl && (type_dependent_expression_p (instance) || (!BASELINK_P (fn) && TREE_CODE (fn) != FIELD_DECL) || type_dependent_expression_p (fn) || any_type_dependent_arguments_p (args))) { postfix_expression = build_nt_call_vec (postfix_expression, args); release_tree_vector (args); break; } if (BASELINK_P (fn)) { postfix_expression = (build_new_method_call (instance, fn, &args, NULL_TREE, (idk == CP_ID_KIND_QUALIFIED ? 
LOOKUP_NORMAL|LOOKUP_NONVIRTUAL : LOOKUP_NORMAL), /*fn_p=*/NULL, complain)); } else postfix_expression = finish_call_expr (postfix_expression, &args, /*disallow_virtual=*/false, /*koenig_p=*/false, complain); } else if (TREE_CODE (postfix_expression) == OFFSET_REF || TREE_CODE (postfix_expression) == MEMBER_REF || TREE_CODE (postfix_expression) == DOTSTAR_EXPR) postfix_expression = (build_offset_ref_call_from_tree (postfix_expression, &args, complain)); else if (idk == CP_ID_KIND_QUALIFIED) /* A call to a static class member, or a namespace-scope function. */ postfix_expression = finish_call_expr (postfix_expression, &args, /*disallow_virtual=*/true, koenig_p, complain); else /* All other function calls. */ postfix_expression = finish_call_expr (postfix_expression, &args, /*disallow_virtual=*/false, koenig_p, complain); protected_set_expr_location (postfix_expression, token->location); /* The POSTFIX_EXPRESSION is certainly no longer an id. */ idk = CP_ID_KIND_NONE; release_tree_vector (args); } break; case CPP_DOT: case CPP_DEREF: /* postfix-expression . template [opt] id-expression postfix-expression . pseudo-destructor-name postfix-expression -> template [opt] id-expression postfix-expression -> pseudo-destructor-name */ /* Consume the `.' or `->' operator. */ cp_lexer_consume_token (parser->lexer); postfix_expression = cp_parser_postfix_dot_deref_expression (parser, token->type, postfix_expression, false, &idk, loc); is_member_access = true; break; case CPP_PLUS_PLUS: /* postfix-expression ++ */ /* Consume the `++' token. */ cp_lexer_consume_token (parser->lexer); /* Generate a representation for the complete expression. */ postfix_expression = finish_increment_expr (postfix_expression, POSTINCREMENT_EXPR); /* Increments may not appear in constant-expressions. */ if (cp_parser_non_integral_constant_expression (parser, NIC_INC)) postfix_expression = error_mark_node; idk = CP_ID_KIND_NONE; is_member_access = false; break; case CPP_MINUS_MINUS: /* postfix-expression -- */ /* Consume the `--' token. */ cp_lexer_consume_token (parser->lexer); /* Generate a representation for the complete expression. */ postfix_expression = finish_increment_expr (postfix_expression, POSTDECREMENT_EXPR); /* Decrements may not appear in constant-expressions. */ if (cp_parser_non_integral_constant_expression (parser, NIC_DEC)) postfix_expression = error_mark_node; idk = CP_ID_KIND_NONE; is_member_access = false; break; default: if (pidk_return != NULL) * pidk_return = idk; if (member_access_only_p) return is_member_access ? postfix_expression : error_mark_node; else return postfix_expression; } } /* We should never get here. */ gcc_unreachable (); return error_mark_node; } /* This function parses Cilk Plus array notations. If a normal array expression is parsed, then the array index is passed back to the caller through *INIT_INDEX and the function returns NULL_TREE. If an array notation expression is parsed, then *INIT_INDEX is ignored by the caller and the function returns a tree of type ARRAY_NOTATION_REF. If some error occurred, it returns error_mark_node. 
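   A sketch of the accepted inputs (hypothetical Cilk Plus examples):

       A[i]       normal array expression; *INIT_INDEX gets `i'
       A[:]       whole-array section
       A[0:n]     section of length `n' starting at index 0
       A[0:n:2]   the same section with a stride of 2

   Only the last three yield an ARRAY_NOTATION_REF.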
*/ static tree cp_parser_array_notation (location_t loc, cp_parser *parser, tree *init_index, tree array_value) { cp_token *token = NULL; tree length_index, stride = NULL_TREE, value_tree, array_type; if (!array_value || array_value == error_mark_node) { cp_parser_skip_to_end_of_statement (parser); return error_mark_node; } array_type = TREE_TYPE (array_value); bool saved_colon_corrects = parser->colon_corrects_to_scope_p; parser->colon_corrects_to_scope_p = false; token = cp_lexer_peek_token (parser->lexer); if (!token) { cp_parser_error (parser, "expected %<:%> or numeral"); return error_mark_node; } else if (token->type == CPP_COLON) { /* Consume the ':'. */ cp_lexer_consume_token (parser->lexer); /* If we are here, then we have a case like this: A[:]. */ if (cp_lexer_peek_token (parser->lexer)->type != CPP_CLOSE_SQUARE) { cp_parser_error (parser, "expected %<]%>"); cp_parser_skip_to_end_of_statement (parser); return error_mark_node; } *init_index = NULL_TREE; stride = NULL_TREE; length_index = NULL_TREE; } else { /* If we are here, then there are three valid possibilities: 1. ARRAY [ EXP ] 2. ARRAY [ EXP : EXP ] 3. ARRAY [ EXP : EXP : EXP ] */ *init_index = cp_parser_expression (parser); if (cp_lexer_peek_token (parser->lexer)->type != CPP_COLON) { /* This indicates that we have a normal array expression. */ parser->colon_corrects_to_scope_p = saved_colon_corrects; return NULL_TREE; } /* Consume the ':'. */ cp_lexer_consume_token (parser->lexer); length_index = cp_parser_expression (parser); if (cp_lexer_peek_token (parser->lexer)->type == CPP_COLON) { cp_lexer_consume_token (parser->lexer); stride = cp_parser_expression (parser); } } parser->colon_corrects_to_scope_p = saved_colon_corrects; if (*init_index == error_mark_node || length_index == error_mark_node || stride == error_mark_node || array_type == error_mark_node) { if (cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_SQUARE) cp_lexer_consume_token (parser->lexer); return error_mark_node; } cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); value_tree = build_array_notation_ref (loc, array_value, *init_index, length_index, stride, array_type); return value_tree; } /* A subroutine of cp_parser_postfix_expression that also gets hijacked by cp_parser_builtin_offsetof. We're looking for postfix-expression [ expression ] postfix-expression [ braced-init-list ] (C++11) FOR_OFFSETOF is set if we're being called in that context, which changes how we deal with integer constant expressions. */ static tree cp_parser_postfix_open_square_expression (cp_parser *parser, tree postfix_expression, bool for_offsetof, bool decltype_p) { tree index = NULL_TREE; location_t loc = cp_lexer_peek_token (parser->lexer)->location; bool saved_greater_than_is_operator_p; /* Consume the `[' token. */ cp_lexer_consume_token (parser->lexer); saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = true; /* Parse the index expression. */ /* ??? For offsetof, there is a question of what to allow here. If offsetof is not being used in an integral constant expression context, then we *could* get the right answer by computing the value at runtime. If we are in an integral constant expression context, then we might be able to accept any constant expression; hard to say without analysis. Rather than open the barn door too wide right away, allow only integer constant expressions here. 
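     Concretely, for a hypothetical

         struct S { int a[4]; };
         ... __builtin_offsetof (struct S, a[2]) ...

     we are entered with FOR_OFFSETOF set and the index `2' is parsed with
     cp_parser_constant_expression below.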
*/ if (for_offsetof) index = cp_parser_constant_expression (parser); else { if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { bool expr_nonconst_p; cp_lexer_set_source_position (parser->lexer); maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); index = cp_parser_braced_list (parser, &expr_nonconst_p); if (flag_cilkplus && cp_lexer_peek_token (parser->lexer)->type == CPP_COLON) { error_at (cp_lexer_peek_token (parser->lexer)->location, "braced list index is not allowed with array " "notation"); cp_parser_skip_to_end_of_statement (parser); return error_mark_node; } } else if (flag_cilkplus) { /* Here we have these two options: ARRAY[EXP : EXP] - Array notation expression with the default stride of 1. ARRAY[EXP : EXP : EXP] - Array notation with a user-defined stride. */ tree an_exp = cp_parser_array_notation (loc, parser, &index, postfix_expression); if (an_exp) return an_exp; } else index = cp_parser_expression (parser); } parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; /* Look for the closing `]'. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); /* Build the ARRAY_REF. */ postfix_expression = grok_array_decl (loc, postfix_expression, index, decltype_p); /* When not doing offsetof, array references are not permitted in constant-expressions. */ if (!for_offsetof && (cp_parser_non_integral_constant_expression (parser, NIC_ARRAY_REF))) postfix_expression = error_mark_node; return postfix_expression; } /* A subroutine of cp_parser_postfix_expression that also gets hijacked by cp_parser_builtin_offsetof. We're looking for postfix-expression . template [opt] id-expression postfix-expression . pseudo-destructor-name postfix-expression -> template [opt] id-expression postfix-expression -> pseudo-destructor-name FOR_OFFSETOF is set if we're being called in that context. That somewhat limits which of the above we'll actually accept, but never mind. TOKEN_TYPE is the "." or "->" token, which will already have been removed from the stream. */ static tree cp_parser_postfix_dot_deref_expression (cp_parser *parser, enum cpp_ttype token_type, tree postfix_expression, bool for_offsetof, cp_id_kind *idk, location_t location) { tree name; bool dependent_p; bool pseudo_destructor_p; tree scope = NULL_TREE; /* If this is a `->' operator, dereference the pointer. */ if (token_type == CPP_DEREF) postfix_expression = build_x_arrow (location, postfix_expression, tf_warning_or_error); /* Check to see whether or not the expression is type-dependent. */ dependent_p = type_dependent_expression_p (postfix_expression); /* The identifier following the `->' or `.' is not qualified. */ parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; *idk = CP_ID_KIND_NONE; /* Enter the scope corresponding to the type of the object given by the POSTFIX_EXPRESSION. */ if (!dependent_p && TREE_TYPE (postfix_expression) != NULL_TREE) { scope = TREE_TYPE (postfix_expression); /* According to the standard, no expression should ever have reference type. Unfortunately, we do not currently match the standard in this respect in that our internal representation of an expression may have reference type even when the standard says it does not. Therefore, we have to manually obtain the underlying type here. */ scope = non_reference (scope); /* The type of the POSTFIX_EXPRESSION must be complete. 
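     (For a hypothetical `struct Incomplete *p;', the member access in
     `p->x' reaches complete_type_or_else below and is rejected because the
     type cannot be completed.)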
*/ if (scope == unknown_type_node) { error_at (location, "%qE does not have class type", postfix_expression); scope = NULL_TREE; } /* Unlike the object expression in other contexts, *this is not required to be of complete type for purposes of class member access (5.2.5) outside the member function body. */ else if (postfix_expression != current_class_ref && !(processing_template_decl && scope == current_class_type)) scope = complete_type_or_else (scope, NULL_TREE); /* Let the name lookup machinery know that we are processing a class member access expression. */ parser->context->object_type = scope; /* If something went wrong, we want to be able to discern that case, as opposed to the case where there was no SCOPE due to the type of expression being dependent. */ if (!scope) scope = error_mark_node; /* If the SCOPE was erroneous, make the various semantic analysis functions exit quickly -- and without issuing additional error messages. */ if (scope == error_mark_node) postfix_expression = error_mark_node; } /* Assume this expression is not a pseudo-destructor access. */ pseudo_destructor_p = false; /* If the SCOPE is a scalar type, then, if this is a valid program, we must be looking at a pseudo-destructor-name. If POSTFIX_EXPRESSION is type dependent, it can be a pseudo-destructor-name or something else. Try to parse it as a pseudo-destructor-name first. */ if ((scope && SCALAR_TYPE_P (scope)) || dependent_p) { tree s; tree type; cp_parser_parse_tentatively (parser); /* Parse the pseudo-destructor-name. */ s = NULL_TREE; cp_parser_pseudo_destructor_name (parser, postfix_expression, &s, &type); if (dependent_p && (cp_parser_error_occurred (parser) || !SCALAR_TYPE_P (type))) cp_parser_abort_tentative_parse (parser); else if (cp_parser_parse_definitely (parser)) { pseudo_destructor_p = true; postfix_expression = finish_pseudo_destructor_expr (postfix_expression, s, type, location); } } if (!pseudo_destructor_p) { /* If the SCOPE is not a scalar type, we are looking at an ordinary class member access expression, rather than a pseudo-destructor-name. */ bool template_p; cp_token *token = cp_lexer_peek_token (parser->lexer); /* Parse the id-expression. */ name = (cp_parser_id_expression (parser, cp_parser_optional_template_keyword (parser), /*check_dependency_p=*/true, &template_p, /*declarator_p=*/false, /*optional_p=*/false)); /* In general, build a SCOPE_REF if the member name is qualified. However, if the name was not dependent and has already been resolved, there is no need to build the SCOPE_REF. For example: struct X { void f(); }; template <typename T> void f(T* t) { t->X::f(); } Even though "t" is dependent, "X::f" is not and has been resolved to a BASELINK; there is no need to include scope information. */ /* But we do need to remember that there was an explicit scope for virtual function calls. */ if (parser->scope) *idk = CP_ID_KIND_QUALIFIED; /* If the name is a template-id that names a type, we will get a TYPE_DECL here. That is invalid code. 
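     For example, hypothetical invalid code such as

         template <typename T> struct X { };
         struct Y { };
         void f (Y *p) { p->X<int> (); }

     produces a TYPE_DECL for `X<int>' and is diagnosed just below.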
*/ if (TREE_CODE (name) == TYPE_DECL) { error_at (token->location, "invalid use of %qD", name); postfix_expression = error_mark_node; } else { if (name != error_mark_node && !BASELINK_P (name) && parser->scope) { if (TREE_CODE (parser->scope) == NAMESPACE_DECL) { error_at (token->location, "%<%D::%D%> is not a class member", parser->scope, name); postfix_expression = error_mark_node; } else name = build_qualified_name (/*type=*/NULL_TREE, parser->scope, name, template_p); parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; } if (parser->scope && name && BASELINK_P (name)) adjust_result_of_qualified_name_lookup (name, parser->scope, scope); postfix_expression = finish_class_member_access_expr (postfix_expression, name, template_p, tf_warning_or_error); } } /* We no longer need to look up names in the scope of the object on the left-hand side of the `.' or `->' operator. */ parser->context->object_type = NULL_TREE; /* Outside of offsetof, these operators may not appear in constant-expressions. */ if (!for_offsetof && (cp_parser_non_integral_constant_expression (parser, token_type == CPP_DEREF ? NIC_ARROW : NIC_POINT))) postfix_expression = error_mark_node; return postfix_expression; } /* Cache of LITERAL_ZERO_P constants. */ static GTY(()) tree literal_zeros[itk_none]; /* Parse a parenthesized expression-list. expression-list: assignment-expression expression-list, assignment-expression attribute-list: expression-list identifier identifier, expression-list CAST_P is true if this expression is the target of a cast. ALLOW_EXPANSION_P is true if this expression allows expansion of an argument pack. Returns a vector of trees. Each element is a representation of an assignment-expression. NULL is returned if the `(' and/or `)' are missing. An empty, but allocated, vector is returned if there are no expressions. The parentheses are eaten. IS_ATTRIBUTE_LIST is id_attr if we are parsing an attribute list for an attribute that wants a plain identifier argument, normal_attr for an attribute that wants an expression, or non_attr if we aren't parsing an attribute list. If NON_CONSTANT_P is non-NULL, *NON_CONSTANT_P indicates whether or not all of the expressions in the list were constant. WANT_LITERAL_ZERO_P is true if the caller is interested in LITERAL_ZERO_P INTEGER_CSTs. FIXME: once we don't fold everything immediately, this can be removed. */ static vec<tree, va_gc> * cp_parser_parenthesized_expression_list (cp_parser* parser, int is_attribute_list, bool cast_p, bool allow_expansion_p, bool *non_constant_p, bool want_literal_zero_p) { vec<tree, va_gc> *expression_list; bool fold_expr_p = is_attribute_list != non_attr; tree identifier = NULL_TREE; bool saved_greater_than_is_operator_p; /* Assume all the expressions will be constant. */ if (non_constant_p) *non_constant_p = false; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return NULL; expression_list = make_tree_vector (); /* Within a parenthesized expression, a `>' token is always the greater-than operator. */ saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = true; /* Consume expressions until there are no more. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)) while (true) { tree expr; /* At the beginning of attribute lists, check to see if the next token is an identifier. */ if (is_attribute_list == id_attr && cp_lexer_peek_token (parser->lexer)->type == CPP_NAME) { cp_token *token; /* Consume the identifier. 
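	       (An attribute that wants a plain identifier is, for instance,
	       `mode': in a hypothetical

	           typedef int myint __attribute__ ((mode (SI)));

	       the `SI' must not be looked up as an expression, hence this
	       special case.)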
*/ token = cp_lexer_consume_token (parser->lexer); /* Save the identifier. */ identifier = token->u.value; } else { bool expr_non_constant_p; /* Parse the next assignment-expression. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { /* A braced-init-list. */ cp_lexer_set_source_position (parser->lexer); maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); expr = cp_parser_braced_list (parser, &expr_non_constant_p); if (non_constant_p && expr_non_constant_p) *non_constant_p = true; } else if (non_constant_p) { expr = (cp_parser_constant_expression (parser, /*allow_non_constant_p=*/true, &expr_non_constant_p)); if (expr_non_constant_p) *non_constant_p = true; } else { expr = NULL_TREE; cp_token *tok = cp_lexer_peek_token (parser->lexer); switch (tok->type) { case CPP_NUMBER: case CPP_CHAR: case CPP_WCHAR: case CPP_CHAR16: case CPP_CHAR32: /* If a parameter is a literal zero alone, remember it for the -Wmemset-transposed-args warning. */ if (integer_zerop (tok->u.value) && !TREE_OVERFLOW (tok->u.value) && want_literal_zero_p && (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COMMA || cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_CLOSE_PAREN)) { unsigned int i; for (i = 0; i < itk_none; ++i) if (TREE_TYPE (tok->u.value) == integer_types[i]) break; if (i < itk_none && literal_zeros[i]) expr = literal_zeros[i]; else { expr = copy_node (tok->u.value); LITERAL_ZERO_P (expr) = 1; if (i < itk_none) literal_zeros[i] = expr; } /* Consume the 0 token (or '\0', 0LL etc.). */ cp_lexer_consume_token (parser->lexer); } break; default: break; } if (expr == NULL_TREE) expr = cp_parser_assignment_expression (parser, /*pidk=*/NULL, cast_p); } if (fold_expr_p) expr = instantiate_non_dependent_expr (expr); /* If we have an ellipsis, then this is an expression expansion. */ if (allow_expansion_p && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { /* Consume the `...'. */ cp_lexer_consume_token (parser->lexer); /* Build the argument pack. */ expr = make_pack_expansion (expr); } /* Add it to the list. We add error_mark_node expressions to the list, so that we can still tell if the correct form for a parenthesized expression-list is found. That gives better errors. */ vec_safe_push (expression_list, expr); if (expr == error_mark_node) goto skip_comma; } /* After the first item, attribute lists look the same as expression lists. */ is_attribute_list = non_attr; get_comma:; /* If the next token isn't a `,', then we are done. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Otherwise, consume the `,' and keep going. */ cp_lexer_consume_token (parser->lexer); } if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) { int ending; skip_comma:; /* We try to resync to an unnested comma, as that will give the user better diagnostics. */ ending = cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/true, /*consume_paren=*/true); if (ending < 0) goto get_comma; if (!ending) { parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; return NULL; } } parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; if (identifier) vec_safe_insert (expression_list, 0, identifier); return expression_list; } /* Parse a pseudo-destructor-name. 
pseudo-destructor-name: :: [opt] nested-name-specifier [opt] type-name :: ~ type-name :: [opt] nested-name-specifier template template-id :: ~ type-name :: [opt] nested-name-specifier [opt] ~ type-name If either of the first two productions is used, sets *SCOPE to the TYPE specified before the final `::'. Otherwise, *SCOPE is set to NULL_TREE. *TYPE is set to the TYPE_DECL for the final type-name, or ERROR_MARK_NODE if the parse fails. */ static void cp_parser_pseudo_destructor_name (cp_parser* parser, tree object, tree* scope, tree* type) { bool nested_name_specifier_p; /* Handle ~auto. */ if (cp_lexer_next_token_is (parser->lexer, CPP_COMPL) && cp_lexer_nth_token_is_keyword (parser->lexer, 2, RID_AUTO) && !type_dependent_expression_p (object)) { if (cxx_dialect < cxx14) pedwarn (input_location, 0, "%<~auto%> only available with " "-std=c++14 or -std=gnu++14"); cp_lexer_consume_token (parser->lexer); cp_lexer_consume_token (parser->lexer); *scope = NULL_TREE; *type = TREE_TYPE (object); return; } /* Assume that things will not work out. */ *type = error_mark_node; /* Look for the optional `::' operator. */ cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/true); /* Look for the optional nested-name-specifier. */ nested_name_specifier_p = (cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/false, /*check_dependency_p=*/true, /*type_p=*/false, /*is_declaration=*/false) != NULL_TREE); /* Now, if we saw a nested-name-specifier, we might be doing the second production. */ if (nested_name_specifier_p && cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE)) { /* Consume the `template' keyword. */ cp_lexer_consume_token (parser->lexer); /* Parse the template-id. */ cp_parser_template_id (parser, /*template_keyword_p=*/true, /*check_dependency_p=*/false, class_type, /*is_declaration=*/true); /* Look for the `::' token. */ cp_parser_require (parser, CPP_SCOPE, RT_SCOPE); } /* If the next token is not a `~', then there might be some additional qualification. */ else if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMPL)) { /* At this point, we're looking for "type-name :: ~". The type-name must not be a class-name, since this is a pseudo-destructor. So, it must be either an enum-name, or a typedef-name -- both of which are just identifiers. So, we peek ahead to check that the "::" and "~" tokens are present; if they are not, then we can avoid calling type_name. */ if (cp_lexer_peek_token (parser->lexer)->type != CPP_NAME || cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE || cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_COMPL) { cp_parser_error (parser, "non-scalar type"); return; } /* Look for the type-name. */ *scope = TREE_TYPE (cp_parser_nonclass_name (parser)); if (*scope == error_mark_node) return; /* Look for the `::' token. */ cp_parser_require (parser, CPP_SCOPE, RT_SCOPE); } else *scope = NULL_TREE; /* Look for the `~'. */ cp_parser_require (parser, CPP_COMPL, RT_COMPL); /* Once we see the ~, this has to be a pseudo-destructor. */ if (!processing_template_decl && !cp_parser_error_occurred (parser)) cp_parser_commit_to_topmost_tentative_parse (parser); /* Look for the type-name again. We are not responsible for checking that it matches the first type-name. */ *type = TREE_TYPE (cp_parser_nonclass_name (parser)); } /* Parse a unary-expression. 
unary-expression: postfix-expression ++ cast-expression -- cast-expression unary-operator cast-expression sizeof unary-expression sizeof ( type-id ) alignof ( type-id ) [C++0x] new-expression delete-expression GNU Extensions: unary-expression: __extension__ cast-expression __alignof__ unary-expression __alignof__ ( type-id ) alignof unary-expression [C++0x] __real__ cast-expression __imag__ cast-expression && identifier sizeof ( type-id ) { initializer-list , [opt] } alignof ( type-id ) { initializer-list , [opt] } [C++0x] __alignof__ ( type-id ) { initializer-list , [opt] } ADDRESS_P is true iff the unary-expression is appearing as the operand of the `&' operator. CAST_P is true if this expression is the target of a cast. Returns a representation of the expression. */ static tree cp_parser_unary_expression (cp_parser *parser, cp_id_kind * pidk, bool address_p, bool cast_p, bool decltype_p) { cp_token *token; enum tree_code unary_operator; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Some keywords give away the kind of expression. */ if (token->type == CPP_KEYWORD) { enum rid keyword = token->keyword; switch (keyword) { case RID_ALIGNOF: case RID_SIZEOF: { tree operand, ret; enum tree_code op; location_t first_loc; op = keyword == RID_ALIGNOF ? ALIGNOF_EXPR : SIZEOF_EXPR; /* Consume the token. */ cp_lexer_consume_token (parser->lexer); first_loc = cp_lexer_peek_token (parser->lexer)->location; /* Parse the operand. */ operand = cp_parser_sizeof_operand (parser, keyword); if (TYPE_P (operand)) ret = cxx_sizeof_or_alignof_type (operand, op, true); else { /* ISO C++ defines alignof only with types, not with expressions. So pedwarn if alignof is used with a non-type expression. However, __alignof__ is ok. */ if (!strcmp (IDENTIFIER_POINTER (token->u.value), "alignof")) pedwarn (token->location, OPT_Wpedantic, "ISO C++ does not allow %<alignof%> " "with a non-type"); ret = cxx_sizeof_or_alignof_expr (operand, op, true); } /* For SIZEOF_EXPR, just issue diagnostics, but keep SIZEOF_EXPR with the original operand. */ if (op == SIZEOF_EXPR && ret != error_mark_node) { if (TREE_CODE (ret) != SIZEOF_EXPR || TYPE_P (operand)) { if (!processing_template_decl && TYPE_P (operand)) { ret = build_min (SIZEOF_EXPR, size_type_node, build1 (NOP_EXPR, operand, error_mark_node)); SIZEOF_EXPR_TYPE_P (ret) = 1; } else ret = build_min (SIZEOF_EXPR, size_type_node, operand); TREE_SIDE_EFFECTS (ret) = 0; TREE_READONLY (ret) = 1; } SET_EXPR_LOCATION (ret, first_loc); } return ret; } case RID_NEW: return cp_parser_new_expression (parser); case RID_DELETE: return cp_parser_delete_expression (parser); case RID_EXTENSION: { /* The saved value of the PEDANTIC flag. */ int saved_pedantic; tree expr; /* Save away the PEDANTIC flag. */ cp_parser_extension_opt (parser, &saved_pedantic); /* Parse the cast-expression. */ expr = cp_parser_simple_cast_expression (parser); /* Restore the PEDANTIC flag. */ pedantic = saved_pedantic; return expr; } case RID_REALPART: case RID_IMAGPART: { tree expression; /* Consume the `__real__' or `__imag__' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the cast-expression. */ expression = cp_parser_simple_cast_expression (parser); /* Create the complete representation. */ return build_x_unary_op (token->location, (keyword == RID_REALPART ? 
REALPART_EXPR : IMAGPART_EXPR), expression, tf_warning_or_error); } break; case RID_TRANSACTION_ATOMIC: case RID_TRANSACTION_RELAXED: return cp_parser_transaction_expression (parser, keyword); case RID_NOEXCEPT: { tree expr; const char *saved_message; bool saved_integral_constant_expression_p; bool saved_non_integral_constant_expression_p; bool saved_greater_than_is_operator_p; cp_lexer_consume_token (parser->lexer); cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in %<noexcept%> expressions"); saved_integral_constant_expression_p = parser->integral_constant_expression_p; saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p; parser->integral_constant_expression_p = false; saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = true; ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; ++cp_noexcept_operand; expr = cp_parser_expression (parser); --cp_noexcept_operand; --c_inhibit_evaluation_warnings; --cp_unevaluated_operand; parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; parser->integral_constant_expression_p = saved_integral_constant_expression_p; parser->non_integral_constant_expression_p = saved_non_integral_constant_expression_p; parser->type_definition_forbidden_message = saved_message; cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); return finish_noexcept_expr (expr, tf_warning_or_error); } default: break; } } /* Look for the `:: new' and `:: delete', which also signal the beginning of a new-expression, or delete-expression, respectively. If the next token is `::', then it might be one of these. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)) { enum rid keyword; /* See if the token after the `::' is one of the keywords in which we're interested. */ keyword = cp_lexer_peek_nth_token (parser->lexer, 2)->keyword; /* If it's `new', we have a new-expression. */ if (keyword == RID_NEW) return cp_parser_new_expression (parser); /* Similarly, for `delete'. */ else if (keyword == RID_DELETE) return cp_parser_delete_expression (parser); } /* Look for a unary operator. */ unary_operator = cp_parser_unary_operator (token); /* The `++' and `--' operators can be handled similarly, even though they are not technically unary-operators in the grammar. */ if (unary_operator == ERROR_MARK) { if (token->type == CPP_PLUS_PLUS) unary_operator = PREINCREMENT_EXPR; else if (token->type == CPP_MINUS_MINUS) unary_operator = PREDECREMENT_EXPR; /* Handle the GNU address-of-label extension. */ else if (cp_parser_allow_gnu_extensions_p (parser) && token->type == CPP_AND_AND) { tree identifier; tree expression; location_t loc = token->location; /* Consume the '&&' token. */ cp_lexer_consume_token (parser->lexer); /* Look for the identifier. */ identifier = cp_parser_identifier (parser); /* Create an expression representing the address. */ expression = finish_label_address_expr (identifier, loc); if (cp_parser_non_integral_constant_expression (parser, NIC_ADDR_LABEL)) expression = error_mark_node; return expression; } } if (unary_operator != ERROR_MARK) { tree cast_expression; tree expression = error_mark_node; non_integral_constant non_constant_p = NIC_NONE; location_t loc = token->location; tsubst_flags_t complain = complain_flags (decltype_p); /* Consume the operator token. 
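     (A note on the ADDR_EXPR case below, with a hypothetical example: in

         struct A { void f (); };
         void (A::*pmf) () = &A::f;

     the operand of `&' is a qualified-id that must form a pointer to member
     rather than an ordinary value, which is why cp_parser_cast_expression is
     told whether it is parsing the operand of `&'.)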
*/ token = cp_lexer_consume_token (parser->lexer); /* Parse the cast-expression. */ cast_expression = cp_parser_cast_expression (parser, unary_operator == ADDR_EXPR, /*cast_p=*/false, /*decltype*/false, pidk); /* Now, build an appropriate representation. */ switch (unary_operator) { case INDIRECT_REF: non_constant_p = NIC_STAR; expression = build_x_indirect_ref (loc, cast_expression, RO_UNARY_STAR, complain); break; case ADDR_EXPR: non_constant_p = NIC_ADDR; /* Fall through. */ case BIT_NOT_EXPR: expression = build_x_unary_op (loc, unary_operator, cast_expression, complain); break; case PREINCREMENT_EXPR: case PREDECREMENT_EXPR: non_constant_p = unary_operator == PREINCREMENT_EXPR ? NIC_PREINCREMENT : NIC_PREDECREMENT; /* Fall through. */ case UNARY_PLUS_EXPR: case NEGATE_EXPR: case TRUTH_NOT_EXPR: expression = finish_unary_op_expr (loc, unary_operator, cast_expression, complain); break; default: gcc_unreachable (); } if (non_constant_p != NIC_NONE && cp_parser_non_integral_constant_expression (parser, non_constant_p)) expression = error_mark_node; return expression; } return cp_parser_postfix_expression (parser, address_p, cast_p, /*member_access_only_p=*/false, decltype_p, pidk); } /* Returns ERROR_MARK if TOKEN is not a unary-operator. If TOKEN is a unary-operator, the corresponding tree code is returned. */ static enum tree_code cp_parser_unary_operator (cp_token* token) { switch (token->type) { case CPP_MULT: return INDIRECT_REF; case CPP_AND: return ADDR_EXPR; case CPP_PLUS: return UNARY_PLUS_EXPR; case CPP_MINUS: return NEGATE_EXPR; case CPP_NOT: return TRUTH_NOT_EXPR; case CPP_COMPL: return BIT_NOT_EXPR; default: return ERROR_MARK; } } /* Parse a new-expression. new-expression: :: [opt] new new-placement [opt] new-type-id new-initializer [opt] :: [opt] new new-placement [opt] ( type-id ) new-initializer [opt] Returns a representation of the expression. */ static tree cp_parser_new_expression (cp_parser* parser) { bool global_scope_p; vec<tree, va_gc> *placement; tree type; vec<tree, va_gc> *initializer; tree nelts = NULL_TREE; tree ret; /* Look for the optional `::' operator. */ global_scope_p = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false) != NULL_TREE); /* Look for the `new' operator. */ cp_parser_require_keyword (parser, RID_NEW, RT_NEW); /* There's no easy way to tell a new-placement from the `( type-id )' construct. */ cp_parser_parse_tentatively (parser); /* Look for a new-placement. */ placement = cp_parser_new_placement (parser); /* If that didn't work out, there's no new-placement. */ if (!cp_parser_parse_definitely (parser)) { if (placement != NULL) release_tree_vector (placement); placement = NULL; } /* If the next token is a `(', then we have a parenthesized type-id. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { cp_token *token; const char *saved_message = parser->type_definition_forbidden_message; /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); /* Parse the type-id. */ parser->type_definition_forbidden_message = G_("types may not be defined in a new-expression"); type = cp_parser_type_id (parser); parser->type_definition_forbidden_message = saved_message; /* Look for the closing `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); token = cp_lexer_peek_token (parser->lexer); /* There should not be a direct-new-declarator in this production, but GCC used to allow this, so we check and emit a sensible error message for this case. 
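	 For instance, a hypothetical

	     int *p = new (int) [10];

	 draws the error and note emitted below; the accepted spelling is
	 `new int[10]'.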
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE)) { error_at (token->location, "array bound forbidden after parenthesized type-id"); inform (token->location, "try removing the parentheses around the type-id"); cp_parser_direct_new_declarator (parser); } } /* Otherwise, there must be a new-type-id. */ else type = cp_parser_new_type_id (parser, &nelts); /* If the next token is a `(' or '{', then we have a new-initializer. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN) || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) initializer = cp_parser_new_initializer (parser); else initializer = NULL; /* A new-expression may not appear in an integral constant expression. */ if (cp_parser_non_integral_constant_expression (parser, NIC_NEW)) ret = error_mark_node; else { /* Create a representation of the new-expression. */ ret = build_new (&placement, type, nelts, &initializer, global_scope_p, tf_warning_or_error); } if (placement != NULL) release_tree_vector (placement); if (initializer != NULL) release_tree_vector (initializer); return ret; } /* Parse a new-placement. new-placement: ( expression-list ) Returns the same representation as for an expression-list. */ static vec<tree, va_gc> * cp_parser_new_placement (cp_parser* parser) { vec<tree, va_gc> *expression_list; /* Parse the expression-list. */ expression_list = (cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL)); return expression_list; } /* Parse a new-type-id. new-type-id: type-specifier-seq new-declarator [opt] Returns the TYPE allocated. If the new-type-id indicates an array type, *NELTS is set to the number of elements in the last array bound; the TYPE will not include the last array bound. */ static tree cp_parser_new_type_id (cp_parser* parser, tree *nelts) { cp_decl_specifier_seq type_specifier_seq; cp_declarator *new_declarator; cp_declarator *declarator; cp_declarator *outer_declarator; const char *saved_message; /* The type-specifier sequence must not contain type definitions. (It cannot contain declarations of new types either, but if they are not definitions we will catch that because they are not complete.) */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in a new-type-id"); /* Parse the type-specifier-seq. */ cp_parser_type_specifier_seq (parser, /*is_declaration=*/false, /*is_trailing_return=*/false, &type_specifier_seq); /* Restore the old message. */ parser->type_definition_forbidden_message = saved_message; if (type_specifier_seq.type == error_mark_node) return error_mark_node; /* Parse the new-declarator. */ new_declarator = cp_parser_new_declarator_opt (parser); /* Determine the number of elements in the last array dimension, if any. */ *nelts = NULL_TREE; /* Skip down to the last array dimension. 
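     (For a hypothetical `new int[n][3]', the loops below leave *NELTS as
     `n', strip that bound from the declarator chain, and hand groktypename
     the element type `int[3]'.)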
*/ declarator = new_declarator; outer_declarator = NULL; while (declarator && (declarator->kind == cdk_pointer || declarator->kind == cdk_ptrmem)) { outer_declarator = declarator; declarator = declarator->declarator; } while (declarator && declarator->kind == cdk_array && declarator->declarator && declarator->declarator->kind == cdk_array) { outer_declarator = declarator; declarator = declarator->declarator; } if (declarator && declarator->kind == cdk_array) { *nelts = declarator->u.array.bounds; if (*nelts == error_mark_node) *nelts = integer_one_node; if (outer_declarator) outer_declarator->declarator = declarator->declarator; else new_declarator = NULL; } return groktypename (&type_specifier_seq, new_declarator, false); } /* Parse an (optional) new-declarator. new-declarator: ptr-operator new-declarator [opt] direct-new-declarator Returns the declarator. */ static cp_declarator * cp_parser_new_declarator_opt (cp_parser* parser) { enum tree_code code; tree type, std_attributes = NULL_TREE; cp_cv_quals cv_quals; /* We don't know if there's a ptr-operator next, or not. */ cp_parser_parse_tentatively (parser); /* Look for a ptr-operator. */ code = cp_parser_ptr_operator (parser, &type, &cv_quals, &std_attributes); /* If that worked, look for more new-declarators. */ if (cp_parser_parse_definitely (parser)) { cp_declarator *declarator; /* Parse another optional declarator. */ declarator = cp_parser_new_declarator_opt (parser); declarator = cp_parser_make_indirect_declarator (code, type, cv_quals, declarator, std_attributes); return declarator; } /* If the next token is a `[', there is a direct-new-declarator. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE)) return cp_parser_direct_new_declarator (parser); return NULL; } /* Parse a direct-new-declarator. direct-new-declarator: [ expression ] direct-new-declarator [constant-expression] */ static cp_declarator * cp_parser_direct_new_declarator (cp_parser* parser) { cp_declarator *declarator = NULL; while (true) { tree expression; cp_token *token; /* Look for the opening `['. */ cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE); token = cp_lexer_peek_token (parser->lexer); expression = cp_parser_expression (parser); /* The standard requires that the expression have integral type. DR 74 adds enumeration types. We believe that the real intent is that these expressions be handled like the expression in a `switch' condition, which also allows classes with a single conversion to integral or enumeration type. */ if (!processing_template_decl) { expression = build_expr_type_conversion (WANT_INT | WANT_ENUM, expression, /*complain=*/true); if (!expression) { error_at (token->location, "expression in new-declarator must have integral " "or enumeration type"); expression = error_mark_node; } } /* Look for the closing `]'. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); /* Add this bound to the declarator. */ declarator = make_array_declarator (declarator, expression); /* If the next token is not a `[', then there are no more bounds. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE)) break; } return declarator; } /* Parse a new-initializer. new-initializer: ( expression-list [opt] ) braced-init-list Returns a representation of the expression-list. 
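   Hypothetical examples of the two forms:

       new int (5)   parenthesized expression-list
       new int {5}   braced-init-list (C++11), marked
                     CONSTRUCTOR_IS_DIRECT_INIT below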
*/ static vec<tree, va_gc> * cp_parser_new_initializer (cp_parser* parser) { vec<tree, va_gc> *expression_list; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { tree t; bool expr_non_constant_p; cp_lexer_set_source_position (parser->lexer); maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); t = cp_parser_braced_list (parser, &expr_non_constant_p); CONSTRUCTOR_IS_DIRECT_INIT (t) = 1; expression_list = make_tree_vector_single (t); } else expression_list = (cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL)); return expression_list; } /* Parse a delete-expression. delete-expression: :: [opt] delete cast-expression :: [opt] delete [ ] cast-expression Returns a representation of the expression. */ static tree cp_parser_delete_expression (cp_parser* parser) { bool global_scope_p; bool array_p; tree expression; /* Look for the optional `::' operator. */ global_scope_p = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false) != NULL_TREE); /* Look for the `delete' keyword. */ cp_parser_require_keyword (parser, RID_DELETE, RT_DELETE); /* See if the array syntax is in use. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE)) { /* Consume the `[' token. */ cp_lexer_consume_token (parser->lexer); /* Look for the `]' token. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); /* Remember that this is the `[]' construct. */ array_p = true; } else array_p = false; /* Parse the cast-expression. */ expression = cp_parser_simple_cast_expression (parser); /* A delete-expression may not appear in an integral constant expression. */ if (cp_parser_non_integral_constant_expression (parser, NIC_DEL)) return error_mark_node; return delete_sanity (expression, NULL_TREE, array_p, global_scope_p, tf_warning_or_error); } /* Returns 1 if TOKEN may start a cast-expression and isn't '++', '--', or (in C++11) '['; -1 if TOKEN is '++', '--', or '[' in C++11; 0 otherwise. */ static int cp_parser_tokens_start_cast_expression (cp_parser *parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_COMMA: case CPP_SEMICOLON: case CPP_QUERY: case CPP_COLON: case CPP_CLOSE_SQUARE: case CPP_CLOSE_PAREN: case CPP_CLOSE_BRACE: case CPP_OPEN_BRACE: case CPP_DOT: case CPP_DOT_STAR: case CPP_DEREF: case CPP_DEREF_STAR: case CPP_DIV: case CPP_MOD: case CPP_LSHIFT: case CPP_RSHIFT: case CPP_LESS: case CPP_GREATER: case CPP_LESS_EQ: case CPP_GREATER_EQ: case CPP_EQ_EQ: case CPP_NOT_EQ: case CPP_EQ: case CPP_MULT_EQ: case CPP_DIV_EQ: case CPP_MOD_EQ: case CPP_PLUS_EQ: case CPP_MINUS_EQ: case CPP_RSHIFT_EQ: case CPP_LSHIFT_EQ: case CPP_AND_EQ: case CPP_XOR_EQ: case CPP_OR_EQ: case CPP_XOR: case CPP_OR: case CPP_OR_OR: case CPP_EOF: case CPP_ELLIPSIS: return 0; case CPP_OPEN_PAREN: /* In ((type ()) ()) the last () isn't a valid cast-expression, so the whole must be parsed as a postfix-expression. */ return cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_CLOSE_PAREN; case CPP_OPEN_SQUARE: /* '[' may start a primary-expression in obj-c++ and in C++11, as a lambda-expression, e.g., '(void)[]{}'. */ if (cxx_dialect >= cxx11) return -1; return c_dialect_objc (); case CPP_PLUS_PLUS: case CPP_MINUS_MINUS: /* '++' and '--' may or may not start a cast-expression: struct T { void operator++(int); }; void f() { (T())++; } vs int a; (int)++a; */ return -1; default: return 1; } } /* Parse a cast-expression. 
cast-expression: unary-expression ( type-id ) cast-expression ADDRESS_P is true iff the unary-expression is appearing as the operand of the `&' operator. CAST_P is true if this expression is the target of a cast. Returns a representation of the expression. */ static tree cp_parser_cast_expression (cp_parser *parser, bool address_p, bool cast_p, bool decltype_p, cp_id_kind * pidk) { /* If it's a `(', then we might be looking at a cast. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { tree type = NULL_TREE; tree expr = NULL_TREE; int cast_expression = 0; const char *saved_message; /* There's no way to know yet whether or not this is a cast. For example, `(int (3))' is a unary-expression, while `(int) 3' is a cast. So, we resort to parsing tentatively. */ cp_parser_parse_tentatively (parser); /* Types may not be defined in a cast. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in casts"); /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); /* A very tricky bit is that `(struct S) { 3 }' is a compound-literal (which we permit in C++ as an extension). But, that construct is not a cast-expression -- it is a postfix-expression. (The reason is that `(struct S) { 3 }.i' is legal; if the compound-literal were a cast-expression, you'd need an extra set of parentheses.) But, if we parse the type-id, and it happens to be a class-specifier, then we will commit to the parse at that point, because we cannot undo the action that is done when creating a new class. So, then we cannot back up and do a postfix-expression. Another tricky case is the following (c++/29234): struct S { void operator () (); }; void foo () { ( S()() ); } As a type-id we parse the parenthesized S()() as a function returning a function, groktypename complains and we cannot back up in this case either. Therefore, we scan ahead to the closing `)', and check to see if the tokens after the `)' can start a cast-expression. Otherwise we are dealing with a unary-expression, a postfix-expression, or something else. Yet another tricky case, in C++11, is the following (c++/54891): (void)[]{}; The issue is that usually, besides the case of lambda-expressions, the parenthesized type-id cannot be followed by '[', and, e.g., we want to parse '(C ())[2];' in parse/pr26997.C as a unary-expression. Thus, if cp_parser_tokens_start_cast_expression returns -1, below we don't commit, we try a cast-expression, then a unary-expression. Save tokens so that we can put them back. */ cp_lexer_save_tokens (parser->lexer); /* We may be looking at a cast-expression. */ if (cp_parser_skip_to_closing_parenthesis (parser, false, false, /*consume_paren=*/true)) cast_expression = cp_parser_tokens_start_cast_expression (parser); /* Roll back the tokens we skipped. */ cp_lexer_rollback_tokens (parser->lexer); /* If we aren't looking at a cast-expression, simulate an error so that the call to cp_parser_error_occurred below returns true. */ if (!cast_expression) cp_parser_simulate_error (parser); else { bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; /* Look for the type-id. */ type = cp_parser_type_id (parser); /* Look for the closing `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; } /* Restore the saved message. 
*/ parser->type_definition_forbidden_message = saved_message; /* At this point this can only be either a cast or a parenthesized ctor such as `(T ())' that looks like a cast to function returning T. */ if (!cp_parser_error_occurred (parser)) { /* Only commit if the cast-expression doesn't start with '++', '--', or '[' in C++11. */ if (cast_expression > 0) cp_parser_commit_to_topmost_tentative_parse (parser); expr = cp_parser_cast_expression (parser, /*address_p=*/false, /*cast_p=*/true, /*decltype_p=*/false, pidk); if (cp_parser_parse_definitely (parser)) { /* Warn about old-style casts, if so requested. */ if (warn_old_style_cast && !in_system_header_at (input_location) && !VOID_TYPE_P (type) && current_lang_name != lang_name_c) warning (OPT_Wold_style_cast, "use of old-style cast"); /* Only type conversions to integral or enumeration types can be used in constant-expressions. */ if (!cast_valid_in_integral_constant_expression_p (type) && cp_parser_non_integral_constant_expression (parser, NIC_CAST)) return error_mark_node; /* Perform the cast. */ expr = build_c_cast (input_location, type, expr); return expr; } } else cp_parser_abort_tentative_parse (parser); } /* If we get here, then it's not a cast, so it must be a unary-expression. */ return cp_parser_unary_expression (parser, pidk, address_p, cast_p, decltype_p); } /* Parse a binary expression of the general form: pm-expression: cast-expression pm-expression .* cast-expression pm-expression ->* cast-expression multiplicative-expression: pm-expression multiplicative-expression * pm-expression multiplicative-expression / pm-expression multiplicative-expression % pm-expression additive-expression: multiplicative-expression additive-expression + multiplicative-expression additive-expression - multiplicative-expression shift-expression: additive-expression shift-expression << additive-expression shift-expression >> additive-expression relational-expression: shift-expression relational-expression < shift-expression relational-expression > shift-expression relational-expression <= shift-expression relational-expression >= shift-expression GNU Extension: relational-expression: relational-expression <? shift-expression relational-expression >? shift-expression equality-expression: relational-expression equality-expression == relational-expression equality-expression != relational-expression and-expression: equality-expression and-expression & equality-expression exclusive-or-expression: and-expression exclusive-or-expression ^ and-expression inclusive-or-expression: exclusive-or-expression inclusive-or-expression | exclusive-or-expression logical-and-expression: inclusive-or-expression logical-and-expression && inclusive-or-expression logical-or-expression: logical-and-expression logical-or-expression || logical-and-expression All these are implemented with a single function like: binary-expression: simple-cast-expression binary-expression <token> binary-expression CAST_P is true if this expression is the target of a cast. The binops_by_token map is used to get the tree codes for each <token> type. binary-expressions are associated according to a precedence table. */ #define TOKEN_PRECEDENCE(token) \ (((token->type == CPP_GREATER \ || ((cxx_dialect != cxx98) && token->type == CPP_RSHIFT)) \ && !parser->greater_than_is_operator_p) \ ? 
PREC_NOT_OPERATOR \ : binops_by_token[token->type].prec) static tree cp_parser_binary_expression (cp_parser* parser, bool cast_p, bool no_toplevel_fold_p, bool decltype_p, enum cp_parser_prec prec, cp_id_kind * pidk) { cp_parser_expression_stack stack; cp_parser_expression_stack_entry *sp = &stack[0]; cp_parser_expression_stack_entry current; tree rhs; cp_token *token; enum tree_code rhs_type; enum cp_parser_prec new_prec, lookahead_prec; tree overload; /* Parse the first expression. */ current.lhs_type = (cp_lexer_next_token_is (parser->lexer, CPP_NOT) ? TRUTH_NOT_EXPR : ERROR_MARK); current.lhs = cp_parser_cast_expression (parser, /*address_p=*/false, cast_p, decltype_p, pidk); current.prec = prec; if (cp_parser_error_occurred (parser)) return error_mark_node; for (;;) { /* Get an operator token. */ token = cp_lexer_peek_token (parser->lexer); if (warn_cxx0x_compat && token->type == CPP_RSHIFT && !parser->greater_than_is_operator_p) { if (warning_at (token->location, OPT_Wc__0x_compat, "%<>>%> operator is treated" " as two right angle brackets in C++11")) inform (token->location, "suggest parentheses around %<>>%> expression"); } new_prec = TOKEN_PRECEDENCE (token); /* Popping an entry off the stack means we completed a subexpression: - either we found a token which is not an operator (`>' where it is not an operator, or prec == PREC_NOT_OPERATOR), in which case popping will happen repeatedly; - or, we found an operator which has lower priority. This is the case where the recursive descent *ascends*, as in `3 * 4 + 5' after parsing `3 * 4'. */ if (new_prec <= current.prec) { if (sp == stack) break; else goto pop; } get_rhs: current.tree_type = binops_by_token[token->type].tree_type; current.loc = token->location; /* We used the operator token. */ cp_lexer_consume_token (parser->lexer); /* For "false && x" or "true || x", x will never be executed; disable warnings while evaluating it. */ if (current.tree_type == TRUTH_ANDIF_EXPR) c_inhibit_evaluation_warnings += current.lhs == truthvalue_false_node; else if (current.tree_type == TRUTH_ORIF_EXPR) c_inhibit_evaluation_warnings += current.lhs == truthvalue_true_node; /* Extract another operand. It may be the RHS of this expression or the LHS of a new, higher priority expression. */ rhs_type = (cp_lexer_next_token_is (parser->lexer, CPP_NOT) ? TRUTH_NOT_EXPR : ERROR_MARK); rhs = cp_parser_simple_cast_expression (parser); /* Get another operator token. Look up its precedence to avoid building a useless (immediately popped) stack entry for common cases such as 3 + 4 + 5 or 3 * 4 + 5. */ token = cp_lexer_peek_token (parser->lexer); lookahead_prec = TOKEN_PRECEDENCE (token); if (lookahead_prec > new_prec) { /* ... and prepare to parse the RHS of the new, higher priority expression. Since precedence levels on the stack are monotonically increasing, we do not have to care about stack overflows. */ *sp = current; ++sp; current.lhs = rhs; current.lhs_type = rhs_type; current.prec = new_prec; new_prec = lookahead_prec; goto get_rhs; pop: lookahead_prec = new_prec; /* If the stack is not empty, we have parsed into LHS the right side (`4' in the example above) of an expression we had suspended. We can use the information on the stack to recover the LHS (`3') from the stack together with the tree code (`MULT_EXPR'), and the precedence of the higher level subexpression (`PREC_ADDITIVE_EXPRESSION'). TOKEN is the CPP_PLUS token, which will be used to actually build the additive expression. 
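     To spell out the editorial trace in both directions: for `3 * 4 + 5'
     the lookahead `+' binds less tightly than `*', so no entry is pushed
     and the MULT_EXPR is built at once; for `3 + 4 * 5' the lookahead `*'
     binds tighter, so the `3 +' state is pushed and is popped here once
     `4 * 5' has been reduced.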
*/ rhs = current.lhs; rhs_type = current.lhs_type; --sp; current = *sp; } /* Undo the disabling of warnings done above. */ if (current.tree_type == TRUTH_ANDIF_EXPR) c_inhibit_evaluation_warnings -= current.lhs == truthvalue_false_node; else if (current.tree_type == TRUTH_ORIF_EXPR) c_inhibit_evaluation_warnings -= current.lhs == truthvalue_true_node; if (warn_logical_not_paren && TREE_CODE_CLASS (current.tree_type) == tcc_comparison && current.lhs_type == TRUTH_NOT_EXPR /* Avoid warning for !!x == y. */ && (TREE_CODE (current.lhs) != NE_EXPR || !integer_zerop (TREE_OPERAND (current.lhs, 1))) && (TREE_CODE (current.lhs) != TRUTH_NOT_EXPR || (TREE_CODE (TREE_OPERAND (current.lhs, 0)) != TRUTH_NOT_EXPR /* Avoid warning for !b == y where b is boolean. */ && (TREE_TYPE (TREE_OPERAND (current.lhs, 0)) == NULL_TREE || (TREE_CODE (TREE_TYPE (TREE_OPERAND (current.lhs, 0))) != BOOLEAN_TYPE)))) /* Avoid warning for !!b == y where b is boolean. */ && (!DECL_P (current.lhs) || TREE_TYPE (current.lhs) == NULL_TREE || TREE_CODE (TREE_TYPE (current.lhs)) != BOOLEAN_TYPE)) warn_logical_not_parentheses (current.loc, current.tree_type, maybe_constant_value (rhs)); overload = NULL; /* ??? Currently we pass lhs_type == ERROR_MARK and rhs_type == ERROR_MARK for everything that is not a binary expression. This makes warn_about_parentheses miss some warnings that involve unary operators. For unary expressions we should pass the correct tree_code unless the unary expression was surrounded by parentheses. */ if (no_toplevel_fold_p && lookahead_prec <= current.prec && sp == stack) current.lhs = build2 (current.tree_type, TREE_CODE_CLASS (current.tree_type) == tcc_comparison ? boolean_type_node : TREE_TYPE (current.lhs), current.lhs, rhs); else current.lhs = build_x_binary_op (current.loc, current.tree_type, current.lhs, current.lhs_type, rhs, rhs_type, &overload, complain_flags (decltype_p)); current.lhs_type = current.tree_type; if (EXPR_P (current.lhs)) SET_EXPR_LOCATION (current.lhs, current.loc); /* If the binary operator required the use of an overloaded operator, then this expression cannot be an integral constant-expression. An overloaded operator can be used even if both operands are otherwise permissible in an integral constant-expression if at least one of the operands is of enumeration type. */ if (overload && cp_parser_non_integral_constant_expression (parser, NIC_OVERLOADED)) return error_mark_node; } return current.lhs; } static tree cp_parser_binary_expression (cp_parser* parser, bool cast_p, bool no_toplevel_fold_p, enum cp_parser_prec prec, cp_id_kind * pidk) { return cp_parser_binary_expression (parser, cast_p, no_toplevel_fold_p, /*decltype*/false, prec, pidk); } /* Parse the `? expression : assignment-expression' part of a conditional-expression. The LOGICAL_OR_EXPR is the logical-or-expression that started the conditional-expression. Returns a representation of the entire conditional-expression. This routine is used by cp_parser_assignment_expression. ? expression : assignment-expression GNU Extensions: ? : assignment-expression */ static tree cp_parser_question_colon_clause (cp_parser* parser, tree logical_or_expr) { tree expr; tree assignment_expr; struct cp_token *token; location_t loc = cp_lexer_peek_token (parser->lexer)->location; /* Consume the `?' token. 
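(With the GNU extension the middle operand may be omitted altogether, as in `x ? : y', which acts like `x ? x : y' except that `x' is evaluated only once; the pedwarn and implicit-true clause below handle that form.)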
*/ cp_lexer_consume_token (parser->lexer); token = cp_lexer_peek_token (parser->lexer); if (cp_parser_allow_gnu_extensions_p (parser) && token->type == CPP_COLON) { pedwarn (token->location, OPT_Wpedantic, "ISO C++ does not allow ?: with omitted middle operand"); /* Implicit true clause. */ expr = NULL_TREE; c_inhibit_evaluation_warnings += logical_or_expr == truthvalue_true_node; warn_for_omitted_condop (token->location, logical_or_expr); } else { bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p; parser->colon_corrects_to_scope_p = false; /* Parse the expression. */ c_inhibit_evaluation_warnings += logical_or_expr == truthvalue_false_node; expr = cp_parser_expression (parser); c_inhibit_evaluation_warnings += ((logical_or_expr == truthvalue_true_node) - (logical_or_expr == truthvalue_false_node)); parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; } /* The next token should be a `:'. */ cp_parser_require (parser, CPP_COLON, RT_COLON); /* Parse the assignment-expression. */ assignment_expr = cp_parser_assignment_expression (parser); c_inhibit_evaluation_warnings -= logical_or_expr == truthvalue_true_node; /* Build the conditional-expression. */ return build_x_conditional_expr (loc, logical_or_expr, expr, assignment_expr, tf_warning_or_error); } /* Parse an assignment-expression. assignment-expression: conditional-expression logical-or-expression assignment-operator assignment_expression throw-expression CAST_P is true if this expression is the target of a cast. DECLTYPE_P is true if this expression is the operand of decltype. Returns a representation for the expression. */ static tree cp_parser_assignment_expression (cp_parser* parser, cp_id_kind * pidk, bool cast_p, bool decltype_p) { tree expr; /* If the next token is the `throw' keyword, then we're looking at a throw-expression. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THROW)) expr = cp_parser_throw_expression (parser); /* Otherwise, it must be that we are looking at a logical-or-expression. */ else { /* Parse the binary expressions (logical-or-expression). */ expr = cp_parser_binary_expression (parser, cast_p, false, decltype_p, PREC_NOT_OPERATOR, pidk); /* If the next token is a `?' then we're actually looking at a conditional-expression. */ if (cp_lexer_next_token_is (parser->lexer, CPP_QUERY)) return cp_parser_question_colon_clause (parser, expr); else { location_t loc = cp_lexer_peek_token (parser->lexer)->location; /* If it's an assignment-operator, we're using the second production. */ enum tree_code assignment_operator = cp_parser_assignment_operator_opt (parser); if (assignment_operator != ERROR_MARK) { bool non_constant_p; location_t saved_input_location; /* Parse the right-hand side of the assignment. */ tree rhs = cp_parser_initializer_clause (parser, &non_constant_p); if (BRACE_ENCLOSED_INITIALIZER_P (rhs)) maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); /* An assignment may not appear in a constant-expression. */ if (cp_parser_non_integral_constant_expression (parser, NIC_ASSIGNMENT)) return error_mark_node; /* Build the assignment expression. Its default location is the location of the '=' token. */ saved_input_location = input_location; input_location = loc; expr = build_x_modify_expr (loc, expr, assignment_operator, rhs, complain_flags (decltype_p)); input_location = saved_input_location; } } } return expr; } /* Parse an (optional) assignment-operator. 
assignment-operator: one of = *= /= %= += -= >>= <<= &= ^= |= GNU Extension: assignment-operator: one of <?= >?= If the next token is an assignment operator, the corresponding tree code is returned, and the token is consumed. For example, for `+=', PLUS_EXPR is returned. For `=' itself, the code returned is NOP_EXPR. For `/=', TRUNC_DIV_EXPR is returned; for `%=', TRUNC_MOD_EXPR is returned. If TOKEN is not an assignment operator, ERROR_MARK is returned. */ static enum tree_code cp_parser_assignment_operator_opt (cp_parser* parser) { enum tree_code op; cp_token *token; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_EQ: op = NOP_EXPR; break; case CPP_MULT_EQ: op = MULT_EXPR; break; case CPP_DIV_EQ: op = TRUNC_DIV_EXPR; break; case CPP_MOD_EQ: op = TRUNC_MOD_EXPR; break; case CPP_PLUS_EQ: op = PLUS_EXPR; break; case CPP_MINUS_EQ: op = MINUS_EXPR; break; case CPP_RSHIFT_EQ: op = RSHIFT_EXPR; break; case CPP_LSHIFT_EQ: op = LSHIFT_EXPR; break; case CPP_AND_EQ: op = BIT_AND_EXPR; break; case CPP_XOR_EQ: op = BIT_XOR_EXPR; break; case CPP_OR_EQ: op = BIT_IOR_EXPR; break; default: /* Nothing else is an assignment operator. */ op = ERROR_MARK; } /* If it was an assignment operator, consume it. */ if (op != ERROR_MARK) cp_lexer_consume_token (parser->lexer); return op; } /* Parse an expression. expression: assignment-expression expression , assignment-expression CAST_P is true if this expression is the target of a cast. DECLTYPE_P is true if this expression is the immediate operand of decltype, except possibly parenthesized or on the RHS of a comma (N3276). Returns a representation of the expression. */ static tree cp_parser_expression (cp_parser* parser, cp_id_kind * pidk, bool cast_p, bool decltype_p) { tree expression = NULL_TREE; location_t loc = UNKNOWN_LOCATION; while (true) { tree assignment_expression; /* Parse the next assignment-expression. */ assignment_expression = cp_parser_assignment_expression (parser, pidk, cast_p, decltype_p); /* We don't create a temporary for a call that is the immediate operand of decltype or on the RHS of a comma. But when we see a comma, we need to create a temporary for a call on the LHS. */ if (decltype_p && !processing_template_decl && TREE_CODE (assignment_expression) == CALL_EXPR && CLASS_TYPE_P (TREE_TYPE (assignment_expression)) && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) assignment_expression = build_cplus_new (TREE_TYPE (assignment_expression), assignment_expression, tf_warning_or_error); /* If this is the first assignment-expression, we can just save it away. */ if (!expression) expression = assignment_expression; else expression = build_x_compound_expr (loc, expression, assignment_expression, complain_flags (decltype_p)); /* If the next token is not a comma, then we are done with the expression. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Consume the `,'. */ loc = cp_lexer_peek_token (parser->lexer)->location; cp_lexer_consume_token (parser->lexer); /* A comma operator cannot appear in a constant-expression. */ if (cp_parser_non_integral_constant_expression (parser, NIC_COMMA)) expression = error_mark_node; } return expression; } /* Parse a constant-expression. constant-expression: conditional-expression If ALLOW_NON_CONSTANT_P is true, a non-constant expression is silently accepted. If ALLOW_NON_CONSTANT_P is true and the expression is not constant, *NON_CONSTANT_P is set to TRUE. If ALLOW_NON_CONSTANT_P is false, NON_CONSTANT_P should be NULL. 
*/ static tree cp_parser_constant_expression (cp_parser* parser, bool allow_non_constant_p, bool *non_constant_p) { bool saved_integral_constant_expression_p; bool saved_allow_non_integral_constant_expression_p; bool saved_non_integral_constant_expression_p; tree expression; /* It might seem that we could simply parse the conditional-expression, and then check to see if it were TREE_CONSTANT. However, an expression that is TREE_CONSTANT is one that the compiler can figure out is constant, possibly after doing some simplifications or optimizations. The standard has a precise definition of constant-expression, and we must honor that, even though it is somewhat more restrictive. For example: int i[(2, 3)]; is not a legal declaration, because `(2, 3)' is not a constant-expression. The `,' operator is forbidden in a constant-expression. However, GCC's constant-folding machinery will fold this operation to an INTEGER_CST for `3'. */ /* Save the old settings. */ saved_integral_constant_expression_p = parser->integral_constant_expression_p; saved_allow_non_integral_constant_expression_p = parser->allow_non_integral_constant_expression_p; saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p; /* We are now parsing a constant-expression. */ parser->integral_constant_expression_p = true; parser->allow_non_integral_constant_expression_p = (allow_non_constant_p || cxx_dialect >= cxx11); parser->non_integral_constant_expression_p = false; /* Although the grammar says "conditional-expression", we parse an "assignment-expression", which also permits "throw-expression" and the use of assignment operators. In the case that ALLOW_NON_CONSTANT_P is false, we get better errors than we would otherwise. In the case that ALLOW_NON_CONSTANT_P is true, it is actually essential that we look for an assignment-expression. For example, cp_parser_initializer_clauses uses this function to determine whether a particular assignment-expression is in fact constant. */ expression = cp_parser_assignment_expression (parser); /* Restore the old settings. */ parser->integral_constant_expression_p = saved_integral_constant_expression_p; parser->allow_non_integral_constant_expression_p = saved_allow_non_integral_constant_expression_p; if (cxx_dialect >= cxx11) { /* Require an rvalue constant expression here; that's what our callers expect. Reference constant expressions are handled separately in e.g. cp_parser_template_argument. */ bool is_const = potential_rvalue_constant_expression (expression); parser->non_integral_constant_expression_p = !is_const; if (!is_const && !allow_non_constant_p) require_potential_rvalue_constant_expression (expression); } if (allow_non_constant_p) *non_constant_p = parser->non_integral_constant_expression_p; parser->non_integral_constant_expression_p = saved_non_integral_constant_expression_p; return expression; } /* Parse __builtin_offsetof. offsetof-expression: "__builtin_offsetof" "(" type-id "," offsetof-member-designator ")" offsetof-member-designator: id-expression | offsetof-member-designator "." id-expression | offsetof-member-designator "[" expression "]" | offsetof-member-designator "->" id-expression */ static tree cp_parser_builtin_offsetof (cp_parser *parser) { int save_ice_p, save_non_ice_p; tree type, expr; cp_id_kind dummy; cp_token *token; /* We're about to accept non-integral-constant things, but will definitely yield an integral constant expression. Save and restore these values around our local parsing. 
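For example, `__builtin_offsetof (struct S, a.b[2].c)' is handled as if it were `((struct S *) 0)->a.b[2].c'; the member accesses through the null pointer are not themselves integral constant expressions, but the resulting offset is.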
*/ save_ice_p = parser->integral_constant_expression_p; save_non_ice_p = parser->non_integral_constant_expression_p; /* Consume the "__builtin_offsetof" token. */ cp_lexer_consume_token (parser->lexer); /* Consume the opening `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Parse the type-id. */ location_t loc = cp_lexer_peek_token (parser->lexer)->location; type = cp_parser_type_id (parser); /* Look for the `,'. */ cp_parser_require (parser, CPP_COMMA, RT_COMMA); token = cp_lexer_peek_token (parser->lexer); /* Build the (type *)null that begins the traditional offsetof macro. */ expr = build_static_cast (build_pointer_type (type), null_pointer_node, tf_warning_or_error); /* Parse the offsetof-member-designator. We begin as if we saw "expr->". */ expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DEREF, expr, true, &dummy, token->location); while (true) { token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_OPEN_SQUARE: /* offsetof-member-designator "[" expression "]" */ expr = cp_parser_postfix_open_square_expression (parser, expr, true, false); break; case CPP_DEREF: /* offsetof-member-designator "->" identifier */ expr = grok_array_decl (token->location, expr, integer_zero_node, false); /* FALLTHRU */ case CPP_DOT: /* offsetof-member-designator "." identifier */ cp_lexer_consume_token (parser->lexer); expr = cp_parser_postfix_dot_deref_expression (parser, CPP_DOT, expr, true, &dummy, token->location); break; case CPP_CLOSE_PAREN: /* Consume the ")" token. */ cp_lexer_consume_token (parser->lexer); goto success; default: /* Error. We know the following require will fail, but that gives the proper error message. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); cp_parser_skip_to_closing_parenthesis (parser, true, false, true); expr = error_mark_node; goto failure; } } success: expr = finish_offsetof (expr, loc); failure: parser->integral_constant_expression_p = save_ice_p; parser->non_integral_constant_expression_p = save_non_ice_p; return expr; } /* Parse a trait expression. Returns a representation of the expression, the underlying type of the type at issue when KEYWORD is RID_UNDERLYING_TYPE. 
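For example (each spelling below corresponds to one of the RID_* cases handled here):

     __is_class (T)                          unary trait
     __is_base_of (B, D)                     binary trait
     __is_trivially_constructible (T, U)     variadic trait
     __underlying_type (E)                   yields a type, not a boolean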
*/ static tree cp_parser_trait_expr (cp_parser* parser, enum rid keyword) { cp_trait_kind kind; tree type1, type2 = NULL_TREE; bool binary = false; bool variadic = false; switch (keyword) { case RID_HAS_NOTHROW_ASSIGN: kind = CPTK_HAS_NOTHROW_ASSIGN; break; case RID_HAS_NOTHROW_CONSTRUCTOR: kind = CPTK_HAS_NOTHROW_CONSTRUCTOR; break; case RID_HAS_NOTHROW_COPY: kind = CPTK_HAS_NOTHROW_COPY; break; case RID_HAS_TRIVIAL_ASSIGN: kind = CPTK_HAS_TRIVIAL_ASSIGN; break; case RID_HAS_TRIVIAL_CONSTRUCTOR: kind = CPTK_HAS_TRIVIAL_CONSTRUCTOR; break; case RID_HAS_TRIVIAL_COPY: kind = CPTK_HAS_TRIVIAL_COPY; break; case RID_HAS_TRIVIAL_DESTRUCTOR: kind = CPTK_HAS_TRIVIAL_DESTRUCTOR; break; case RID_HAS_VIRTUAL_DESTRUCTOR: kind = CPTK_HAS_VIRTUAL_DESTRUCTOR; break; case RID_IS_ABSTRACT: kind = CPTK_IS_ABSTRACT; break; case RID_IS_BASE_OF: kind = CPTK_IS_BASE_OF; binary = true; break; case RID_IS_CLASS: kind = CPTK_IS_CLASS; break; case RID_IS_EMPTY: kind = CPTK_IS_EMPTY; break; case RID_IS_ENUM: kind = CPTK_IS_ENUM; break; case RID_IS_FINAL: kind = CPTK_IS_FINAL; break; case RID_IS_LITERAL_TYPE: kind = CPTK_IS_LITERAL_TYPE; break; case RID_IS_POD: kind = CPTK_IS_POD; break; case RID_IS_POLYMORPHIC: kind = CPTK_IS_POLYMORPHIC; break; case RID_IS_STD_LAYOUT: kind = CPTK_IS_STD_LAYOUT; break; case RID_IS_TRIVIAL: kind = CPTK_IS_TRIVIAL; break; case RID_IS_TRIVIALLY_ASSIGNABLE: kind = CPTK_IS_TRIVIALLY_ASSIGNABLE; binary = true; break; case RID_IS_TRIVIALLY_CONSTRUCTIBLE: kind = CPTK_IS_TRIVIALLY_CONSTRUCTIBLE; variadic = true; break; case RID_IS_TRIVIALLY_COPYABLE: kind = CPTK_IS_TRIVIALLY_COPYABLE; break; case RID_IS_UNION: kind = CPTK_IS_UNION; break; case RID_UNDERLYING_TYPE: kind = CPTK_UNDERLYING_TYPE; break; case RID_BASES: kind = CPTK_BASES; break; case RID_DIRECT_BASES: kind = CPTK_DIRECT_BASES; break; default: gcc_unreachable (); } /* Consume the token. */ cp_lexer_consume_token (parser->lexer); cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); type1 = cp_parser_type_id (parser); if (type1 == error_mark_node) return error_mark_node; if (binary) { cp_parser_require (parser, CPP_COMMA, RT_COMMA); type2 = cp_parser_type_id (parser); if (type2 == error_mark_node) return error_mark_node; } else if (variadic) { while (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) { cp_lexer_consume_token (parser->lexer); tree elt = cp_parser_type_id (parser); if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { cp_lexer_consume_token (parser->lexer); elt = make_pack_expansion (elt); } if (elt == error_mark_node) return error_mark_node; type2 = tree_cons (NULL_TREE, elt, type2); } } cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* Complete the trait expression, which may mean either processing the trait expr now or saving it for template instantiation. */ switch (kind) { case CPTK_UNDERLYING_TYPE: return finish_underlying_type (type1); case CPTK_BASES: return finish_bases (type1, false); case CPTK_DIRECT_BASES: return finish_bases (type1, true); default: return finish_trait_expr (kind, type1, type2); } } /* Lambdas that appear in variable initializer or default argument scope get that in their mangling, so we need to record it. We might as well use the count for function and namespace scopes as well. 
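For example, in

     int x = [] { return 1; } () + [] { return 2; } ();

   both closure types belong to the scope of `x' and are distinguished by discriminators 0 and 1.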
*/ static GTY(()) tree lambda_scope; static GTY(()) int lambda_count; typedef struct GTY(()) tree_int { tree t; int i; } tree_int; static GTY(()) vec<tree_int, va_gc> *lambda_scope_stack; static void start_lambda_scope (tree decl) { tree_int ti; gcc_assert (decl); /* Once we're inside a function, we ignore other scopes and just push the function again so that popping works properly. */ if (current_function_decl && TREE_CODE (decl) != FUNCTION_DECL) decl = current_function_decl; ti.t = lambda_scope; ti.i = lambda_count; vec_safe_push (lambda_scope_stack, ti); if (lambda_scope != decl) { /* Don't reset the count if we're still in the same function. */ lambda_scope = decl; lambda_count = 0; } } static void record_lambda_scope (tree lambda) { LAMBDA_EXPR_EXTRA_SCOPE (lambda) = lambda_scope; LAMBDA_EXPR_DISCRIMINATOR (lambda) = lambda_count++; } static void finish_lambda_scope (void) { tree_int *p = &lambda_scope_stack->last (); if (lambda_scope != p->t) { lambda_scope = p->t; lambda_count = p->i; } lambda_scope_stack->pop (); } /* Parse a lambda expression. lambda-expression: lambda-introducer lambda-declarator [opt] compound-statement Returns a representation of the expression. */ static tree cp_parser_lambda_expression (cp_parser* parser) { tree lambda_expr = build_lambda_expr (); tree type; bool ok = true; cp_token *token = cp_lexer_peek_token (parser->lexer); cp_token_position start = 0; LAMBDA_EXPR_LOCATION (lambda_expr) = token->location; if (cp_unevaluated_operand) { if (!token->error_reported) { error_at (LAMBDA_EXPR_LOCATION (lambda_expr), "lambda-expression in unevaluated context"); token->error_reported = true; } ok = false; } else if (parser->in_template_argument_list_p) { if (!token->error_reported) { error_at (token->location, "lambda-expression in template-argument"); token->error_reported = true; } ok = false; } /* We may be in the middle of deferred access check. Disable it now. */ push_deferring_access_checks (dk_no_deferred); cp_parser_lambda_introducer (parser, lambda_expr); type = begin_lambda_type (lambda_expr); if (type == error_mark_node) return error_mark_node; record_lambda_scope (lambda_expr); /* Do this again now that LAMBDA_EXPR_EXTRA_SCOPE is set. */ determine_visibility (TYPE_NAME (type)); /* Now that we've started the type, add the capture fields for any explicit captures. */ register_capture_members (LAMBDA_EXPR_CAPTURE_LIST (lambda_expr)); { /* Inside the class, surrounding template-parameter-lists do not apply. */ unsigned int saved_num_template_parameter_lists = parser->num_template_parameter_lists; unsigned char in_statement = parser->in_statement; bool in_switch_statement_p = parser->in_switch_statement_p; bool fully_implicit_function_template_p = parser->fully_implicit_function_template_p; tree implicit_template_parms = parser->implicit_template_parms; cp_binding_level* implicit_template_scope = parser->implicit_template_scope; bool auto_is_implicit_function_template_parm_p = parser->auto_is_implicit_function_template_parm_p; parser->num_template_parameter_lists = 0; parser->in_statement = 0; parser->in_switch_statement_p = false; parser->fully_implicit_function_template_p = false; parser->implicit_template_parms = 0; parser->implicit_template_scope = 0; parser->auto_is_implicit_function_template_parm_p = false; /* By virtue of defining a local class, a lambda expression has access to the private variables of enclosing classes. 
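(For example, a lambda written inside a member function of a class can refer to that class's private members directly, just as any other local class defined there could.)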
*/ ok &= cp_parser_lambda_declarator_opt (parser, lambda_expr); if (ok) { if (!cp_parser_error_occurred (parser) && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE) && cp_parser_start_tentative_firewall (parser)) start = token; cp_parser_lambda_body (parser, lambda_expr); } else if (cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE)) { if (cp_parser_skip_to_closing_brace (parser)) cp_lexer_consume_token (parser->lexer); } /* The capture list was built up in reverse order; fix that now. */ LAMBDA_EXPR_CAPTURE_LIST (lambda_expr) = nreverse (LAMBDA_EXPR_CAPTURE_LIST (lambda_expr)); if (ok) maybe_add_lambda_conv_op (type); type = finish_struct (type, /*attributes=*/NULL_TREE); parser->num_template_parameter_lists = saved_num_template_parameter_lists; parser->in_statement = in_statement; parser->in_switch_statement_p = in_switch_statement_p; parser->fully_implicit_function_template_p = fully_implicit_function_template_p; parser->implicit_template_parms = implicit_template_parms; parser->implicit_template_scope = implicit_template_scope; parser->auto_is_implicit_function_template_parm_p = auto_is_implicit_function_template_parm_p; } pop_deferring_access_checks (); /* This field is only used during parsing of the lambda. */ LAMBDA_EXPR_THIS_CAPTURE (lambda_expr) = NULL_TREE; /* This lambda shouldn't have any proxies left at this point. */ gcc_assert (LAMBDA_EXPR_PENDING_PROXIES (lambda_expr) == NULL); /* And now that we're done, push proxies for an enclosing lambda. */ insert_pending_capture_proxies (); if (ok) lambda_expr = build_lambda_object (lambda_expr); else lambda_expr = error_mark_node; cp_parser_end_tentative_firewall (parser, start, lambda_expr); return lambda_expr; } /* Parse the beginning of a lambda expression. lambda-introducer: [ lambda-capture [opt] ] LAMBDA_EXPR is the current representation of the lambda expression. */ static void cp_parser_lambda_introducer (cp_parser* parser, tree lambda_expr) { /* Need commas after the first capture. */ bool first = true; /* Eat the leading `['. */ cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE); /* Record default capture mode. "[&" "[=" "[&," "[=," */ if (cp_lexer_next_token_is (parser->lexer, CPP_AND) && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_NAME) LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_REFERENCE; else if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) = CPLD_COPY; if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE) { cp_lexer_consume_token (parser->lexer); first = false; } while (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_SQUARE)) { cp_token* capture_token; tree capture_id; tree capture_init_expr; cp_id_kind idk = CP_ID_KIND_NONE; bool explicit_init_p = false; enum capture_kind_type { BY_COPY, BY_REFERENCE }; enum capture_kind_type capture_kind = BY_COPY; if (cp_lexer_next_token_is (parser->lexer, CPP_EOF)) { error ("expected end of capture-list"); return; } if (first) first = false; else cp_parser_require (parser, CPP_COMMA, RT_COMMA); /* Possibly capture `this'. 
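(That is, an explicit `[this]' capture; a by-copy capture default such as `[=]' also makes `this' available implicitly, which is why an explicit `this' under a by-copy default draws the pedwarn below.)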
*/ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_THIS)) { location_t loc = cp_lexer_peek_token (parser->lexer)->location; if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY) pedwarn (loc, 0, "explicit by-copy capture of %<this%> redundant " "with by-copy capture default"); cp_lexer_consume_token (parser->lexer); add_capture (lambda_expr, /*id=*/this_identifier, /*initializer=*/finish_this_expr (), /*by_reference_p=*/false, explicit_init_p); continue; } /* Remember whether we want to capture as a reference or not. */ if (cp_lexer_next_token_is (parser->lexer, CPP_AND)) { capture_kind = BY_REFERENCE; cp_lexer_consume_token (parser->lexer); } /* Get the identifier. */ capture_token = cp_lexer_peek_token (parser->lexer); capture_id = cp_parser_identifier (parser); if (capture_id == error_mark_node) /* Would be nice to have a cp_parser_skip_to_closing_x for general delimiters, but I modified this to stop on unnested ']' as well. It was already changed to stop on unnested '}', so the "closing_parenthesis" name is no more misleading with my change. */ { cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/true, /*consume_paren=*/true); break; } /* Find the initializer for this capture. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EQ) || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN) || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { bool direct, non_constant; /* An explicit initializer exists. */ if (cxx_dialect < cxx14) pedwarn (input_location, 0, "lambda capture initializers " "only available with -std=c++14 or -std=gnu++14"); capture_init_expr = cp_parser_initializer (parser, &direct, &non_constant); explicit_init_p = true; if (capture_init_expr == NULL_TREE) { error ("empty initializer for lambda init-capture"); capture_init_expr = error_mark_node; } } else { const char* error_msg; /* Turn the identifier into an id-expression. 
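(For a simple capture such as `[v] { ... }' we look up `v' in the enclosing scope here; the checks below then reject captures of non-variables and of variables without automatic storage duration.)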
*/ capture_init_expr = cp_parser_lookup_name_simple (parser, capture_id, capture_token->location); if (capture_init_expr == error_mark_node) { unqualified_name_lookup_error (capture_id); continue; } else if (DECL_P (capture_init_expr) && (!VAR_P (capture_init_expr) && TREE_CODE (capture_init_expr) != PARM_DECL)) { error_at (capture_token->location, "capture of non-variable %qD", capture_init_expr); inform (0, "%q+#D declared here", capture_init_expr); continue; } if (VAR_P (capture_init_expr) && decl_storage_duration (capture_init_expr) != dk_auto) { if (pedwarn (capture_token->location, 0, "capture of variable " "%qD with non-automatic storage duration", capture_init_expr)) inform (0, "%q+#D declared here", capture_init_expr); continue; } capture_init_expr = finish_id_expression (capture_id, capture_init_expr, parser->scope, &idk, /*integral_constant_expression_p=*/false, /*allow_non_integral_constant_expression_p=*/false, /*non_integral_constant_expression_p=*/NULL, /*template_p=*/false, /*done=*/true, /*address_p=*/false, /*template_arg_p=*/false, &error_msg, capture_token->location); if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { cp_lexer_consume_token (parser->lexer); capture_init_expr = make_pack_expansion (capture_init_expr); } else check_for_bare_parameter_packs (capture_init_expr); } if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) != CPLD_NONE && !explicit_init_p) { if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_COPY && capture_kind == BY_COPY) pedwarn (capture_token->location, 0, "explicit by-copy capture " "of %qD redundant with by-copy capture default", capture_id); if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_REFERENCE && capture_kind == BY_REFERENCE) pedwarn (capture_token->location, 0, "explicit by-reference " "capture of %qD redundant with by-reference capture " "default", capture_id); } add_capture (lambda_expr, capture_id, capture_init_expr, /*by_reference_p=*/capture_kind == BY_REFERENCE, explicit_init_p); } cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); } /* Parse the (optional) middle of a lambda expression. lambda-declarator: < template-parameter-list [opt] > ( parameter-declaration-clause [opt] ) attribute-specifier [opt] mutable [opt] exception-specification [opt] lambda-return-type-clause [opt] LAMBDA_EXPR is the current representation of the lambda expression. */ static bool cp_parser_lambda_declarator_opt (cp_parser* parser, tree lambda_expr) { /* 5.1.1.4 of the standard says: If a lambda-expression does not include a lambda-declarator, it is as if the lambda-declarator were (). This means an empty parameter list, no attributes, and no exception specification. */ tree param_list = void_list_node; tree attributes = NULL_TREE; tree exception_spec = NULL_TREE; tree template_param_list = NULL_TREE; /* The template-parameter-list is optional, but must begin with an opening angle if present. */ if (cp_lexer_next_token_is (parser->lexer, CPP_LESS)) { if (cxx_dialect < cxx14) pedwarn (parser->lexer->next_token->location, 0, "lambda templates are only available with " "-std=c++14 or -std=gnu++14"); cp_lexer_consume_token (parser->lexer); template_param_list = cp_parser_template_parameter_list (parser); cp_parser_skip_to_end_of_template_parameter_list (parser); /* We just processed one more parameter list. */ ++parser->num_template_parameter_lists; } /* The parameter-declaration-clause is optional (unless template-parameter-list was given), but must begin with an opening parenthesis if present. 
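For example (the template-parameter-list form is the extension accepted above with -std=c++14 or -std=gnu++14):

     [] {}
     [] (int x) mutable -> int { return x; }
     [] <typename T> (T t) { return t; }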
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { cp_lexer_consume_token (parser->lexer); begin_scope (sk_function_parms, /*entity=*/NULL_TREE); /* Parse parameters. */ param_list = cp_parser_parameter_declaration_clause (parser); /* Default arguments shall not be specified in the parameter-declaration-clause of a lambda-declarator. */ for (tree t = param_list; t; t = TREE_CHAIN (t)) if (TREE_PURPOSE (t) && cxx_dialect < cxx14) pedwarn (DECL_SOURCE_LOCATION (TREE_VALUE (t)), OPT_Wpedantic, "default argument specified for lambda parameter"); cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); attributes = cp_parser_attributes_opt (parser); /* Parse optional `mutable' keyword. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_MUTABLE)) { cp_lexer_consume_token (parser->lexer); LAMBDA_EXPR_MUTABLE_P (lambda_expr) = 1; } /* Parse optional exception specification. */ exception_spec = cp_parser_exception_specification_opt (parser); /* Parse optional trailing return type. */ if (cp_lexer_next_token_is (parser->lexer, CPP_DEREF)) { cp_lexer_consume_token (parser->lexer); LAMBDA_EXPR_RETURN_TYPE (lambda_expr) = cp_parser_trailing_type_id (parser); } /* The function parameters must be in scope all the way until after the trailing-return-type in case of decltype. */ pop_bindings_and_leave_scope (); } else if (template_param_list != NULL_TREE) // generate diagnostic cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Create the function call operator. Messing with declarators like this is no uglier than building up the FUNCTION_DECL by hand, and this is less likely to get out of sync with other code. */ { cp_decl_specifier_seq return_type_specs; cp_declarator* declarator; tree fco; int quals; void *p; clear_decl_specs (&return_type_specs); if (LAMBDA_EXPR_RETURN_TYPE (lambda_expr)) return_type_specs.type = LAMBDA_EXPR_RETURN_TYPE (lambda_expr); else /* Maybe we will deduce the return type later. */ return_type_specs.type = make_auto (); p = obstack_alloc (&declarator_obstack, 0); declarator = make_id_declarator (NULL_TREE, ansi_opname (CALL_EXPR), sfk_none); quals = (LAMBDA_EXPR_MUTABLE_P (lambda_expr) ? TYPE_UNQUALIFIED : TYPE_QUAL_CONST); declarator = make_call_declarator (declarator, param_list, quals, VIRT_SPEC_UNSPECIFIED, REF_QUAL_NONE, exception_spec, /*late_return_type=*/NULL_TREE); declarator->id_loc = LAMBDA_EXPR_LOCATION (lambda_expr); fco = grokmethod (&return_type_specs, declarator, attributes); if (fco != error_mark_node) { DECL_INITIALIZED_IN_CLASS_P (fco) = 1; DECL_ARTIFICIAL (fco) = 1; /* Give the object parameter a different name. */ DECL_NAME (DECL_ARGUMENTS (fco)) = get_identifier ("__closure"); if (LAMBDA_EXPR_RETURN_TYPE (lambda_expr)) TYPE_HAS_LATE_RETURN_TYPE (TREE_TYPE (fco)) = 1; } if (template_param_list) { fco = finish_member_template_decl (fco); finish_template_decl (template_param_list); --parser->num_template_parameter_lists; } else if (parser->fully_implicit_function_template_p) fco = finish_fully_implicit_template (parser, fco); finish_member_declaration (fco); obstack_free (&declarator_obstack, p); return (fco != error_mark_node); } } /* Parse the body of a lambda expression, which is simply compound-statement but which requires special handling. LAMBDA_EXPR is the current representation of the lambda expression. 
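For example, `[] (int x) { return x + 1; }' has a body of the exact form `{ return expression ; }', so the code below deduces the return type directly from that expression; any other body shape is parsed as an ordinary statement-seq.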
*/ static void cp_parser_lambda_body (cp_parser* parser, tree lambda_expr) { bool nested = (current_function_decl != NULL_TREE); bool local_variables_forbidden_p = parser->local_variables_forbidden_p; if (nested) push_function_context (); else /* Still increment function_depth so that we don't GC in the middle of an expression. */ ++function_depth; /* Clear this in case we're in the middle of a default argument. */ parser->local_variables_forbidden_p = false; /* Finish the function call operator - class_specifier + late_parsing_for_member + function_definition_after_declarator + ctor_initializer_opt_and_function_body */ { tree fco = lambda_function (lambda_expr); tree body; bool done = false; tree compound_stmt; tree cap; /* Let the front end know that we are going to be defining this function. */ start_preparsed_function (fco, NULL_TREE, SF_PRE_PARSED | SF_INCLASS_INLINE); start_lambda_scope (fco); body = begin_function_body (); if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE)) goto out; /* Push the proxies for any explicit captures. */ for (cap = LAMBDA_EXPR_CAPTURE_LIST (lambda_expr); cap; cap = TREE_CHAIN (cap)) build_capture_proxy (TREE_PURPOSE (cap)); compound_stmt = begin_compound_stmt (0); /* 5.1.1.4 of the standard says: If a lambda-expression does not include a trailing-return-type, it is as if the trailing-return-type denotes the following type: * if the compound-statement is of the form { return attribute-specifier [opt] expression ; } the type of the returned expression after lvalue-to-rvalue conversion (_conv.lval_ 4.1), array-to-pointer conversion (_conv.array_ 4.2), and function-to-pointer conversion (_conv.func_ 4.3); * otherwise, void. */ /* In a lambda that has neither a lambda-return-type-clause nor a deducible form, errors should be reported for return statements in the body. Since we used void as the placeholder return type, parsing the body as usual will give such desired behavior. */ if (!LAMBDA_EXPR_RETURN_TYPE (lambda_expr) && cp_lexer_peek_nth_token (parser->lexer, 1)->keyword == RID_RETURN && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SEMICOLON) { tree expr = NULL_TREE; cp_id_kind idk = CP_ID_KIND_NONE; /* Parse tentatively in case there's more after the initial return statement. */ cp_parser_parse_tentatively (parser); cp_parser_require_keyword (parser, RID_RETURN, RT_RETURN); expr = cp_parser_expression (parser, &idk); cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); if (cp_parser_parse_definitely (parser)) { if (!processing_template_decl) apply_deduced_return_type (fco, lambda_return_type (expr)); /* Will get error here if type not deduced yet. */ finish_return_stmt (expr); done = true; } } if (!done) { while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL)) cp_parser_label_declaration (parser); cp_parser_statement_seq_opt (parser, NULL_TREE); cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); } finish_compound_stmt (compound_stmt); out: finish_function_body (body); finish_lambda_scope (); /* Finish the function and generate code for it if necessary. */ tree fn = finish_function (/*inline*/2); /* Only expand if the call op is not a template. */ if (!DECL_TEMPLATE_INFO (fco)) expand_or_defer_fn (fn); } parser->local_variables_forbidden_p = local_variables_forbidden_p; if (nested) pop_function_context (); else --function_depth; } /* Statements [gram.stmt.stmt] */ /* Parse a statement. 
statement: labeled-statement expression-statement compound-statement selection-statement iteration-statement jump-statement declaration-statement try-block C++11: statement: labeled-statement attribute-specifier-seq (opt) expression-statement attribute-specifier-seq (opt) compound-statement attribute-specifier-seq (opt) selection-statement attribute-specifier-seq (opt) iteration-statement attribute-specifier-seq (opt) jump-statement declaration-statement attribute-specifier-seq (opt) try-block TM Extension: statement: atomic-statement IN_COMPOUND is true when the statement is nested inside a cp_parser_compound_statement; this matters for certain pragmas. If IF_P is not NULL, *IF_P is set to indicate whether the statement is a (possibly labeled) if statement which is not enclosed in braces and has an else clause. This is used to implement -Wparentheses. */ static void cp_parser_statement (cp_parser* parser, tree in_statement_expr, bool in_compound, bool *if_p) { tree statement, std_attrs = NULL_TREE; cp_token *token; location_t statement_location, attrs_location; restart: if (if_p != NULL) *if_p = false; /* There is no statement yet. */ statement = NULL_TREE; saved_token_sentinel saved_tokens (parser->lexer); attrs_location = cp_lexer_peek_token (parser->lexer)->location; if (c_dialect_objc ()) /* In obj-c++, seeing '[[' might be either the beginning of c++11 attributes, or a nested objc-message-expression. So let's parse the c++11 attributes tentatively. */ cp_parser_parse_tentatively (parser); std_attrs = cp_parser_std_attribute_spec_seq (parser); if (c_dialect_objc ()) { if (!cp_parser_parse_definitely (parser)) std_attrs = NULL_TREE; } /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Remember the location of the first token in the statement. */ statement_location = token->location; /* If this is a keyword, then that will often determine what kind of statement we have. */ if (token->type == CPP_KEYWORD) { enum rid keyword = token->keyword; switch (keyword) { case RID_CASE: case RID_DEFAULT: /* Looks like a labeled-statement with a case label. Parse the label, and then use tail recursion to parse the statement. */ cp_parser_label_for_labeled_statement (parser, std_attrs); goto restart; case RID_IF: case RID_SWITCH: statement = cp_parser_selection_statement (parser, if_p); break; case RID_WHILE: case RID_DO: case RID_FOR: statement = cp_parser_iteration_statement (parser, false); break; case RID_CILK_FOR: if (!flag_cilkplus) { error_at (cp_lexer_peek_token (parser->lexer)->location, "-fcilkplus must be enabled to use %<_Cilk_for%>"); cp_lexer_consume_token (parser->lexer); statement = error_mark_node; } else statement = cp_parser_cilk_for (parser, integer_zero_node); break; case RID_BREAK: case RID_CONTINUE: case RID_RETURN: case RID_GOTO: statement = cp_parser_jump_statement (parser); break; case RID_CILK_SYNC: cp_lexer_consume_token (parser->lexer); if (flag_cilkplus) { tree sync_expr = build_cilk_sync (); SET_EXPR_LOCATION (sync_expr, token->location); statement = finish_expr_stmt (sync_expr); } else { error_at (token->location, "-fcilkplus must be enabled to use" " %<_Cilk_sync%>"); statement = error_mark_node; } cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); break; /* Objective-C++ exception-handling constructs. 
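(For example `@try { ... } @catch (SomeException *e) { ... } @finally { ... }' and `@throw expr;'; `SomeException' is just an illustrative class name.)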
*/ case RID_AT_TRY: case RID_AT_CATCH: case RID_AT_FINALLY: case RID_AT_SYNCHRONIZED: case RID_AT_THROW: statement = cp_parser_objc_statement (parser); break; case RID_TRY: statement = cp_parser_try_block (parser); break; case RID_NAMESPACE: /* This must be a namespace alias definition. */ cp_parser_declaration_statement (parser); return; case RID_TRANSACTION_ATOMIC: case RID_TRANSACTION_RELAXED: statement = cp_parser_transaction (parser, keyword); break; case RID_TRANSACTION_CANCEL: statement = cp_parser_transaction_cancel (parser); break; default: /* It might be a keyword like `int' that can start a declaration-statement. */ break; } } else if (token->type == CPP_NAME) { /* If the next token is a `:', then we are looking at a labeled-statement. */ token = cp_lexer_peek_nth_token (parser->lexer, 2); if (token->type == CPP_COLON) { /* Looks like a labeled-statement with an ordinary label. Parse the label, and then use tail recursion to parse the statement. */ cp_parser_label_for_labeled_statement (parser, std_attrs); goto restart; } } /* Anything that starts with a `{' must be a compound-statement. */ else if (token->type == CPP_OPEN_BRACE) statement = cp_parser_compound_statement (parser, NULL, false, false); /* CPP_PRAGMA is a #pragma inside a function body, which constitutes a statement all its own. */ else if (token->type == CPP_PRAGMA) { /* Only certain OpenMP pragmas are attached to statements, and thus are considered statements themselves. All others are not. In the context of a compound, accept the pragma as a "statement" and return so that we can check for a close brace. Otherwise we require a real statement and must go back and read one. */ if (in_compound) cp_parser_pragma (parser, pragma_compound); else if (!cp_parser_pragma (parser, pragma_stmt)) goto restart; return; } else if (token->type == CPP_EOF) { cp_parser_error (parser, "expected statement"); return; } /* Everything else must be a declaration-statement or an expression-statement. Try for the declaration-statement first, unless we are looking at a `;', in which case we know that we have an expression-statement. */ if (!statement) { if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { if (std_attrs != NULL_TREE) { /* Attributes should be parsed as part of the declaration, so let's un-parse them. */ saved_tokens.rollback (); std_attrs = NULL_TREE; } cp_parser_parse_tentatively (parser); /* Try to parse the declaration-statement. */ cp_parser_declaration_statement (parser); /* If that worked, we're done. */ if (cp_parser_parse_definitely (parser)) return; } /* Look for an expression-statement instead. */ statement = cp_parser_expression_statement (parser, in_statement_expr); } /* Set the line number for the statement. */ if (statement && STATEMENT_CODE_P (TREE_CODE (statement))) SET_EXPR_LOCATION (statement, statement_location); /* Note that for now, we don't do anything with c++11 statements parsed at this level. */ if (std_attrs != NULL_TREE) warning_at (attrs_location, OPT_Wattributes, "attributes at the beginning of statement are ignored"); } /* Parse the label for a labeled-statement, i.e. identifier : case constant-expression : default : GNU Extension: case constant-expression ... constant-expression : statement When a label is parsed without errors, the label is added to the parse tree by the finish_* functions, so this function doesn't have to return the label. 
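For example, inside a switch statement:

     case 1:          plain case label
     case 2 ... 9:    GNU case range
     default:

   while elsewhere `retry:' would be an ordinary label, optionally followed by attributes as handled below.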
*/ static void cp_parser_label_for_labeled_statement (cp_parser* parser, tree attributes) { cp_token *token; tree label = NULL_TREE; bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p; /* The next token should be an identifier. */ token = cp_lexer_peek_token (parser->lexer); if (token->type != CPP_NAME && token->type != CPP_KEYWORD) { cp_parser_error (parser, "expected labeled-statement"); return; } parser->colon_corrects_to_scope_p = false; switch (token->keyword) { case RID_CASE: { tree expr, expr_hi; cp_token *ellipsis; /* Consume the `case' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the constant-expression. */ expr = cp_parser_constant_expression (parser); if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; ellipsis = cp_lexer_peek_token (parser->lexer); if (ellipsis->type == CPP_ELLIPSIS) { /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); expr_hi = cp_parser_constant_expression (parser); if (check_for_bare_parameter_packs (expr_hi)) expr_hi = error_mark_node; /* We don't need to emit warnings here, as the common code will do this for us. */ } else expr_hi = NULL_TREE; if (parser->in_switch_statement_p) finish_case_label (token->location, expr, expr_hi); else error_at (token->location, "case label %qE not within a switch statement", expr); } break; case RID_DEFAULT: /* Consume the `default' token. */ cp_lexer_consume_token (parser->lexer); if (parser->in_switch_statement_p) finish_case_label (token->location, NULL_TREE, NULL_TREE); else error_at (token->location, "case label not within a switch statement"); break; default: /* Anything else must be an ordinary label. */ label = finish_label_stmt (cp_parser_identifier (parser)); break; } /* Require the `:' token. */ cp_parser_require (parser, CPP_COLON, RT_COLON); /* An ordinary label may optionally be followed by attributes. However, this is only permitted if the attributes are then followed by a semicolon. This is because, for backward compatibility, when parsing lab: __attribute__ ((unused)) int i; we want the attribute to attach to "i", not "lab". */ if (label != NULL_TREE && cp_next_tokens_can_be_gnu_attribute_p (parser)) { tree attrs; cp_parser_parse_tentatively (parser); attrs = cp_parser_gnu_attributes_opt (parser); if (attrs == NULL_TREE || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) cp_parser_abort_tentative_parse (parser); else if (!cp_parser_parse_definitely (parser)) ; else attributes = chainon (attributes, attrs); } if (attributes != NULL_TREE) cplus_decl_attributes (&label, attributes, 0); parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; } /* Parse an expression-statement. expression-statement: expression [opt] ; Returns the new EXPR_STMT -- or NULL_TREE if the expression statement consists of nothing more than an `;'. IN_STATEMENT_EXPR_P indicates whether this expression-statement is part of an expression statement. */ static tree cp_parser_expression_statement (cp_parser* parser, tree in_statement_expr) { tree statement = NULL_TREE; cp_token *token = cp_lexer_peek_token (parser->lexer); /* If the next token is a ';', then there is no expression statement. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { statement = cp_parser_expression (parser); if (statement == error_mark_node && !cp_parser_uncommitted_to_tentative_parse_p (parser)) { cp_parser_skip_to_end_of_block_or_statement (parser); return error_mark_node; } } /* Give a helpful message for "A<T>::type t;" and the like. 
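Here the dependent name has already parsed as an expression (a SCOPE_REF), and only the identifier that follows reveals the mistake; suggesting `typename A<T>::type t;' is far more helpful than a bare parse error.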
*/ if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON) && !cp_parser_uncommitted_to_tentative_parse_p (parser)) { if (TREE_CODE (statement) == SCOPE_REF) error_at (token->location, "need %<typename%> before %qE because " "%qT is a dependent scope", statement, TREE_OPERAND (statement, 0)); else if (is_overloaded_fn (statement) && DECL_CONSTRUCTOR_P (get_first_fn (statement))) { /* A::A a; */ tree fn = get_first_fn (statement); error_at (token->location, "%<%T::%D%> names the constructor, not the type", DECL_CONTEXT (fn), DECL_NAME (fn)); } } /* Consume the final `;'. */ cp_parser_consume_semicolon_at_end_of_statement (parser); if (in_statement_expr && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE)) /* This is the final expression statement of a statement expression. */ statement = finish_stmt_expr_expr (statement, in_statement_expr); else if (statement) statement = finish_expr_stmt (statement); return statement; } /* Parse a compound-statement. compound-statement: { statement-seq [opt] } GNU extension: compound-statement: { label-declaration-seq [opt] statement-seq [opt] } label-declaration-seq: label-declaration label-declaration-seq label-declaration Returns a tree representing the statement. */ static tree cp_parser_compound_statement (cp_parser *parser, tree in_statement_expr, bool in_try, bool function_body) { tree compound_stmt; /* Consume the `{'. */ if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE)) return error_mark_node; if (DECL_DECLARED_CONSTEXPR_P (current_function_decl) && !function_body && cxx_dialect < cxx14) pedwarn (input_location, OPT_Wpedantic, "compound-statement in constexpr function"); /* Begin the compound-statement. */ compound_stmt = begin_compound_stmt (in_try ? BCS_TRY_BLOCK : 0); /* If the next keyword is `__label__' we have a label declaration. */ while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL)) cp_parser_label_declaration (parser); /* Parse an (optional) statement-seq. */ cp_parser_statement_seq_opt (parser, in_statement_expr); /* Finish the compound-statement. */ finish_compound_stmt (compound_stmt); /* Consume the `}'. */ cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); return compound_stmt; } /* Parse an (optional) statement-seq. statement-seq: statement statement-seq [opt] statement */ static void cp_parser_statement_seq_opt (cp_parser* parser, tree in_statement_expr) { /* Scan statements until there aren't any more. */ while (true) { cp_token *token = cp_lexer_peek_token (parser->lexer); /* If we are looking at a `}', then we have run out of statements; the same is true if we have reached the end of file, or have stumbled upon a stray '@end'. */ if (token->type == CPP_CLOSE_BRACE || token->type == CPP_EOF || token->type == CPP_PRAGMA_EOL || (token->type == CPP_KEYWORD && token->keyword == RID_AT_END)) break; /* If we are in a compound statement and find 'else' then something went wrong. */ else if (token->type == CPP_KEYWORD && token->keyword == RID_ELSE) { if (parser->in_statement & IN_IF_STMT) break; else { token = cp_lexer_consume_token (parser->lexer); error_at (token->location, "%<else%> without a previous %<if%>"); } } /* Parse the statement. */ cp_parser_statement (parser, in_statement_expr, true, NULL); } } /* Parse a selection-statement. selection-statement: if ( condition ) statement if ( condition ) statement else statement switch ( condition ) statement Returns the new IF_STMT or SWITCH_STMT. 
If IF_P is not NULL, *IF_P is set to indicate whether the statement is a (possibly labeled) if statement which is not enclosed in braces and has an else clause. This is used to implement -Wparentheses. */ static tree cp_parser_selection_statement (cp_parser* parser, bool *if_p) { cp_token *token; enum rid keyword; if (if_p != NULL) *if_p = false; /* Peek at the next token. */ token = cp_parser_require (parser, CPP_KEYWORD, RT_SELECT); /* See what kind of keyword it is. */ keyword = token->keyword; switch (keyword) { case RID_IF: case RID_SWITCH: { tree statement; tree condition; /* Look for the `('. */ if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) { cp_parser_skip_to_end_of_statement (parser); return error_mark_node; } /* Begin the selection-statement. */ if (keyword == RID_IF) statement = begin_if_stmt (); else statement = begin_switch_stmt (); /* Parse the condition. */ condition = cp_parser_condition (parser); /* Look for the `)'. */ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, true, false, /*consume_paren=*/true); if (keyword == RID_IF) { bool nested_if; unsigned char in_statement; /* Add the condition. */ finish_if_stmt_cond (condition, statement); /* Parse the then-clause. */ in_statement = parser->in_statement; parser->in_statement |= IN_IF_STMT; if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) { location_t loc = cp_lexer_peek_token (parser->lexer)->location; add_stmt (build_empty_stmt (loc)); cp_lexer_consume_token (parser->lexer); if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_ELSE)) warning_at (loc, OPT_Wempty_body, "suggest braces around " "empty body in an %<if%> statement"); nested_if = false; } else cp_parser_implicitly_scoped_statement (parser, &nested_if); parser->in_statement = in_statement; finish_then_clause (statement); /* If the next token is `else', parse the else-clause. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ELSE)) { /* Consume the `else' keyword. */ cp_lexer_consume_token (parser->lexer); begin_else_clause (statement); /* Parse the else-clause. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) { location_t loc; loc = cp_lexer_peek_token (parser->lexer)->location; warning_at (loc, OPT_Wempty_body, "suggest braces around " "empty body in an %<else%> statement"); add_stmt (build_empty_stmt (loc)); cp_lexer_consume_token (parser->lexer); } else cp_parser_implicitly_scoped_statement (parser, NULL); finish_else_clause (statement); /* If we are currently parsing a then-clause, then IF_P will not be NULL. We set it to true to indicate that this if statement has an else clause. This may trigger the Wparentheses warning below when we get back up to the parent if statement. */ if (if_p != NULL) *if_p = true; } else { /* This if statement does not have an else clause. If NESTED_IF is true, then the then-clause is an if statement which does have an else clause. We warn about the potential ambiguity. */ if (nested_if) warning_at (EXPR_LOCATION (statement), OPT_Wparentheses, "suggest explicit braces to avoid ambiguous" " %<else%>"); } /* Now we're all done with the if-statement. */ finish_if_stmt (statement); } else { bool in_switch_statement_p; unsigned char in_statement; /* Add the condition. */ finish_switch_cond (condition, statement); /* Parse the body of the switch-statement. 
*/ in_switch_statement_p = parser->in_switch_statement_p; in_statement = parser->in_statement; parser->in_switch_statement_p = true; parser->in_statement |= IN_SWITCH_STMT; cp_parser_implicitly_scoped_statement (parser, NULL); parser->in_switch_statement_p = in_switch_statement_p; parser->in_statement = in_statement; /* Now we're all done with the switch-statement. */ finish_switch_stmt (statement); } return statement; } break; default: cp_parser_error (parser, "expected selection-statement"); return error_mark_node; } } /* Parse a condition. condition: expression type-specifier-seq declarator = initializer-clause type-specifier-seq declarator braced-init-list GNU Extension: condition: type-specifier-seq declarator asm-specification [opt] attributes [opt] = assignment-expression Returns the expression that should be tested. */ static tree cp_parser_condition (cp_parser* parser) { cp_decl_specifier_seq type_specifiers; const char *saved_message; int declares_class_or_enum; /* Try the declaration first. */ cp_parser_parse_tentatively (parser); /* New types are not allowed in the type-specifier-seq for a condition. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in conditions"); /* Parse the type-specifier-seq. */ cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR, &type_specifiers, &declares_class_or_enum); /* Restore the saved message. */ parser->type_definition_forbidden_message = saved_message; /* If all is well, we might be looking at a declaration. */ if (!cp_parser_error_occurred (parser)) { tree decl; tree asm_specification; tree attributes; cp_declarator *declarator; tree initializer = NULL_TREE; /* Parse the declarator. */ declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, /*ctor_dtor_or_conv_p=*/NULL, /*parenthesized_p=*/NULL, /*member_p=*/false, /*friend_p=*/false); /* Parse the attributes. */ attributes = cp_parser_attributes_opt (parser); /* Parse the asm-specification. */ asm_specification = cp_parser_asm_specification_opt (parser); /* If the next token is not an `=' or '{', then we might still be looking at an expression. For example: if (A(a).x) looks like a decl-specifier-seq and a declarator -- but then there is no `=', so this is an expression. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ) && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)) cp_parser_simulate_error (parser); /* If we did see an `=' or '{', then we are looking at a declaration for sure. */ if (cp_parser_parse_definitely (parser)) { tree pushed_scope; bool non_constant_p; int flags = LOOKUP_ONLYCONVERTING; /* Create the declaration. */ decl = start_decl (declarator, &type_specifiers, /*initialized_p=*/true, attributes, /*prefix_attributes=*/NULL_TREE, &pushed_scope); /* Parse the initializer. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { initializer = cp_parser_braced_list (parser, &non_constant_p); CONSTRUCTOR_IS_DIRECT_INIT (initializer) = 1; flags = 0; } else { /* Consume the `='. */ cp_parser_require (parser, CPP_EQ, RT_EQ); initializer = cp_parser_initializer_clause (parser, &non_constant_p); } if (BRACE_ENCLOSED_INITIALIZER_P (initializer)) maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); /* Process the initializer. 
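(For example, `if (T *p = f (x))' reaches this point with `p' as DECL and the call as INITIALIZER; the finished declaration, converted from reference if need be, becomes the condition value. `f' is merely an illustrative function name.)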
*/ cp_finish_decl (decl, initializer, !non_constant_p, asm_specification, flags); if (pushed_scope) pop_scope (pushed_scope); return convert_from_reference (decl); } } /* If we didn't even get past the declarator successfully, we are definitely not looking at a declaration. */ else cp_parser_abort_tentative_parse (parser); /* Otherwise, we are looking at an expression. */ return cp_parser_expression (parser); } /* Parses a for-statement or range-for-statement until the closing ')', not included. */ static tree cp_parser_for (cp_parser *parser, bool ivdep) { tree init, scope, decl; bool is_range_for; /* Begin the for-statement. */ scope = begin_for_scope (&init); /* Parse the initialization. */ is_range_for = cp_parser_for_init_statement (parser, &decl); if (is_range_for) return cp_parser_range_for (parser, scope, init, decl, ivdep); else return cp_parser_c_for (parser, scope, init, ivdep); } static tree cp_parser_c_for (cp_parser *parser, tree scope, tree init, bool ivdep) { /* Normal for loop */ tree condition = NULL_TREE; tree expression = NULL_TREE; tree stmt; stmt = begin_for_stmt (scope, init); /* The for-init-statement has already been parsed in cp_parser_for_init_statement, so no work is needed here. */ finish_for_init_stmt (stmt); /* If there's a condition, process it. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) condition = cp_parser_condition (parser); else if (ivdep) { cp_parser_error (parser, "missing loop condition in loop with " "%<GCC ivdep%> pragma"); condition = error_mark_node; } finish_for_cond (condition, stmt, ivdep); /* Look for the `;'. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); /* If there's an expression, process it. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)) expression = cp_parser_expression (parser); finish_for_expr (expression, stmt); return stmt; } /* Tries to parse a range-based for-statement: range-based-for: decl-specifier-seq declarator : expression The decl-specifier-seq declarator and the `:' are already parsed by cp_parser_for_init_statement. If processing_template_decl it returns a newly created RANGE_FOR_STMT; if not, it is converted to a regular FOR_STMT. */ static tree cp_parser_range_for (cp_parser *parser, tree scope, tree init, tree range_decl, bool ivdep) { tree stmt, range_expr; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { bool expr_non_constant_p; range_expr = cp_parser_braced_list (parser, &expr_non_constant_p); } else range_expr = cp_parser_expression (parser); /* If in template, STMT is converted to a normal for-statement at instantiation. If not, it is done just ahead. */ if (processing_template_decl) { if (check_for_bare_parameter_packs (range_expr)) range_expr = error_mark_node; stmt = begin_range_for_stmt (scope, init); if (ivdep) RANGE_FOR_IVDEP (stmt) = 1; finish_range_for_decl (stmt, range_decl, range_expr); if (!type_dependent_expression_p (range_expr) /* do_auto_deduction doesn't mess with template init-lists. */ && !BRACE_ENCLOSED_INITIALIZER_P (range_expr)) do_range_for_auto_deduction (range_decl, range_expr); } else { stmt = begin_for_stmt (scope, init); stmt = cp_convert_range_for (stmt, range_decl, range_expr, ivdep); } return stmt; } /* Subroutine of cp_convert_range_for: given the initializer expression, builds up the range temporary. */ static tree build_range_temp (tree range_expr) { tree range_type, range_temp; /* Find out the type deduced by the declaration `auto &&__range = range_expr'. 
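E.g. (illustrative): if RANGE_EXPR is an lvalue of type T, `auto &&' deduces T& by reference collapsing; if it is a prvalue of type T, it deduces T&&. Either way __range can bind to the range expression.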
*/ range_type = cp_build_reference_type (make_auto (), true); range_type = do_auto_deduction (range_type, range_expr, type_uses_auto (range_type)); /* Create the __range variable. */ range_temp = build_decl (input_location, VAR_DECL, get_identifier ("__for_range"), range_type); TREE_USED (range_temp) = 1; DECL_ARTIFICIAL (range_temp) = 1; return range_temp; } /* Used by cp_parser_range_for in template context: we aren't going to do a full conversion yet, but we still need to resolve auto in the type of the for-range-declaration if present. This is basically a shortcut version of cp_convert_range_for. */ static void do_range_for_auto_deduction (tree decl, tree range_expr) { tree auto_node = type_uses_auto (TREE_TYPE (decl)); if (auto_node) { tree begin_dummy, end_dummy, range_temp, iter_type, iter_decl; range_temp = convert_from_reference (build_range_temp (range_expr)); iter_type = (cp_parser_perform_range_for_lookup (range_temp, &begin_dummy, &end_dummy)); if (iter_type) { iter_decl = build_decl (input_location, VAR_DECL, NULL_TREE, iter_type); iter_decl = build_x_indirect_ref (input_location, iter_decl, RO_NULL, tf_warning_or_error); TREE_TYPE (decl) = do_auto_deduction (TREE_TYPE (decl), iter_decl, auto_node); } } } /* Converts a range-based for-statement into a normal for-statement, as per the definition. for (RANGE_DECL : RANGE_EXPR) BLOCK should be equivalent to: { auto &&__range = RANGE_EXPR; for (auto __begin = BEGIN_EXPR, __end = END_EXPR; __begin != __end; ++__begin) { RANGE_DECL = *__begin; BLOCK } } If RANGE_EXPR is an array: BEGIN_EXPR = __range END_EXPR = __range + ARRAY_SIZE(__range) Else if RANGE_EXPR has a member 'begin' or 'end': BEGIN_EXPR = __range.begin() END_EXPR = __range.end() Else: BEGIN_EXPR = begin(__range) END_EXPR = end(__range); If __range has a member 'begin' but not 'end', or vice versa, we must still use the second alternative (it will surely fail, however). When calling begin()/end() in the third alternative we must use argument dependent lookup, but always considering 'std' as an associated namespace. */ tree cp_convert_range_for (tree statement, tree range_decl, tree range_expr, bool ivdep) { tree begin, end; tree iter_type, begin_expr, end_expr; tree condition, expression; if (range_decl == error_mark_node || range_expr == error_mark_node) /* If an error happened previously do nothing or else a lot of unhelpful errors would be issued. */ begin_expr = end_expr = iter_type = error_mark_node; else { tree range_temp; if (TREE_CODE (range_expr) == VAR_DECL && array_of_runtime_bound_p (TREE_TYPE (range_expr))) /* Can't bind a reference to an array of runtime bound. */ range_temp = range_expr; else { range_temp = build_range_temp (range_expr); pushdecl (range_temp); cp_finish_decl (range_temp, range_expr, /*is_constant_init*/false, NULL_TREE, LOOKUP_ONLYCONVERTING); range_temp = convert_from_reference (range_temp); } iter_type = cp_parser_perform_range_for_lookup (range_temp, &begin_expr, &end_expr); } /* The new for initialization statement. 
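Continuing the desugaring sketched in the comment above (illustrative only), for `for (int i : v)' with a class-type `v' this step emits roughly auto __for_begin = v.begin (), __for_end = v.end (); using the begin/end expressions computed by cp_parser_perform_range_for_lookup.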
*/ begin = build_decl (input_location, VAR_DECL, get_identifier ("__for_begin"), iter_type); TREE_USED (begin) = 1; DECL_ARTIFICIAL (begin) = 1; pushdecl (begin); cp_finish_decl (begin, begin_expr, /*is_constant_init*/false, NULL_TREE, LOOKUP_ONLYCONVERTING); end = build_decl (input_location, VAR_DECL, get_identifier ("__for_end"), iter_type); TREE_USED (end) = 1; DECL_ARTIFICIAL (end) = 1; pushdecl (end); cp_finish_decl (end, end_expr, /*is_constant_init*/false, NULL_TREE, LOOKUP_ONLYCONVERTING); finish_for_init_stmt (statement); /* The new for condition. */ condition = build_x_binary_op (input_location, NE_EXPR, begin, ERROR_MARK, end, ERROR_MARK, NULL, tf_warning_or_error); finish_for_cond (condition, statement, ivdep); /* The new increment expression. */ expression = finish_unary_op_expr (input_location, PREINCREMENT_EXPR, begin, tf_warning_or_error); finish_for_expr (expression, statement); /* The declaration is initialized with *__begin inside the loop body. */ cp_finish_decl (range_decl, build_x_indirect_ref (input_location, begin, RO_NULL, tf_warning_or_error), /*is_constant_init*/false, NULL_TREE, LOOKUP_ONLYCONVERTING); return statement; } /* Solves BEGIN_EXPR and END_EXPR as described in cp_convert_range_for. We need to solve both at the same time because the method used depends on the existence of members begin or end. Returns the type deduced for the iterator expression. */ static tree cp_parser_perform_range_for_lookup (tree range, tree *begin, tree *end) { if (error_operand_p (range)) { *begin = *end = error_mark_node; return error_mark_node; } if (!COMPLETE_TYPE_P (complete_type (TREE_TYPE (range)))) { error ("range-based %<for%> expression of type %qT " "has incomplete type", TREE_TYPE (range)); *begin = *end = error_mark_node; return error_mark_node; } if (TREE_CODE (TREE_TYPE (range)) == ARRAY_TYPE) { /* If RANGE is an array, we will use pointer arithmetic. */ *begin = range; *end = build_binary_op (input_location, PLUS_EXPR, range, array_type_nelts_top (TREE_TYPE (range)), 0); return build_pointer_type (TREE_TYPE (TREE_TYPE (range))); } else { /* If it is not an array, we must do a bit of magic. */ tree id_begin, id_end; tree member_begin, member_end; *begin = *end = error_mark_node; id_begin = get_identifier ("begin"); id_end = get_identifier ("end"); member_begin = lookup_member (TREE_TYPE (range), id_begin, /*protect=*/2, /*want_type=*/false, tf_warning_or_error); member_end = lookup_member (TREE_TYPE (range), id_end, /*protect=*/2, /*want_type=*/false, tf_warning_or_error); if (member_begin != NULL_TREE || member_end != NULL_TREE) { /* Use the member functions. */ if (member_begin != NULL_TREE) *begin = cp_parser_range_for_member_function (range, id_begin); else error ("range-based %<for%> expression of type %qT has an " "%<end%> member but not a %<begin%>", TREE_TYPE (range)); if (member_end != NULL_TREE) *end = cp_parser_range_for_member_function (range, id_end); else error ("range-based %<for%> expression of type %qT has a " "%<begin%> member but not an %<end%>", TREE_TYPE (range)); } else { /* Use global functions with ADL. */ vec<tree, va_gc> *vec; vec = make_tree_vector (); vec_safe_push (vec, range); member_begin = perform_koenig_lookup (id_begin, vec, tf_warning_or_error); *begin = finish_call_expr (member_begin, &vec, false, true, tf_warning_or_error); member_end = perform_koenig_lookup (id_end, vec, tf_warning_or_error); *end = finish_call_expr (member_end, &vec, false, true, tf_warning_or_error); release_tree_vector (vec); } /* Last common checks. 
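E.g. (illustrative): a range type such as struct R { int *begin (); long *end (); }; fails the same_type_p check below, because the deduced iterator types int* and long* differ, and so draws the inconsistent begin/end diagnostic.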
*/ if (*begin == error_mark_node || *end == error_mark_node) { /* If one of the expressions is an error do no more checks. */ *begin = *end = error_mark_node; return error_mark_node; } else if (type_dependent_expression_p (*begin) || type_dependent_expression_p (*end)) /* Can happen, when, eg, in a template context, Koenig lookup can't resolve begin/end (c++/58503). */ return NULL_TREE; else { tree iter_type = cv_unqualified (TREE_TYPE (*begin)); /* The unqualified type of the __begin and __end temporaries should be the same, as required by the multiple auto declaration. */ if (!same_type_p (iter_type, cv_unqualified (TREE_TYPE (*end)))) error ("inconsistent begin/end types in range-based %<for%> " "statement: %qT and %qT", TREE_TYPE (*begin), TREE_TYPE (*end)); return iter_type; } } } /* Helper function for cp_parser_perform_range_for_lookup. Builds a tree for RANGE.IDENTIFIER(). */ static tree cp_parser_range_for_member_function (tree range, tree identifier) { tree member, res; vec<tree, va_gc> *vec; member = finish_class_member_access_expr (range, identifier, false, tf_warning_or_error); if (member == error_mark_node) return error_mark_node; vec = make_tree_vector (); res = finish_call_expr (member, &vec, /*disallow_virtual=*/false, /*koenig_p=*/false, tf_warning_or_error); release_tree_vector (vec); return res; } /* Parse an iteration-statement. iteration-statement: while ( condition ) statement do statement while ( expression ) ; for ( for-init-statement condition [opt] ; expression [opt] ) statement Returns the new WHILE_STMT, DO_STMT, FOR_STMT or RANGE_FOR_STMT. */ static tree cp_parser_iteration_statement (cp_parser* parser, bool ivdep) { cp_token *token; enum rid keyword; tree statement; unsigned char in_statement; /* Peek at the next token. */ token = cp_parser_require (parser, CPP_KEYWORD, RT_INTERATION); if (!token) return error_mark_node; /* Remember whether or not we are already within an iteration statement. */ in_statement = parser->in_statement; /* See what kind of keyword it is. */ keyword = token->keyword; switch (keyword) { case RID_WHILE: { tree condition; /* Begin the while-statement. */ statement = begin_while_stmt (); /* Look for the `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Parse the condition. */ condition = cp_parser_condition (parser); finish_while_stmt_cond (condition, statement, ivdep); /* Look for the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* Parse the dependent statement. */ parser->in_statement = IN_ITERATION_STMT; cp_parser_already_scoped_statement (parser); parser->in_statement = in_statement; /* We're done with the while-statement. */ finish_while_stmt (statement); } break; case RID_DO: { tree expression; /* Begin the do-statement. */ statement = begin_do_stmt (); /* Parse the body of the do-statement. */ parser->in_statement = IN_ITERATION_STMT; cp_parser_implicitly_scoped_statement (parser, NULL); parser->in_statement = in_statement; finish_do_body (statement); /* Look for the `while' keyword. */ cp_parser_require_keyword (parser, RID_WHILE, RT_WHILE); /* Look for the `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Parse the expression. */ expression = cp_parser_expression (parser); /* We're done with the do-statement. */ finish_do_stmt (expression, statement, ivdep); /* Look for the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* Look for the `;'. 
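E.g. (illustrative): in `do ++i; while (i < n);' the `;' consumed here is the one that terminates the whole do-statement.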
*/ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); } break; case RID_FOR: { /* Look for the `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); statement = cp_parser_for (parser, ivdep); /* Look for the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* Parse the body of the for-statement. */ parser->in_statement = IN_ITERATION_STMT; cp_parser_already_scoped_statement (parser); parser->in_statement = in_statement; /* We're done with the for-statement. */ finish_for_stmt (statement); } break; default: cp_parser_error (parser, "expected iteration-statement"); statement = error_mark_node; break; } return statement; } /* Parse a for-init-statement or the declarator of a range-based-for. Returns true if a range-based-for declaration is seen. for-init-statement: expression-statement simple-declaration */ static bool cp_parser_for_init_statement (cp_parser* parser, tree *decl) { /* If the next token is a `;', then we have an empty expression-statement. Grammatically, this is also a simple-declaration, but an invalid one, because it does not declare anything. Therefore, if we did not handle this case specially, we would issue an error message about an invalid declaration. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { bool is_range_for = false; bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME) && cp_lexer_nth_token_is (parser->lexer, 2, CPP_COLON)) { /* N3994 -- for (id : init) ... */ if (cxx_dialect < cxx1z) pedwarn (input_location, 0, "range-based for loop without a " "type-specifier only available with " "-std=c++1z or -std=gnu++1z"); tree name = cp_parser_identifier (parser); tree type = cp_build_reference_type (make_auto (), /*rval*/true); *decl = build_decl (input_location, VAR_DECL, name, type); pushdecl (*decl); cp_lexer_consume_token (parser->lexer); return true; } /* A colon is used in range-based for. */ parser->colon_corrects_to_scope_p = false; /* We're going to speculatively look for a declaration, falling back to an expression, if necessary. */ cp_parser_parse_tentatively (parser); /* Parse the declaration. */ cp_parser_simple_declaration (parser, /*function_definition_allowed_p=*/false, decl); parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; if (cp_lexer_next_token_is (parser->lexer, CPP_COLON)) { /* It is a range-for, consume the ':' */ cp_lexer_consume_token (parser->lexer); is_range_for = true; if (cxx_dialect < cxx11) { pedwarn (cp_lexer_peek_token (parser->lexer)->location, 0, "range-based %<for%> loops only available with " "-std=c++11 or -std=gnu++11"); *decl = error_mark_node; } } else /* The ';' is not consumed yet because we told cp_parser_simple_declaration not to. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); if (cp_parser_parse_definitely (parser)) return is_range_for; /* If the tentative parse failed, then we shall need to look for an expression-statement. */ } /* If we are here, it is an expression-statement. */ cp_parser_expression_statement (parser, NULL_TREE); return false; } /* Parse a jump-statement. jump-statement: break ; continue ; return expression [opt] ; return braced-init-list ; goto identifier ; GNU extension: jump-statement: goto * expression ; Returns the new BREAK_STMT, CONTINUE_STMT, RETURN_EXPR, or GOTO_EXPR. 
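Illustrative instances of the productions above: `break;', `continue;', `return { 1, 2 };', `goto out;', and the GNU computed goto `goto *labels[i];' (with hypothetical `out' and `labels').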
*/ static tree cp_parser_jump_statement (cp_parser* parser) { tree statement = error_mark_node; cp_token *token; enum rid keyword; unsigned char in_statement; /* Peek at the next token. */ token = cp_parser_require (parser, CPP_KEYWORD, RT_JUMP); if (!token) return error_mark_node; /* See what kind of keyword it is. */ keyword = token->keyword; switch (keyword) { case RID_BREAK: in_statement = parser->in_statement & ~IN_IF_STMT; switch (in_statement) { case 0: error_at (token->location, "break statement not within loop or switch"); break; default: gcc_assert ((in_statement & IN_SWITCH_STMT) || in_statement == IN_ITERATION_STMT); statement = finish_break_stmt (); if (in_statement == IN_ITERATION_STMT) break_maybe_infinite_loop (); break; case IN_OMP_BLOCK: error_at (token->location, "invalid exit from OpenMP structured block"); break; case IN_OMP_FOR: error_at (token->location, "break statement used with OpenMP for loop"); break; case IN_CILK_SIMD_FOR: error_at (token->location, "break statement used with Cilk Plus for loop"); break; } cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); break; case RID_CONTINUE: switch (parser->in_statement & ~(IN_SWITCH_STMT | IN_IF_STMT)) { case 0: error_at (token->location, "continue statement not within a loop"); break; case IN_CILK_SIMD_FOR: error_at (token->location, "continue statement within %<#pragma simd%> loop body"); /* Fall through. */ case IN_ITERATION_STMT: case IN_OMP_FOR: statement = finish_continue_stmt (); break; case IN_OMP_BLOCK: error_at (token->location, "invalid exit from OpenMP structured block"); break; default: gcc_unreachable (); } cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); break; case RID_RETURN: { tree expr; bool expr_non_constant_p; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { cp_lexer_set_source_position (parser->lexer); maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); expr = cp_parser_braced_list (parser, &expr_non_constant_p); } else if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) expr = cp_parser_expression (parser); else /* If the next token is a `;', then there is no expression. */ expr = NULL_TREE; /* Build the return-statement. */ statement = finish_return_stmt (expr); /* Look for the final `;'. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); } break; case RID_GOTO: if (parser->in_function_body && DECL_DECLARED_CONSTEXPR_P (current_function_decl)) { error ("%<goto%> in %<constexpr%> function"); cp_function_chain->invalid_constexpr = true; } /* Create the goto-statement. */ if (cp_lexer_next_token_is (parser->lexer, CPP_MULT)) { /* Issue a warning about this use of a GNU extension. */ pedwarn (token->location, OPT_Wpedantic, "ISO C++ forbids computed gotos"); /* Consume the '*' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the dependent expression. */ finish_goto_stmt (cp_parser_expression (parser)); } else finish_goto_stmt (cp_parser_identifier (parser)); /* Look for the final `;'. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); break; default: cp_parser_error (parser, "expected jump-statement"); break; } return statement; } /* Parse a declaration-statement. declaration-statement: block-declaration */ static void cp_parser_declaration_statement (cp_parser* parser) { void *p; /* Get the high-water mark for the DECLARATOR_OBSTACK. */ p = obstack_alloc (&declarator_obstack, 0); /* Parse the block-declaration. */ cp_parser_block_declaration (parser, /*statement_p=*/true); /* Free any declarators allocated. 
*/ obstack_free (&declarator_obstack, p); } /* Some dependent statements (like `if (cond) statement') are implicitly in their own scope. In other words, if the statement is a single statement (as opposed to a compound-statement), it is nonetheless treated as if it were enclosed in braces. Any declarations appearing in the dependent statement are out of scope after control passes that point. This function parses a statement, but ensures that it is in its own scope, even if it is not a compound-statement. If IF_P is not NULL, *IF_P is set to indicate whether the statement is a (possibly labeled) if statement which is not enclosed in braces and has an else clause. This is used to implement -Wparentheses. Returns the new statement. */ static tree cp_parser_implicitly_scoped_statement (cp_parser* parser, bool *if_p) { tree statement; if (if_p != NULL) *if_p = false; /* Mark if () ; with a special NOP_EXPR. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) { location_t loc = cp_lexer_peek_token (parser->lexer)->location; cp_lexer_consume_token (parser->lexer); statement = add_stmt (build_empty_stmt (loc)); } /* If a compound is opened, we simply parse the statement directly. */ else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) statement = cp_parser_compound_statement (parser, NULL, false, false); /* If the token is not a `{', then we must take special action. */ else { /* Create a compound-statement. */ statement = begin_compound_stmt (0); /* Parse the dependent-statement. */ cp_parser_statement (parser, NULL_TREE, false, if_p); /* Finish the dummy compound-statement. */ finish_compound_stmt (statement); } /* Return the statement. */ return statement; } /* For some dependent statements (like `while (cond) statement'), we have already created a scope. Therefore, even if the dependent statement is a compound-statement, we do not want to create another scope. */ static void cp_parser_already_scoped_statement (cp_parser* parser) { /* If the token is a `{', then we must take special action. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)) cp_parser_statement (parser, NULL_TREE, false, NULL); else { /* Avoid calling cp_parser_compound_statement, so that we don't create a new scope. Do everything else by hand. */ cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE); /* If the next keyword is `__label__' we have a label declaration. */ while (cp_lexer_next_token_is_keyword (parser->lexer, RID_LABEL)) cp_parser_label_declaration (parser); /* Parse an (optional) statement-seq. */ cp_parser_statement_seq_opt (parser, NULL_TREE); cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); } } /* Declarations [gram.dcl.dcl] */ /* Parse an optional declaration-sequence. declaration-seq: declaration declaration-seq declaration */ static void cp_parser_declaration_seq_opt (cp_parser* parser) { while (true) { cp_token *token; token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_CLOSE_BRACE || token->type == CPP_EOF || token->type == CPP_PRAGMA_EOL) break; if (token->type == CPP_SEMICOLON) { /* A declaration consisting of a single semicolon is invalid. Allow it unless we're being pedantic. */ cp_lexer_consume_token (parser->lexer); if (!in_system_header_at (input_location)) pedwarn (input_location, OPT_Wpedantic, "extra %<;%>"); continue; } /* If we're entering or exiting a region that's implicitly extern "C", modify the lang context appropriately. 
*/ if (!parser->implicit_extern_c && token->implicit_extern_c) { push_lang_context (lang_name_c); parser->implicit_extern_c = true; } else if (parser->implicit_extern_c && !token->implicit_extern_c) { pop_lang_context (); parser->implicit_extern_c = false; } if (token->type == CPP_PRAGMA) { /* A top-level declaration can consist solely of a #pragma. A nested declaration cannot, so this is done here and not in cp_parser_declaration. (A #pragma at block scope is handled in cp_parser_statement.) */ cp_parser_pragma (parser, pragma_external); continue; } /* Parse the declaration itself. */ cp_parser_declaration (parser); } } /* Parse a declaration. declaration: block-declaration function-definition template-declaration explicit-instantiation explicit-specialization linkage-specification namespace-definition GNU extension: declaration: __extension__ declaration */ static void cp_parser_declaration (cp_parser* parser) { cp_token token1; cp_token token2; int saved_pedantic; void *p; tree attributes = NULL_TREE; /* Check for the `__extension__' keyword. */ if (cp_parser_extension_opt (parser, &saved_pedantic)) { /* Parse the qualified declaration. */ cp_parser_declaration (parser); /* Restore the PEDANTIC flag. */ pedantic = saved_pedantic; return; } /* Try to figure out what kind of declaration is present. */ token1 = *cp_lexer_peek_token (parser->lexer); if (token1.type != CPP_EOF) token2 = *cp_lexer_peek_nth_token (parser->lexer, 2); else { token2.type = CPP_EOF; token2.keyword = RID_MAX; } /* Get the high-water mark for the DECLARATOR_OBSTACK. */ p = obstack_alloc (&declarator_obstack, 0); /* If the next token is `extern' and the following token is a string literal, then we have a linkage specification. */ if (token1.keyword == RID_EXTERN && cp_parser_is_pure_string_literal (&token2)) cp_parser_linkage_specification (parser); /* If the next token is `template', then we have either a template declaration, an explicit instantiation, or an explicit specialization. */ else if (token1.keyword == RID_TEMPLATE) { /* `template <>' indicates a template specialization. */ if (token2.type == CPP_LESS && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER) cp_parser_explicit_specialization (parser); /* `template <' indicates a template declaration. */ else if (token2.type == CPP_LESS) cp_parser_template_declaration (parser, /*member_p=*/false); /* Anything else must be an explicit instantiation. */ else cp_parser_explicit_instantiation (parser); } /* If the next token is `export', then we have a template declaration. */ else if (token1.keyword == RID_EXPORT) cp_parser_template_declaration (parser, /*member_p=*/false); /* If the next token is `extern', 'static' or 'inline' and the one after that is `template', we have a GNU extended explicit instantiation directive. */ else if (cp_parser_allow_gnu_extensions_p (parser) && (token1.keyword == RID_EXTERN || token1.keyword == RID_STATIC || token1.keyword == RID_INLINE) && token2.keyword == RID_TEMPLATE) cp_parser_explicit_instantiation (parser); /* If the next token is `namespace', check for a named or unnamed namespace definition. */ else if (token1.keyword == RID_NAMESPACE && (/* A named namespace definition. */ (token2.type == CPP_NAME && (cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_EQ)) /* An unnamed namespace definition. */ || token2.type == CPP_OPEN_BRACE || token2.keyword == RID_ATTRIBUTE)) cp_parser_namespace_definition (parser); /* An inline (associated) namespace definition. 
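For example (illustrative): `inline namespace v1 { void f (); }' -- here `inline' is followed by `namespace', which is what the test just below recognizes.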
*/ else if (token1.keyword == RID_INLINE && token2.keyword == RID_NAMESPACE) cp_parser_namespace_definition (parser); /* Objective-C++ declaration/definition. */ else if (c_dialect_objc () && OBJC_IS_AT_KEYWORD (token1.keyword)) cp_parser_objc_declaration (parser, NULL_TREE); else if (c_dialect_objc () && token1.keyword == RID_ATTRIBUTE && cp_parser_objc_valid_prefix_attributes (parser, &attributes)) cp_parser_objc_declaration (parser, attributes); /* We must have either a block declaration or a function definition. */ else /* Try to parse a block-declaration, or a function-definition. */ cp_parser_block_declaration (parser, /*statement_p=*/false); /* Free any declarators allocated. */ obstack_free (&declarator_obstack, p); } /* Parse a block-declaration. block-declaration: simple-declaration asm-definition namespace-alias-definition using-declaration using-directive GNU Extension: block-declaration: __extension__ block-declaration C++0x Extension: block-declaration: static_assert-declaration If STATEMENT_P is TRUE, then this block-declaration is occurring as part of a declaration-statement. */ static void cp_parser_block_declaration (cp_parser *parser, bool statement_p) { cp_token *token1; int saved_pedantic; /* Check for the `__extension__' keyword. */ if (cp_parser_extension_opt (parser, &saved_pedantic)) { /* Parse the qualified declaration. */ cp_parser_block_declaration (parser, statement_p); /* Restore the PEDANTIC flag. */ pedantic = saved_pedantic; return; } /* Peek at the next token to figure out which kind of declaration is present. */ token1 = cp_lexer_peek_token (parser->lexer); /* If the next keyword is `asm', we have an asm-definition. */ if (token1->keyword == RID_ASM) { if (statement_p) cp_parser_commit_to_tentative_parse (parser); cp_parser_asm_definition (parser); } /* If the next keyword is `namespace', we have a namespace-alias-definition. */ else if (token1->keyword == RID_NAMESPACE) cp_parser_namespace_alias_definition (parser); /* If the next keyword is `using', we have a using-declaration, a using-directive, or an alias-declaration. */ else if (token1->keyword == RID_USING) { cp_token *token2; if (statement_p) cp_parser_commit_to_tentative_parse (parser); /* If the token after `using' is `namespace', then we have a using-directive. */ token2 = cp_lexer_peek_nth_token (parser->lexer, 2); if (token2->keyword == RID_NAMESPACE) cp_parser_using_directive (parser); /* If the second token after 'using' is '=', then we have an alias-declaration. */ else if (cxx_dialect >= cxx11 && token2->type == CPP_NAME && ((cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_EQ) || (cp_nth_tokens_can_be_attribute_p (parser, 3)))) cp_parser_alias_declaration (parser); /* Otherwise, it's a using-declaration. */ else cp_parser_using_declaration (parser, /*access_declaration_p=*/false); } /* If the next keyword is `__label__' we have a misplaced label declaration. */ else if (token1->keyword == RID_LABEL) { cp_lexer_consume_token (parser->lexer); error_at (token1->location, "%<__label__%> not at the beginning of a block"); cp_parser_skip_to_end_of_statement (parser); /* If the next token is now a `;', consume it. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); } /* If the next token is `static_assert' we have a static assertion. */ else if (token1->keyword == RID_STATIC_ASSERT) cp_parser_static_assert (parser, /*member_p=*/false); /* Anything else must be a simple-declaration. 
*/ else cp_parser_simple_declaration (parser, !statement_p, /*maybe_range_for_decl*/NULL); } /* Parse a simple-declaration. simple-declaration: decl-specifier-seq [opt] init-declarator-list [opt] ; init-declarator-list: init-declarator init-declarator-list , init-declarator If FUNCTION_DEFINITION_ALLOWED_P is TRUE, then we also recognize a function-definition as a simple-declaration. If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to the parsed declaration if it is an uninitialized single declarator not followed by a `;', or to error_mark_node otherwise. Either way, the trailing `;', if present, will not be consumed. */ static void cp_parser_simple_declaration (cp_parser* parser, bool function_definition_allowed_p, tree *maybe_range_for_decl) { cp_decl_specifier_seq decl_specifiers; int declares_class_or_enum; bool saw_declarator; location_t comma_loc = UNKNOWN_LOCATION; location_t init_loc = UNKNOWN_LOCATION; if (maybe_range_for_decl) *maybe_range_for_decl = NULL_TREE; /* Defer access checks until we know what is being declared; the checks for names appearing in the decl-specifier-seq should be done as if we were in the scope of the thing being declared. */ push_deferring_access_checks (dk_deferred); /* Parse the decl-specifier-seq. We have to keep track of whether or not the decl-specifier-seq declares a named class or enumeration type, since that is the only case in which the init-declarator-list is allowed to be empty. [dcl.dcl] In a simple-declaration, the optional init-declarator-list can be omitted only when declaring a class or enumeration, that is when the decl-specifier-seq contains either a class-specifier, an elaborated-type-specifier, or an enum-specifier. */ cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_OPTIONAL, &decl_specifiers, &declares_class_or_enum); /* We no longer need to defer access checks. */ stop_deferring_access_checks (); /* In a block scope, a valid declaration must always have a decl-specifier-seq. By not trying to parse declarators, we can resolve the declaration/expression ambiguity more quickly. */ if (!function_definition_allowed_p && !decl_specifiers.any_specifiers_p) { cp_parser_error (parser, "expected declaration"); goto done; } /* If the next two tokens are both identifiers, the code is erroneous. The usual cause of this situation is code like: T t; where "T" should name a type -- but does not. */ if (!decl_specifiers.any_type_specifiers_p && cp_parser_parse_and_diagnose_invalid_type_name (parser)) { /* If parsing tentatively, we should commit; we really are looking at a declaration. */ cp_parser_commit_to_tentative_parse (parser); /* Give up. */ goto done; } /* If we have seen at least one decl-specifier, and the next token is not a parenthesis, then we must be looking at a declaration. (After "int (" we might be looking at a functional cast.) */ if (decl_specifiers.any_specifiers_p && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN) && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE) && !cp_parser_error_occurred (parser)) cp_parser_commit_to_tentative_parse (parser); /* Keep going until we hit the `;' at the end of the simple declaration. 
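E.g. (illustrative): `int i = 0, *p = &i, a[2];' makes three trips through the loop below, one per init-declarator, consuming a `,' between consecutive declarators.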
*/ saw_declarator = false; while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { cp_token *token; bool function_definition_p; tree decl; if (saw_declarator) { /* If we are processing the next declarator, a comma is expected. */ token = cp_lexer_peek_token (parser->lexer); gcc_assert (token->type == CPP_COMMA); cp_lexer_consume_token (parser->lexer); if (maybe_range_for_decl) { *maybe_range_for_decl = error_mark_node; if (comma_loc == UNKNOWN_LOCATION) comma_loc = token->location; } } else saw_declarator = true; /* Parse the init-declarator. */ decl = cp_parser_init_declarator (parser, &decl_specifiers, /*checks=*/NULL, function_definition_allowed_p, /*member_p=*/false, declares_class_or_enum, &function_definition_p, maybe_range_for_decl, &init_loc); /* If an error occurred while parsing tentatively, exit quickly. (That usually happens when in the body of a function; each statement is treated as a declaration-statement until proven otherwise.) */ if (cp_parser_error_occurred (parser)) goto done; /* Handle function definitions specially. */ if (function_definition_p) { /* If the next token is a `,', then we are probably processing something like: void f() {}, *p; which is erroneous. */ if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) { cp_token *token = cp_lexer_peek_token (parser->lexer); error_at (token->location, "mixing" " declarations and function-definitions is forbidden"); } /* Otherwise, we're done with the list of declarators. */ else { pop_deferring_access_checks (); return; } } if (maybe_range_for_decl && *maybe_range_for_decl == NULL_TREE) *maybe_range_for_decl = decl; /* The next token should be either a `,' or a `;'. */ token = cp_lexer_peek_token (parser->lexer); /* If it's a `,', there are more declarators to come. */ if (token->type == CPP_COMMA) /* will be consumed next time around */; /* If it's a `;', we are done. */ else if (token->type == CPP_SEMICOLON || maybe_range_for_decl) break; /* Anything else is an error. */ else { /* If we have already issued an error message we don't need to issue another one. */ if (decl != error_mark_node || cp_parser_uncommitted_to_tentative_parse_p (parser)) cp_parser_error (parser, "expected %<,%> or %<;%>"); /* Skip tokens until we reach the end of the statement. */ cp_parser_skip_to_end_of_statement (parser); /* If the next token is now a `;', consume it. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); goto done; } /* After the first time around, a function-definition is not allowed -- even if it was OK at first. For example: int i, f() {} is not valid. */ function_definition_allowed_p = false; } /* Issue an error message if no declarators are present, and the decl-specifier-seq does not itself declare a class or enumeration: [dcl.dcl]/3. */ if (!saw_declarator) { if (cp_parser_declares_only_class_p (parser)) { if (!declares_class_or_enum && decl_specifiers.type && OVERLOAD_TYPE_P (decl_specifiers.type)) /* Ensure an error is issued anyway when finish_decltype_type, called via cp_parser_decl_specifier_seq, returns a class or an enumeration (c++/51786). */ decl_specifiers.type = NULL_TREE; shadow_tag (&decl_specifiers); } /* Perform any deferred access checks. */ perform_deferred_access_checks (tf_warning_or_error); } /* Consume the `;'. 
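In a range-based `for' head the `;' is absent and a `:' appears instead; illustrative ill-formed cases diagnosed below are `for (int i = 0 : v)' (initializer present) and `for (int i, j : v)' (multiple declarations).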
*/ if (!maybe_range_for_decl) cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); else if (cp_lexer_next_token_is (parser->lexer, CPP_COLON)) { if (init_loc != UNKNOWN_LOCATION) error_at (init_loc, "initializer in range-based %<for%> loop"); if (comma_loc != UNKNOWN_LOCATION) error_at (comma_loc, "multiple declarations in range-based %<for%> loop"); } done: pop_deferring_access_checks (); } /* Parse a decl-specifier-seq. decl-specifier-seq: decl-specifier-seq [opt] decl-specifier decl-specifier attribute-specifier-seq [opt] (C++11) decl-specifier: storage-class-specifier type-specifier function-specifier friend typedef GNU Extension: decl-specifier: attributes Set *DECL_SPECS to a representation of the decl-specifier-seq. The parser flags FLAGS is used to control type-specifier parsing. *DECLARES_CLASS_OR_ENUM is set to the bitwise or of the following flags: 1: one of the decl-specifiers is an elaborated-type-specifier (i.e., a type declaration) 2: one of the decl-specifiers is an enum-specifier or a class-specifier (i.e., a type definition) */ static void cp_parser_decl_specifier_seq (cp_parser* parser, cp_parser_flags flags, cp_decl_specifier_seq *decl_specs, int* declares_class_or_enum) { bool constructor_possible_p = !parser->in_declarator_p; bool found_decl_spec = false; cp_token *start_token = NULL; cp_decl_spec ds; /* Clear DECL_SPECS. */ clear_decl_specs (decl_specs); /* Assume no class or enumeration type is declared. */ *declares_class_or_enum = 0; /* Keep reading specifiers until there are no more to read. */ while (true) { bool constructor_p; cp_token *token; ds = ds_last; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Save the first token of the decl spec list for error reporting. */ if (!start_token) start_token = token; /* Handle attributes. */ if (cp_next_tokens_can_be_attribute_p (parser)) { /* Parse the attributes. */ tree attrs = cp_parser_attributes_opt (parser); /* In a sequence of declaration specifiers, c++11 attributes appertain to the type that precede them. In that case [dcl.spec]/1 says: The attribute-specifier-seq affects the type only for the declaration it appears in, not other declarations involving the same type. But for now let's force the user to position the attribute either at the beginning of the declaration or after the declarator-id, which would clearly mean that it applies to the declarator. */ if (cxx11_attribute_p (attrs)) { if (!found_decl_spec) /* The c++11 attribute is at the beginning of the declaration. It appertains to the entity being declared. */; else { if (decl_specs->type && CLASS_TYPE_P (decl_specs->type)) { /* This is an attribute following a class-specifier. */ if (decl_specs->type_definition_p) warn_misplaced_attr_for_class_type (token->location, decl_specs->type); attrs = NULL_TREE; } else { decl_specs->std_attributes = chainon (decl_specs->std_attributes, attrs); if (decl_specs->locations[ds_std_attribute] == 0) decl_specs->locations[ds_std_attribute] = token->location; } continue; } } decl_specs->attributes = chainon (decl_specs->attributes, attrs); if (decl_specs->locations[ds_attribute] == 0) decl_specs->locations[ds_attribute] = token->location; continue; } /* Assume we will find a decl-specifier keyword. */ found_decl_spec = true; /* If the next token is an appropriate keyword, we can simply add it to the list. 
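E.g. (illustrative): given `static constexpr int n = 3;' this switch handles `static' and `constexpr', and the later call to cp_parser_type_specifier handles `int'.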
*/ switch (token->keyword) { /* decl-specifier: friend constexpr */ case RID_FRIEND: if (!at_class_scope_p ()) { error_at (token->location, "%<friend%> used outside of class"); cp_lexer_purge_token (parser->lexer); } else { ds = ds_friend; /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } break; case RID_CONSTEXPR: ds = ds_constexpr; cp_lexer_consume_token (parser->lexer); break; /* function-specifier: inline virtual explicit */ case RID_INLINE: case RID_VIRTUAL: case RID_EXPLICIT: cp_parser_function_specifier_opt (parser, decl_specs); break; /* decl-specifier: typedef */ case RID_TYPEDEF: ds = ds_typedef; /* Consume the token. */ cp_lexer_consume_token (parser->lexer); /* A constructor declarator cannot appear in a typedef. */ constructor_possible_p = false; /* The "typedef" keyword can only occur in a declaration; we may as well commit at this point. */ cp_parser_commit_to_tentative_parse (parser); if (decl_specs->storage_class != sc_none) decl_specs->conflicting_specifiers_p = true; break; /* storage-class-specifier: auto register static extern mutable GNU Extension: thread */ case RID_AUTO: if (cxx_dialect == cxx98) { /* Consume the token. */ cp_lexer_consume_token (parser->lexer); /* Complain about `auto' as a storage specifier, if we're complaining about C++0x compatibility. */ warning_at (token->location, OPT_Wc__0x_compat, "%<auto%>" " changes meaning in C++11; please remove it"); /* Set the storage class anyway. */ cp_parser_set_storage_class (parser, decl_specs, RID_AUTO, token); } else /* C++0x auto type-specifier. */ found_decl_spec = false; break; case RID_REGISTER: case RID_STATIC: case RID_EXTERN: case RID_MUTABLE: /* Consume the token. */ cp_lexer_consume_token (parser->lexer); cp_parser_set_storage_class (parser, decl_specs, token->keyword, token); break; case RID_THREAD: /* Consume the token. */ ds = ds_thread; cp_lexer_consume_token (parser->lexer); break; default: /* We have not yet found a decl-specifier. */ found_decl_spec = false; break; } if (found_decl_spec && (flags & CP_PARSER_FLAGS_ONLY_TYPE_OR_CONSTEXPR) && token->keyword != RID_CONSTEXPR) error ("decl-specifier invalid in condition"); if (ds != ds_last) set_and_check_decl_spec_loc (decl_specs, ds, token); /* Constructors are a special case. The `S' in `S()' is not a decl-specifier; it is the beginning of the declarator. */ constructor_p = (!found_decl_spec && constructor_possible_p && (cp_parser_constructor_declarator_p (parser, decl_spec_seq_has_spec_p (decl_specs, ds_friend)))); /* If we don't have a DECL_SPEC yet, then we must be looking at a type-specifier. */ if (!found_decl_spec && !constructor_p) { int decl_spec_declares_class_or_enum; bool is_cv_qualifier; tree type_spec; type_spec = cp_parser_type_specifier (parser, flags, decl_specs, /*is_declaration=*/true, &decl_spec_declares_class_or_enum, &is_cv_qualifier); *declares_class_or_enum |= decl_spec_declares_class_or_enum; /* If this type-specifier referenced a user-defined type (a typedef, class-name, etc.), then we can't allow any more such type-specifiers henceforth. [dcl.spec] The longest sequence of decl-specifiers that could possibly be a type name is taken as the decl-specifier-seq of a declaration. The sequence shall be self-consistent as described below. [dcl.type] As a general rule, at most one type-specifier is allowed in the complete decl-specifier-seq of a declaration. The only exceptions are the following: -- const or volatile can be combined with any other type-specifier. 
-- signed or unsigned can be combined with char, long, short, or int. -- .. Example: typedef char* Pc; void g (const int Pc); Here, Pc is *not* part of the decl-specifier seq; it's the declarator. Therefore, once we see a type-specifier (other than a cv-qualifier), we forbid any additional user-defined types. We *do* still allow things like `int int' to be considered a decl-specifier-seq, and issue the error message later. */ if (type_spec && !is_cv_qualifier) flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES; /* A constructor declarator cannot follow a type-specifier. */ if (type_spec) { constructor_possible_p = false; found_decl_spec = true; if (!is_cv_qualifier) decl_specs->any_type_specifiers_p = true; } } /* If we still do not have a DECL_SPEC, then there are no more decl-specifiers. */ if (!found_decl_spec) break; decl_specs->any_specifiers_p = true; /* After we see one decl-specifier, further decl-specifiers are always optional. */ flags |= CP_PARSER_FLAGS_OPTIONAL; } /* Don't allow a friend specifier with a class definition. */ if (decl_spec_seq_has_spec_p (decl_specs, ds_friend) && (*declares_class_or_enum & 2)) error_at (decl_specs->locations[ds_friend], "class definition may not be declared a friend"); } /* Parse an (optional) storage-class-specifier. storage-class-specifier: auto register static extern mutable GNU Extension: storage-class-specifier: thread Returns an IDENTIFIER_NODE corresponding to the keyword used. */ static tree cp_parser_storage_class_specifier_opt (cp_parser* parser) { switch (cp_lexer_peek_token (parser->lexer)->keyword) { case RID_AUTO: if (cxx_dialect != cxx98) return NULL_TREE; /* Fall through for C++98. */ case RID_REGISTER: case RID_STATIC: case RID_EXTERN: case RID_MUTABLE: case RID_THREAD: /* Consume the token. */ return cp_lexer_consume_token (parser->lexer)->u.value; default: return NULL_TREE; } } /* Parse an (optional) function-specifier. function-specifier: inline virtual explicit Returns an IDENTIFIER_NODE corresponding to the keyword used. Updates DECL_SPECS, if it is non-NULL. */ static tree cp_parser_function_specifier_opt (cp_parser* parser, cp_decl_specifier_seq *decl_specs) { cp_token *token = cp_lexer_peek_token (parser->lexer); switch (token->keyword) { case RID_INLINE: set_and_check_decl_spec_loc (decl_specs, ds_inline, token); break; case RID_VIRTUAL: /* 14.5.2.3 [temp.mem] A member function template shall not be virtual. */ if (PROCESSING_REAL_TEMPLATE_DECL_P ()) error_at (token->location, "templates may not be %<virtual%>"); else set_and_check_decl_spec_loc (decl_specs, ds_virtual, token); break; case RID_EXPLICIT: set_and_check_decl_spec_loc (decl_specs, ds_explicit, token); break; default: return NULL_TREE; } /* Consume the token. */ return cp_lexer_consume_token (parser->lexer)->u.value; } /* Parse a linkage-specification. linkage-specification: extern string-literal { declaration-seq [opt] } extern string-literal declaration */ static void cp_parser_linkage_specification (cp_parser* parser) { tree linkage; /* Look for the `extern' keyword. */ cp_parser_require_keyword (parser, RID_EXTERN, RT_EXTERN); /* Look for the string-literal. */ linkage = cp_parser_string_literal (parser, false, false); /* Transform the literal into an identifier. If the literal is a wide-character string, or contains embedded NULs, then we can't handle it as the user wants. */ if (strlen (TREE_STRING_POINTER (linkage)) != (size_t) (TREE_STRING_LENGTH (linkage) - 1)) { cp_parser_error (parser, "invalid linkage-specification"); /* Assume C++ linkage. 
*/ linkage = lang_name_cplusplus; } else linkage = get_identifier (TREE_STRING_POINTER (linkage)); /* We're now using the new linkage. */ push_lang_context (linkage); /* If the next token is a `{', then we're using the first production. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { cp_ensure_no_omp_declare_simd (parser); /* Consume the `{' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the declarations. */ cp_parser_declaration_seq_opt (parser); /* Look for the closing `}'. */ cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); } /* Otherwise, there's just one declaration. */ else { bool saved_in_unbraced_linkage_specification_p; saved_in_unbraced_linkage_specification_p = parser->in_unbraced_linkage_specification_p; parser->in_unbraced_linkage_specification_p = true; cp_parser_declaration (parser); parser->in_unbraced_linkage_specification_p = saved_in_unbraced_linkage_specification_p; } /* We're done with the linkage-specification. */ pop_lang_context (); } /* Parse a static_assert-declaration. static_assert-declaration: static_assert ( constant-expression , string-literal ) ; If MEMBER_P, this static_assert is a class member. */ static void cp_parser_static_assert(cp_parser *parser, bool member_p) { tree condition; tree message; cp_token *token; location_t saved_loc; bool dummy; /* Peek at the `static_assert' token so we can keep track of exactly where the static assertion started. */ token = cp_lexer_peek_token (parser->lexer); saved_loc = token->location; /* Look for the `static_assert' keyword. */ if (!cp_parser_require_keyword (parser, RID_STATIC_ASSERT, RT_STATIC_ASSERT)) return; /* We know we are in a static assertion; commit to any tentative parse. */ if (cp_parser_parsing_tentatively (parser)) cp_parser_commit_to_tentative_parse (parser); /* Parse the `(' starting the static assertion condition. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Parse the constant-expression. Allow a non-constant expression here in order to give better diagnostics in finish_static_assert. */ condition = cp_parser_constant_expression (parser, /*allow_non_constant_p=*/true, /*non_constant_p=*/&dummy); /* Parse the separating `,'. */ cp_parser_require (parser, CPP_COMMA, RT_COMMA); /* Parse the string-literal message. */ message = cp_parser_string_literal (parser, /*translate=*/false, /*wide_ok=*/true); /* A `)' completes the static assertion. */ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); /* A semicolon terminates the declaration. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); /* Complete the static assertion, which may mean either processing the static assert now or saving it for template instantiation. */ finish_static_assert (condition, message, saved_loc, member_p); } /* Parse the expression in decltype ( expression ). */ static tree cp_parser_decltype_expr (cp_parser *parser, bool &id_expression_or_member_access_p) { cp_token *id_expr_start_token; tree expr; /* First, try parsing an id-expression. 
*/ id_expr_start_token = cp_lexer_peek_token (parser->lexer); cp_parser_parse_tentatively (parser); expr = cp_parser_id_expression (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, /*template_p=*/NULL, /*declarator_p=*/false, /*optional_p=*/false); if (!cp_parser_error_occurred (parser) && expr != error_mark_node) { bool non_integral_constant_expression_p = false; tree id_expression = expr; cp_id_kind idk; const char *error_msg; if (identifier_p (expr)) /* Lookup the name we got back from the id-expression. */ expr = cp_parser_lookup_name_simple (parser, expr, id_expr_start_token->location); if (expr && expr != error_mark_node && TREE_CODE (expr) != TYPE_DECL && (TREE_CODE (expr) != BIT_NOT_EXPR || !TYPE_P (TREE_OPERAND (expr, 0))) && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN) { /* Complete lookup of the id-expression. */ expr = (finish_id_expression (id_expression, expr, parser->scope, &idk, /*integral_constant_expression_p=*/false, /*allow_non_integral_constant_expression_p=*/true, &non_integral_constant_expression_p, /*template_p=*/false, /*done=*/true, /*address_p=*/false, /*template_arg_p=*/false, &error_msg, id_expr_start_token->location)); if (expr == error_mark_node) /* We found an id-expression, but it was something that we should not have found. This is an error, not something we can recover from, so note that we found an id-expression and we'll recover as gracefully as possible. */ id_expression_or_member_access_p = true; } if (expr && expr != error_mark_node && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN) /* We have an id-expression. */ id_expression_or_member_access_p = true; } if (!id_expression_or_member_access_p) { /* Abort the id-expression parse. */ cp_parser_abort_tentative_parse (parser); /* Parsing tentatively, again. */ cp_parser_parse_tentatively (parser); /* Parse a class member access. */ expr = cp_parser_postfix_expression (parser, /*address_p=*/false, /*cast_p=*/false, /*decltype*/true, /*member_access_only_p=*/true, NULL); if (expr && expr != error_mark_node && cp_lexer_peek_token (parser->lexer)->type == CPP_CLOSE_PAREN) /* We have an id-expression. */ id_expression_or_member_access_p = true; } if (id_expression_or_member_access_p) /* We have parsed the complete id-expression or member access. */ cp_parser_parse_definitely (parser); else { /* Abort our attempt to parse an id-expression or member access expression. */ cp_parser_abort_tentative_parse (parser); /* Parse a full expression. */ expr = cp_parser_expression (parser, /*pidk=*/NULL, /*cast_p=*/false, /*decltype_p=*/true); } return expr; } /* Parse a `decltype' type. Returns the type. simple-type-specifier: decltype ( expression ) C++14 proposal: decltype ( auto ) */ static tree cp_parser_decltype (cp_parser *parser) { tree expr; bool id_expression_or_member_access_p = false; const char *saved_message; bool saved_integral_constant_expression_p; bool saved_non_integral_constant_expression_p; bool saved_greater_than_is_operator_p; cp_token *start_token = cp_lexer_peek_token (parser->lexer); if (start_token->type == CPP_DECLTYPE) { /* Already parsed. */ cp_lexer_consume_token (parser->lexer); return start_token->u.value; } /* Look for the `decltype' token. */ if (!cp_parser_require_keyword (parser, RID_DECLTYPE, RT_DECLTYPE)) return error_mark_node; /* Parse the opening `('. 
*/ if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return error_mark_node; /* decltype (auto) */ if (cxx_dialect >= cxx14 && cp_lexer_next_token_is_keyword (parser->lexer, RID_AUTO)) { cp_lexer_consume_token (parser->lexer); if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) return error_mark_node; expr = make_decltype_auto (); AUTO_IS_DECLTYPE (expr) = true; goto rewrite; } /* Types cannot be defined in a `decltype' expression. Save away the old message. */ saved_message = parser->type_definition_forbidden_message; /* And create the new one. */ parser->type_definition_forbidden_message = G_("types may not be defined in %<decltype%> expressions"); /* The restrictions on constant-expressions do not apply inside decltype expressions. */ saved_integral_constant_expression_p = parser->integral_constant_expression_p; saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p; parser->integral_constant_expression_p = false; /* Within a parenthesized expression, a `>' token is always the greater-than operator. */ saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = true; /* Do not actually evaluate the expression. */ ++cp_unevaluated_operand; /* Do not warn about problems with the expression. */ ++c_inhibit_evaluation_warnings; expr = cp_parser_decltype_expr (parser, id_expression_or_member_access_p); /* Go back to evaluating expressions. */ --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; /* The `>' token might be the end of a template-id or template-parameter-list now. */ parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; /* Restore the old message and the integral constant expression flags. */ parser->type_definition_forbidden_message = saved_message; parser->integral_constant_expression_p = saved_integral_constant_expression_p; parser->non_integral_constant_expression_p = saved_non_integral_constant_expression_p; /* Parse to the closing `)'. */ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) { cp_parser_skip_to_closing_parenthesis (parser, true, false, /*consume_paren=*/true); return error_mark_node; } expr = finish_decltype_type (expr, id_expression_or_member_access_p, tf_warning_or_error); rewrite: /* Replace the decltype with a CPP_DECLTYPE so we don't need to parse it again. */ start_token->type = CPP_DECLTYPE; start_token->u.value = expr; start_token->keyword = RID_MAX; cp_lexer_purge_tokens_after (parser->lexer, start_token); return expr; } /* Special member functions [gram.special] */ /* Parse a conversion-function-id. conversion-function-id: operator conversion-type-id Returns an IDENTIFIER_NODE representing the operator. */ static tree cp_parser_conversion_function_id (cp_parser* parser) { tree type; tree saved_scope; tree saved_qualifying_scope; tree saved_object_scope; tree pushed_scope = NULL_TREE; /* Look for the `operator' token. */ if (!cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR)) return error_mark_node; /* When we parse the conversion-type-id, the current scope will be reset. However, we need that information in order to look up the conversion function later, so we save it here. */ saved_scope = parser->scope; saved_qualifying_scope = parser->qualifying_scope; saved_object_scope = parser->object_scope; /* We must enter the scope of the class so that the names of entities declared within the class are available in the conversion-type-id. 
For example, consider: struct S { typedef int I; operator I(); }; S::operator I() { ... } In order to see that `I' is a type-name in the definition, we must be in the scope of `S'. */ if (saved_scope) pushed_scope = push_scope (saved_scope); /* Parse the conversion-type-id. */ type = cp_parser_conversion_type_id (parser); /* Leave the scope of the class, if any. */ if (pushed_scope) pop_scope (pushed_scope); /* Restore the saved scope. */ parser->scope = saved_scope; parser->qualifying_scope = saved_qualifying_scope; parser->object_scope = saved_object_scope; /* If the TYPE is invalid, indicate failure. */ if (type == error_mark_node) return error_mark_node; return mangle_conv_op_name_for_type (type); } /* Parse a conversion-type-id: conversion-type-id: type-specifier-seq conversion-declarator [opt] Returns the TYPE specified. */ static tree cp_parser_conversion_type_id (cp_parser* parser) { tree attributes; cp_decl_specifier_seq type_specifiers; cp_declarator *declarator; tree type_specified; const char *saved_message; /* Parse the attributes. */ attributes = cp_parser_attributes_opt (parser); saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in a conversion-type-id"); /* Parse the type-specifiers. */ cp_parser_type_specifier_seq (parser, /*is_declaration=*/false, /*is_trailing_return=*/false, &type_specifiers); parser->type_definition_forbidden_message = saved_message; /* If that didn't work, stop. */ if (type_specifiers.type == error_mark_node) return error_mark_node; /* Parse the conversion-declarator. */ declarator = cp_parser_conversion_declarator_opt (parser); type_specified = grokdeclarator (declarator, &type_specifiers, TYPENAME, /*initialized=*/0, &attributes); if (attributes) cplus_decl_attributes (&type_specified, attributes, /*flags=*/0); /* Don't give this error when parsing tentatively. This happens to work because we always parse this definitively once. */ if (! cp_parser_uncommitted_to_tentative_parse_p (parser) && type_uses_auto (type_specified)) { if (cxx_dialect < cxx14) { error ("invalid use of %<auto%> in conversion operator"); return error_mark_node; } else if (template_parm_scope_p ()) warning (0, "use of %<auto%> in member template " "conversion operator can never be deduced"); } return type_specified; } /* Parse an (optional) conversion-declarator. conversion-declarator: ptr-operator conversion-declarator [opt] */ static cp_declarator * cp_parser_conversion_declarator_opt (cp_parser* parser) { enum tree_code code; tree class_type, std_attributes = NULL_TREE; cp_cv_quals cv_quals; /* We don't know if there's a ptr-operator next, or not. */ cp_parser_parse_tentatively (parser); /* Try the ptr-operator. */ code = cp_parser_ptr_operator (parser, &class_type, &cv_quals, &std_attributes); /* If it worked, look for more conversion-declarators. */ if (cp_parser_parse_definitely (parser)) { cp_declarator *declarator; /* Parse another optional declarator. */ declarator = cp_parser_conversion_declarator_opt (parser); declarator = cp_parser_make_indirect_declarator (code, class_type, cv_quals, declarator, std_attributes); return declarator; } return NULL; } /* Parse an (optional) ctor-initializer. ctor-initializer: : mem-initializer-list Returns TRUE iff the ctor-initializer was actually present. */ static bool cp_parser_ctor_initializer_opt (cp_parser* parser) { /* If the next token is not a `:', then there is no ctor-initializer. 
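For illustration, given the hypothetical constructor definition `S::S () : m (0) { }' the `:' introduces the ctor-initializer; given `S::S () { }' there is none, and we only do the default initialization below.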
*/ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)) { /* Do default initialization of any bases and members. */ if (DECL_CONSTRUCTOR_P (current_function_decl)) finish_mem_initializers (NULL_TREE); return false; } /* Consume the `:' token. */ cp_lexer_consume_token (parser->lexer); /* And the mem-initializer-list. */ cp_parser_mem_initializer_list (parser); return true; } /* Parse a mem-initializer-list. mem-initializer-list: mem-initializer ... [opt] mem-initializer ... [opt] , mem-initializer-list */ static void cp_parser_mem_initializer_list (cp_parser* parser) { tree mem_initializer_list = NULL_TREE; tree target_ctor = error_mark_node; cp_token *token = cp_lexer_peek_token (parser->lexer); /* Let the semantic analysis code know that we are starting the mem-initializer-list. */ if (!DECL_CONSTRUCTOR_P (current_function_decl)) error_at (token->location, "only constructors take member initializers"); /* Loop through the list. */ while (true) { tree mem_initializer; token = cp_lexer_peek_token (parser->lexer); /* Parse the mem-initializer. */ mem_initializer = cp_parser_mem_initializer (parser); /* If the next token is a `...', we're expanding member initializers. */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { /* Consume the `...'. */ cp_lexer_consume_token (parser->lexer); /* The TREE_PURPOSE must be a _TYPE, because base-specifiers can be expanded but members cannot. */ if (mem_initializer != error_mark_node && !TYPE_P (TREE_PURPOSE (mem_initializer))) { error_at (token->location, "cannot expand initializer for member %<%D%>", TREE_PURPOSE (mem_initializer)); mem_initializer = error_mark_node; } /* Construct the pack expansion type. */ if (mem_initializer != error_mark_node) mem_initializer = make_pack_expansion (mem_initializer); } if (target_ctor != error_mark_node && mem_initializer != error_mark_node) { error ("mem-initializer for %qD follows constructor delegation", TREE_PURPOSE (mem_initializer)); mem_initializer = error_mark_node; } /* Look for a target constructor. */ if (mem_initializer != error_mark_node && CLASS_TYPE_P (TREE_PURPOSE (mem_initializer)) && same_type_p (TREE_PURPOSE (mem_initializer), current_class_type)) { maybe_warn_cpp0x (CPP0X_DELEGATING_CTORS); if (mem_initializer_list) { error ("constructor delegation follows mem-initializer for %qD", TREE_PURPOSE (mem_initializer_list)); mem_initializer = error_mark_node; } target_ctor = mem_initializer; } /* Add it to the list, unless it was erroneous. */ if (mem_initializer != error_mark_node) { TREE_CHAIN (mem_initializer) = mem_initializer_list; mem_initializer_list = mem_initializer; } /* If the next token is not a `,', we're done. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Consume the `,' token. */ cp_lexer_consume_token (parser->lexer); } /* Perform semantic analysis. */ if (DECL_CONSTRUCTOR_P (current_function_decl)) finish_mem_initializers (mem_initializer_list); } /* Parse a mem-initializer. mem-initializer: mem-initializer-id ( expression-list [opt] ) mem-initializer-id braced-init-list GNU extension: mem-initializer: ( expression-list [opt] ) Returns a TREE_LIST. The TREE_PURPOSE is the TYPE (for a base class) or FIELD_DECL (for a non-static data member) to initialize; the TREE_VALUE is the expression-list. An empty initialization list is represented by void_list_node. 
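Hypothetical examples, for illustration: in `: B (1, 2)' the TREE_PURPOSE is the TYPE for the base `B' and the TREE_VALUE is the expression-list `1, 2'; in `: m {0}' the braced-init-list becomes the TREE_VALUE; in `: B ()' the expression-list is empty.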
*/ static tree cp_parser_mem_initializer (cp_parser* parser) { tree mem_initializer_id; tree expression_list; tree member; cp_token *token = cp_lexer_peek_token (parser->lexer); /* Find out what is being initialized. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { permerror (token->location, "anachronistic old-style base class initializer"); mem_initializer_id = NULL_TREE; } else { mem_initializer_id = cp_parser_mem_initializer_id (parser); if (mem_initializer_id == error_mark_node) return mem_initializer_id; } member = expand_member_init (mem_initializer_id); if (member && !DECL_P (member)) in_base_initializer = 1; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { bool expr_non_constant_p; cp_lexer_set_source_position (parser->lexer); maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); expression_list = cp_parser_braced_list (parser, &expr_non_constant_p); CONSTRUCTOR_IS_DIRECT_INIT (expression_list) = 1; expression_list = build_tree_list (NULL_TREE, expression_list); } else { vec<tree, va_gc> *vec; vec = cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL); if (vec == NULL) return error_mark_node; expression_list = build_tree_list_vec (vec); release_tree_vector (vec); } if (expression_list == error_mark_node) return error_mark_node; if (!expression_list) expression_list = void_type_node; in_base_initializer = 0; return member ? build_tree_list (member, expression_list) : error_mark_node; } /* Parse a mem-initializer-id. mem-initializer-id: :: [opt] nested-name-specifier [opt] class-name identifier Returns a TYPE indicating the class to be initialized for the first production. Returns an IDENTIFIER_NODE indicating the data member to be initialized for the second production. */ static tree cp_parser_mem_initializer_id (cp_parser* parser) { bool global_scope_p; bool nested_name_specifier_p; bool template_p = false; tree id; cp_token *token = cp_lexer_peek_token (parser->lexer); /* `typename' is not allowed in this context ([temp.res]). */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME)) { error_at (token->location, "keyword %<typename%> not allowed in this context (a qualified " "member initializer is implicitly a type)"); cp_lexer_consume_token (parser->lexer); } /* Look for the optional `::' operator. */ global_scope_p = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false) != NULL_TREE); /* Look for the optional nested-name-specifier. The simplest way to implement: [temp.res] The keyword `typename' is not permitted in a base-specifier or mem-initializer; in these contexts a qualified name that depends on a template-parameter is implicitly assumed to be a type name. is to assume that we have seen the `typename' keyword at this point. */ nested_name_specifier_p = (cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/true, /*check_dependency_p=*/true, /*type_p=*/true, /*is_declaration=*/true) != NULL_TREE); if (nested_name_specifier_p) template_p = cp_parser_optional_template_keyword (parser); /* If there is a `::' operator or a nested-name-specifier, then we are definitely looking for a class-name. */ if (global_scope_p || nested_name_specifier_p) return cp_parser_class_name (parser, /*typename_keyword_p=*/true, /*template_keyword_p=*/template_p, typename_type, /*check_dependency_p=*/true, /*class_head_p=*/false, /*is_declaration=*/true); /* Otherwise, we could also be looking for an ordinary identifier.
*/ cp_parser_parse_tentatively (parser); /* Try a class-name. */ id = cp_parser_class_name (parser, /*typename_keyword_p=*/true, /*template_keyword_p=*/false, none_type, /*check_dependency_p=*/true, /*class_head_p=*/false, /*is_declaration=*/true); /* If we found one, we're done. */ if (cp_parser_parse_definitely (parser)) return id; /* Otherwise, look for an ordinary identifier. */ return cp_parser_identifier (parser); } /* Overloading [gram.over] */ /* Parse an operator-function-id. operator-function-id: operator operator Returns an IDENTIFIER_NODE for the operator which is a human-readable spelling of the identifier, e.g., `operator +'. */ static tree cp_parser_operator_function_id (cp_parser* parser) { /* Look for the `operator' keyword. */ if (!cp_parser_require_keyword (parser, RID_OPERATOR, RT_OPERATOR)) return error_mark_node; /* And then the name of the operator itself. */ return cp_parser_operator (parser); } /* Return an identifier node for a user-defined literal operator. The suffix identifier is chained to the operator name identifier. */ static tree cp_literal_operator_id (const char* name) { tree identifier; char *buffer = XNEWVEC (char, strlen (UDLIT_OP_ANSI_PREFIX) + strlen (name) + 10); sprintf (buffer, UDLIT_OP_ANSI_FORMAT, name); identifier = get_identifier (buffer); return identifier; } /* Parse an operator. operator: new delete new[] delete[] + - * / % ^ & | ~ ! = < > += -= *= /= %= ^= &= |= << >> >>= <<= == != <= >= && || ++ -- , ->* -> () [] GNU Extensions: operator: <? >? <?= >?= Returns an IDENTIFIER_NODE for the operator which is a human-readable spelling of the identifier, e.g., `operator +'. */ static tree cp_parser_operator (cp_parser* parser) { tree id = NULL_TREE; cp_token *token; bool utf8 = false; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Figure out which operator we have. */ switch (token->type) { case CPP_KEYWORD: { enum tree_code op; /* The keyword should be either `new' or `delete'. */ if (token->keyword == RID_NEW) op = NEW_EXPR; else if (token->keyword == RID_DELETE) op = DELETE_EXPR; else break; /* Consume the `new' or `delete' token. */ cp_lexer_consume_token (parser->lexer); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's a `[' token then this is the array variant of the operator. */ if (token->type == CPP_OPEN_SQUARE) { /* Consume the `[' token. */ cp_lexer_consume_token (parser->lexer); /* Look for the `]' token. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); id = ansi_opname (op == NEW_EXPR ? VEC_NEW_EXPR : VEC_DELETE_EXPR); } /* Otherwise, we have the non-array variant. 
*/ else id = ansi_opname (op); return id; } case CPP_PLUS: id = ansi_opname (PLUS_EXPR); break; case CPP_MINUS: id = ansi_opname (MINUS_EXPR); break; case CPP_MULT: id = ansi_opname (MULT_EXPR); break; case CPP_DIV: id = ansi_opname (TRUNC_DIV_EXPR); break; case CPP_MOD: id = ansi_opname (TRUNC_MOD_EXPR); break; case CPP_XOR: id = ansi_opname (BIT_XOR_EXPR); break; case CPP_AND: id = ansi_opname (BIT_AND_EXPR); break; case CPP_OR: id = ansi_opname (BIT_IOR_EXPR); break; case CPP_COMPL: id = ansi_opname (BIT_NOT_EXPR); break; case CPP_NOT: id = ansi_opname (TRUTH_NOT_EXPR); break; case CPP_EQ: id = ansi_assopname (NOP_EXPR); break; case CPP_LESS: id = ansi_opname (LT_EXPR); break; case CPP_GREATER: id = ansi_opname (GT_EXPR); break; case CPP_PLUS_EQ: id = ansi_assopname (PLUS_EXPR); break; case CPP_MINUS_EQ: id = ansi_assopname (MINUS_EXPR); break; case CPP_MULT_EQ: id = ansi_assopname (MULT_EXPR); break; case CPP_DIV_EQ: id = ansi_assopname (TRUNC_DIV_EXPR); break; case CPP_MOD_EQ: id = ansi_assopname (TRUNC_MOD_EXPR); break; case CPP_XOR_EQ: id = ansi_assopname (BIT_XOR_EXPR); break; case CPP_AND_EQ: id = ansi_assopname (BIT_AND_EXPR); break; case CPP_OR_EQ: id = ansi_assopname (BIT_IOR_EXPR); break; case CPP_LSHIFT: id = ansi_opname (LSHIFT_EXPR); break; case CPP_RSHIFT: id = ansi_opname (RSHIFT_EXPR); break; case CPP_LSHIFT_EQ: id = ansi_assopname (LSHIFT_EXPR); break; case CPP_RSHIFT_EQ: id = ansi_assopname (RSHIFT_EXPR); break; case CPP_EQ_EQ: id = ansi_opname (EQ_EXPR); break; case CPP_NOT_EQ: id = ansi_opname (NE_EXPR); break; case CPP_LESS_EQ: id = ansi_opname (LE_EXPR); break; case CPP_GREATER_EQ: id = ansi_opname (GE_EXPR); break; case CPP_AND_AND: id = ansi_opname (TRUTH_ANDIF_EXPR); break; case CPP_OR_OR: id = ansi_opname (TRUTH_ORIF_EXPR); break; case CPP_PLUS_PLUS: id = ansi_opname (POSTINCREMENT_EXPR); break; case CPP_MINUS_MINUS: id = ansi_opname (PREDECREMENT_EXPR); break; case CPP_COMMA: id = ansi_opname (COMPOUND_EXPR); break; case CPP_DEREF_STAR: id = ansi_opname (MEMBER_REF); break; case CPP_DEREF: id = ansi_opname (COMPONENT_REF); break; case CPP_OPEN_PAREN: /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); /* Look for the matching `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); return ansi_opname (CALL_EXPR); case CPP_OPEN_SQUARE: /* Consume the `['. */ cp_lexer_consume_token (parser->lexer); /* Look for the matching `]'. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); return ansi_opname (ARRAY_REF); case CPP_UTF8STRING: case CPP_UTF8STRING_USERDEF: utf8 = true; case CPP_STRING: case CPP_WSTRING: case CPP_STRING16: case CPP_STRING32: case CPP_STRING_USERDEF: case CPP_WSTRING_USERDEF: case CPP_STRING16_USERDEF: case CPP_STRING32_USERDEF: { tree str, string_tree; int sz, len; if (cxx_dialect == cxx98) maybe_warn_cpp0x (CPP0X_USER_DEFINED_LITERALS); /* Consume the string. */ str = cp_parser_string_literal (parser, /*translate=*/true, /*wide_ok=*/true, /*lookup_udlit=*/false); if (str == error_mark_node) return error_mark_node; else if (TREE_CODE (str) == USERDEF_LITERAL) { string_tree = USERDEF_LITERAL_VALUE (str); id = USERDEF_LITERAL_SUFFIX_ID (str); } else { string_tree = str; /* Look for the suffix identifier. 
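For illustration, in the hypothetical declaration `operator "" _km (long double)' the string literal must be empty, and the token after it, `_km', is the suffix identifier sought here.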
*/ token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_NAME) id = cp_parser_identifier (parser); else if (token->type == CPP_KEYWORD) { error ("unexpected keyword;" " remove space between quotes and suffix identifier"); return error_mark_node; } else { error ("expected suffix identifier"); return error_mark_node; } } sz = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (string_tree)))); len = TREE_STRING_LENGTH (string_tree) / sz - 1; if (len != 0) { error ("expected empty string after %<operator%> keyword"); return error_mark_node; } if (utf8 || TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (string_tree))) != char_type_node) { error ("invalid encoding prefix in literal operator"); return error_mark_node; } if (id != error_mark_node) { const char *name = IDENTIFIER_POINTER (id); id = cp_literal_operator_id (name); } return id; } default: /* Anything else is an error. */ break; } /* If we have selected an identifier, we need to consume the operator token. */ if (id) cp_lexer_consume_token (parser->lexer); /* Otherwise, no valid operator name was present. */ else { cp_parser_error (parser, "expected operator"); id = error_mark_node; } return id; } /* Parse a template-declaration. template-declaration: export [opt] template < template-parameter-list > declaration If MEMBER_P is TRUE, this template-declaration occurs within a class-specifier. The grammar rule given by the standard isn't correct. What is really meant is: template-declaration: export [opt] template-parameter-list-seq decl-specifier-seq [opt] init-declarator [opt] ; export [opt] template-parameter-list-seq function-definition template-parameter-list-seq: template-parameter-list-seq [opt] template < template-parameter-list > */ static void cp_parser_template_declaration (cp_parser* parser, bool member_p) { /* Check for `export'. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_EXPORT)) { /* Consume the `export' token. */ cp_lexer_consume_token (parser->lexer); /* Warn that we do not support `export'. */ warning (0, "keyword %<export%> not implemented, and will be ignored"); } cp_parser_template_declaration_after_export (parser, member_p); } /* Parse a template-parameter-list. template-parameter-list: template-parameter template-parameter-list , template-parameter Returns a TREE_LIST. Each node represents a template parameter. The nodes are connected via their TREE_CHAINs. */ static tree cp_parser_template_parameter_list (cp_parser* parser) { tree parameter_list = NULL_TREE; begin_template_parm_list (); /* The loop below parses the template parms. We first need to know the total number of template parms to be able to compute proper canonical types of each dependent type. So after the loop, when we know the total number of template parms, end_template_parm_list computes the proper canonical types and fixes up the dependent types accordingly. */ while (true) { tree parameter; bool is_non_type; bool is_parameter_pack; location_t parm_loc; /* Parse the template-parameter. */ parm_loc = cp_lexer_peek_token (parser->lexer)->location; parameter = cp_parser_template_parameter (parser, &is_non_type, &is_parameter_pack); /* Add it to the list. */ if (parameter != error_mark_node) parameter_list = process_template_parm (parameter_list, parm_loc, parameter, is_non_type, is_parameter_pack); else { tree err_parm = build_tree_list (parameter, parameter); parameter_list = chainon (parameter_list, err_parm); } /* If the next token is not a `,', we're done. 
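For illustration, with the hypothetical parameter list `template <class T, int N>' this loop runs twice, once per template-parameter, consuming the `,' between them.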
*/ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Otherwise, consume the `,' token. */ cp_lexer_consume_token (parser->lexer); } return end_template_parm_list (parameter_list); } /* Parse a template-parameter. template-parameter: type-parameter parameter-declaration If all goes well, returns a TREE_LIST. The TREE_VALUE represents the parameter. The TREE_PURPOSE is the default value, if any. Returns ERROR_MARK_NODE on failure. *IS_NON_TYPE is set to true iff this parameter is a non-type parameter. *IS_PARAMETER_PACK is set to true iff this parameter is a parameter pack. */ static tree cp_parser_template_parameter (cp_parser* parser, bool *is_non_type, bool *is_parameter_pack) { cp_token *token; cp_parameter_declarator *parameter_declarator; cp_declarator *id_declarator; tree parm; /* Assume it is a type parameter or a template parameter. */ *is_non_type = false; /* Assume it not a parameter pack. */ *is_parameter_pack = false; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it is `class' or `template', we have a type-parameter. */ if (token->keyword == RID_TEMPLATE) return cp_parser_type_parameter (parser, is_parameter_pack); /* If it is `class' or `typename' we do not know yet whether it is a type parameter or a non-type parameter. Consider: template <typename T, typename T::X X> ... or: template <class C, class D*> ... Here, the first parameter is a type parameter, and the second is a non-type parameter. We can tell by looking at the token after the identifier -- if it is a `,', `=', or `>' then we have a type parameter. */ if (token->keyword == RID_TYPENAME || token->keyword == RID_CLASS) { /* Peek at the token after `class' or `typename'. */ token = cp_lexer_peek_nth_token (parser->lexer, 2); /* If it's an ellipsis, we have a template type parameter pack. */ if (token->type == CPP_ELLIPSIS) return cp_parser_type_parameter (parser, is_parameter_pack); /* If it's an identifier, skip it. */ if (token->type == CPP_NAME) token = cp_lexer_peek_nth_token (parser->lexer, 3); /* Now, see if the token looks like the end of a template parameter. */ if (token->type == CPP_COMMA || token->type == CPP_EQ || token->type == CPP_GREATER) return cp_parser_type_parameter (parser, is_parameter_pack); } /* Otherwise, it is a non-type parameter. [temp.param] When parsing a default template-argument for a non-type template-parameter, the first non-nested `>' is taken as the end of the template parameter-list rather than a greater-than operator. */ *is_non_type = true; parameter_declarator = cp_parser_parameter_declaration (parser, /*template_parm_p=*/true, /*parenthesized_p=*/NULL); if (!parameter_declarator) return error_mark_node; /* If the parameter declaration is marked as a parameter pack, set *IS_PARAMETER_PACK to notify the caller. Also, unmark the declarator's PACK_EXPANSION_P, otherwise we'll get errors from grokdeclarator. */ if (parameter_declarator->declarator && parameter_declarator->declarator->parameter_pack_p) { *is_parameter_pack = true; parameter_declarator->declarator->parameter_pack_p = false; } if (parameter_declarator->default_argument) { /* Can happen in some cases of erroneous input (c++/34892). */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) /* Consume the `...' for better error recovery. */ cp_lexer_consume_token (parser->lexer); } /* If the next token is an ellipsis, and we don't already have it marked as a parameter pack, then we have a parameter pack (that has no declarator). 
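For illustration, the hypothetical declaration `template <int...> struct S;' is the shape of such a pack: a non-type parameter pack whose ellipsis is detected here even though there is no declarator to carry it.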
*/ else if (!*is_parameter_pack && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS) && (declarator_can_be_parameter_pack (parameter_declarator->declarator))) { /* Consume the `...'. */ cp_lexer_consume_token (parser->lexer); maybe_warn_variadic_templates (); *is_parameter_pack = true; } /* We might end up with a pack expansion as the type of the non-type template parameter, in which case this is a non-type template parameter pack. */ else if (parameter_declarator->decl_specifiers.type && PACK_EXPANSION_P (parameter_declarator->decl_specifiers.type)) { *is_parameter_pack = true; parameter_declarator->decl_specifiers.type = PACK_EXPANSION_PATTERN (parameter_declarator->decl_specifiers.type); } if (*is_parameter_pack && cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { /* Parameter packs cannot have default arguments. However, a user may try to do so, so we'll parse them and give an appropriate diagnostic here. */ cp_token *start_token = cp_lexer_peek_token (parser->lexer); /* Find the name of the parameter pack. */ id_declarator = parameter_declarator->declarator; while (id_declarator && id_declarator->kind != cdk_id) id_declarator = id_declarator->declarator; if (id_declarator && id_declarator->kind == cdk_id) error_at (start_token->location, "template parameter pack %qD cannot have a default argument", id_declarator->u.id.unqualified_name); else error_at (start_token->location, "template parameter pack cannot have a default argument"); /* Parse the default argument, but throw away the result. */ cp_parser_default_argument (parser, /*template_parm_p=*/true); } parm = grokdeclarator (parameter_declarator->declarator, &parameter_declarator->decl_specifiers, TPARM, /*initialized=*/0, /*attrlist=*/NULL); if (parm == error_mark_node) return error_mark_node; return build_tree_list (parameter_declarator->default_argument, parm); } /* Parse a type-parameter. type-parameter: class identifier [opt] class identifier [opt] = type-id typename identifier [opt] typename identifier [opt] = type-id template < template-parameter-list > class identifier [opt] template < template-parameter-list > class identifier [opt] = id-expression GNU Extension (variadic templates): type-parameter: class ... identifier [opt] typename ... identifier [opt] Returns a TREE_LIST. The TREE_VALUE is itself a TREE_LIST. The TREE_PURPOSE is the default-argument, if any. The TREE_VALUE is the declaration of the parameter. Sets *IS_PARAMETER_PACK if this is a template parameter pack. */ static tree cp_parser_type_parameter (cp_parser* parser, bool *is_parameter_pack) { cp_token *token; tree parameter; /* Look for a keyword to tell us what kind of parameter this is. */ token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_TYPENAME_TEMPLATE); if (!token) return error_mark_node; switch (token->keyword) { case RID_CLASS: case RID_TYPENAME: { tree identifier; tree default_argument; /* If the next token is an ellipsis, we have a template argument pack. */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); maybe_warn_variadic_templates (); *is_parameter_pack = true; } /* If the next token is an identifier, then it names the parameter. */ if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) identifier = cp_parser_identifier (parser); else identifier = NULL_TREE; /* Create the parameter. */ parameter = finish_template_type_parm (class_type_node, identifier); /* If the next token is an `=', we have a default argument. 
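For illustration, the `=' introduces a default as in the hypothetical `template <typename T = int> struct S;'; a pack such as `template <typename... Ts = int>' is diagnosed below, since parameter packs cannot have default arguments.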
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { /* Consume the `=' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the default-argument. */ push_deferring_access_checks (dk_no_deferred); default_argument = cp_parser_type_id (parser); /* Template parameter packs cannot have default arguments. */ if (*is_parameter_pack) { if (identifier) error_at (token->location, "template parameter pack %qD cannot have a " "default argument", identifier); else error_at (token->location, "template parameter packs cannot have " "default arguments"); default_argument = NULL_TREE; } else if (check_for_bare_parameter_packs (default_argument)) default_argument = error_mark_node; pop_deferring_access_checks (); } else default_argument = NULL_TREE; /* Create the combined representation of the parameter and the default argument. */ parameter = build_tree_list (default_argument, parameter); } break; case RID_TEMPLATE: { tree identifier; tree default_argument; /* Look for the `<'. */ cp_parser_require (parser, CPP_LESS, RT_LESS); /* Parse the template-parameter-list. */ cp_parser_template_parameter_list (parser); /* Look for the `>'. */ cp_parser_require (parser, CPP_GREATER, RT_GREATER); /* Look for the `class' or 'typename' keywords. */ cp_parser_type_parameter_key (parser); /* If the next token is an ellipsis, we have a template argument pack. */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); maybe_warn_variadic_templates (); *is_parameter_pack = true; } /* If the next token is an `=', then there is a default-argument. If the next token is a `>', we are at the end of the parameter-list. If the next token is a `,', then we are at the end of this parameter. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ) && cp_lexer_next_token_is_not (parser->lexer, CPP_GREATER) && cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) { identifier = cp_parser_identifier (parser); /* Treat invalid names as if the parameter were nameless. */ if (identifier == error_mark_node) identifier = NULL_TREE; } else identifier = NULL_TREE; /* Create the template parameter. */ parameter = finish_template_template_parm (class_type_node, identifier); /* If the next token is an `=', then there is a default-argument. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { bool is_template; /* Consume the `='. */ cp_lexer_consume_token (parser->lexer); /* Parse the id-expression. */ push_deferring_access_checks (dk_no_deferred); /* save token before parsing the id-expression, for error reporting */ token = cp_lexer_peek_token (parser->lexer); default_argument = cp_parser_id_expression (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, /*template_p=*/&is_template, /*declarator_p=*/false, /*optional_p=*/false); if (TREE_CODE (default_argument) == TYPE_DECL) /* If the id-expression was a template-id that refers to a template-class, we already have the declaration here, so no further lookup is needed. */ ; else /* Look up the name. */ default_argument = cp_parser_lookup_name (parser, default_argument, none_type, /*is_template=*/is_template, /*is_namespace=*/false, /*check_dependency=*/true, /*ambiguous_decls=*/NULL, token->location); /* See if the default argument is valid. */ default_argument = check_template_template_default_arg (default_argument); /* Template parameter packs cannot have default arguments. 
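For illustration, a hypothetical `template <template <class> class... TT = X> struct S;' lands here and is rejected, mirroring the type-parameter-pack diagnostic above.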
*/ if (*is_parameter_pack) { if (identifier) error_at (token->location, "template parameter pack %qD cannot " "have a default argument", identifier); else error_at (token->location, "template parameter packs cannot " "have default arguments"); default_argument = NULL_TREE; } pop_deferring_access_checks (); } else default_argument = NULL_TREE; /* Create the combined representation of the parameter and the default argument. */ parameter = build_tree_list (default_argument, parameter); } break; default: gcc_unreachable (); break; } return parameter; } /* Parse a template-id. template-id: template-name < template-argument-list [opt] > If TEMPLATE_KEYWORD_P is TRUE, then we have just seen the `template' keyword. In this case, a TEMPLATE_ID_EXPR will be returned. Otherwise, if the template-name names a function, or set of functions, returns a TEMPLATE_ID_EXPR. If the template-name names a class, returns a TYPE_DECL for the specialization. If CHECK_DEPENDENCY_P is FALSE, names are looked up in uninstantiated templates. */ static tree cp_parser_template_id (cp_parser *parser, bool template_keyword_p, bool check_dependency_p, enum tag_types tag_type, bool is_declaration) { int i; tree templ; tree arguments; tree template_id; cp_token_position start_of_id = 0; deferred_access_check *chk; vec<deferred_access_check, va_gc> *access_check; cp_token *next_token = NULL, *next_token_2 = NULL; bool is_identifier; /* If the next token corresponds to a template-id, there is no need to reparse it. */ next_token = cp_lexer_peek_token (parser->lexer); if (next_token->type == CPP_TEMPLATE_ID) { struct tree_check *check_value; /* Get the stored value. */ check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value; /* Perform any access checks that were deferred. */ access_check = check_value->checks; if (access_check) { FOR_EACH_VEC_ELT (*access_check, i, chk) perform_or_defer_access_check (chk->binfo, chk->decl, chk->diag_decl, tf_warning_or_error); } /* Return the stored value. */ return check_value->value; } /* Avoid performing name lookup if there is no possibility of finding a template-id. */ if ((next_token->type != CPP_NAME && next_token->keyword != RID_OPERATOR) || (next_token->type == CPP_NAME && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2))) { cp_parser_error (parser, "expected template-id"); return error_mark_node; } /* Remember where the template-id starts. */ if (cp_parser_uncommitted_to_tentative_parse_p (parser)) start_of_id = cp_lexer_token_position (parser->lexer, false); push_deferring_access_checks (dk_deferred); /* Parse the template-name. */ is_identifier = false; templ = cp_parser_template_name (parser, template_keyword_p, check_dependency_p, is_declaration, tag_type, &is_identifier); if (templ == error_mark_node || is_identifier) { pop_deferring_access_checks (); return templ; } /* If we find the sequence `[:' after a template-name, it's probably a digraph-typo for `< ::'. Substitute the tokens and check whether we can parse the argument list correctly. */ next_token = cp_lexer_peek_token (parser->lexer); next_token_2 = cp_lexer_peek_nth_token (parser->lexer, 2); if (next_token->type == CPP_OPEN_SQUARE && next_token->flags & DIGRAPH && next_token_2->type == CPP_COLON && !(next_token_2->flags & PREV_WHITE)) { cp_parser_parse_tentatively (parser); /* Change `:' into `::'. */ next_token_2->type = CPP_SCOPE; /* Consume the first token (CPP_OPEN_SQUARE, which we pretend is CPP_LESS). */ cp_lexer_consume_token (parser->lexer); /* Parse the arguments.
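For illustration, on the hypothetical input `A<::B>' the lexer treats `<:' as the digraph for `[', yielding the tokens `A [ : B >'; the code above turns the `:' into `::', pretends the `[' is `<', and then tries the argument list.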
*/ arguments = cp_parser_enclosed_template_argument_list (parser); if (!cp_parser_parse_definitely (parser)) { /* If we couldn't parse an argument list, then we revert our changes and return simply an error. Maybe this is not a template-id after all. */ next_token_2->type = CPP_COLON; cp_parser_error (parser, "expected %<<%>"); pop_deferring_access_checks (); return error_mark_node; } /* Otherwise, emit an error about the invalid digraph, but continue parsing because we got our argument list. */ if (permerror (next_token->location, "%<<::%> cannot begin a template-argument list")) { static bool hint = false; inform (next_token->location, "%<<:%> is an alternate spelling for %<[%>." " Insert whitespace between %<<%> and %<::%>"); if (!hint && !flag_permissive) { inform (next_token->location, "(if you use %<-fpermissive%> " "or %<-std=c++11%>, or %<-std=gnu++11%> G++ will " "accept your code)"); hint = true; } } } else { /* Look for the `<' that starts the template-argument-list. */ if (!cp_parser_require (parser, CPP_LESS, RT_LESS)) { pop_deferring_access_checks (); return error_mark_node; } /* Parse the arguments. */ arguments = cp_parser_enclosed_template_argument_list (parser); } /* Build a representation of the specialization. */ if (identifier_p (templ)) template_id = build_min_nt_loc (next_token->location, TEMPLATE_ID_EXPR, templ, arguments); else if (DECL_TYPE_TEMPLATE_P (templ) || DECL_TEMPLATE_TEMPLATE_PARM_P (templ)) { bool entering_scope; /* In "template <typename T> ... A<T>::", A<T> is the abstract A template (rather than some instantiation thereof) only if it is not nested within some other construct. For example, in "template <typename T> void f(T) { A<T>::", A<T> is just an instantiation of A. */ entering_scope = (template_parm_scope_p () && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)); template_id = finish_template_type (templ, arguments, entering_scope); } else if (variable_template_p (templ)) { template_id = lookup_template_variable (templ, arguments); } else { /* If it's not a class-template or a template-template, it should be a function-template. */ gcc_assert ((DECL_FUNCTION_TEMPLATE_P (templ) || TREE_CODE (templ) == OVERLOAD || BASELINK_P (templ))); template_id = lookup_template_function (templ, arguments); } /* If parsing tentatively, replace the sequence of tokens that makes up the template-id with a CPP_TEMPLATE_ID token. That way, should we re-parse the token stream, we will not have to repeat the effort required to do the parse, nor will we issue duplicate error messages about problems during instantiation of the template. */ if (start_of_id /* Don't do this if we had a parse error in a declarator; re-parsing might succeed if a name changes meaning (60361). */ && !(cp_parser_error_occurred (parser) && cp_parser_parsing_tentatively (parser) && parser->in_declarator_p)) { cp_token *token = cp_lexer_token_at (parser->lexer, start_of_id); /* Reset the contents of the START_OF_ID token. */ token->type = CPP_TEMPLATE_ID; /* Retrieve any deferred checks. Do not pop these access checks yet so the memory will not be reclaimed during token replacing below. */ token->u.tree_check_value = ggc_cleared_alloc<struct tree_check> (); token->u.tree_check_value->value = template_id; token->u.tree_check_value->checks = get_deferred_access_checks (); token->keyword = RID_MAX; /* Purge all subsequent tokens. */ cp_lexer_purge_tokens_after (parser->lexer, start_of_id); /* ???
Can we actually assume that, if template_id == error_mark_node, we will have issued a diagnostic to the user, as opposed to simply marking the tentative parse as failed? */ if (cp_parser_error_occurred (parser) && template_id != error_mark_node) error_at (token->location, "parse error in template argument list"); } pop_to_parent_deferring_access_checks (); return template_id; } /* Parse a template-name. template-name: identifier The standard should actually say: template-name: identifier operator-function-id A defect report has been filed about this issue. A conversion-function-id cannot be a template name because they cannot be part of a template-id. In fact, looking at this code: a.operator K<int>() the conversion-function-id is "operator K<int>", and K<int> is a type-id. It is impossible to call a templated conversion-function-id with an explicit argument list, since the only allowed template parameter is the type to which it is converting. If TEMPLATE_KEYWORD_P is true, then we have just seen the `template' keyword, in a construction like: T::template f<3>() In that case `f' is taken to be a template-name, even though there is no way of knowing for sure. Returns the TEMPLATE_DECL for the template, or an OVERLOAD if the name refers to a set of overloaded functions, at least one of which is a template, or an IDENTIFIER_NODE with the name of the template, if TEMPLATE_KEYWORD_P is true. If CHECK_DEPENDENCY_P is FALSE, names are looked up inside uninstantiated templates. */ static tree cp_parser_template_name (cp_parser* parser, bool template_keyword_p, bool check_dependency_p, bool is_declaration, enum tag_types tag_type, bool *is_identifier) { tree identifier; tree decl; tree fns; cp_token *token = cp_lexer_peek_token (parser->lexer); /* If the next token is `operator', then we have either an operator-function-id or a conversion-function-id. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_OPERATOR)) { /* We don't know whether we're looking at an operator-function-id or a conversion-function-id. */ cp_parser_parse_tentatively (parser); /* Try an operator-function-id. */ identifier = cp_parser_operator_function_id (parser); /* If that didn't work, try a conversion-function-id. */ if (!cp_parser_parse_definitely (parser)) { cp_parser_error (parser, "expected template-name"); return error_mark_node; } } /* Look for the identifier. */ else identifier = cp_parser_identifier (parser); /* If we didn't find an identifier, we don't have a template-id. */ if (identifier == error_mark_node) return error_mark_node; /* If the name immediately followed the `template' keyword, then it is a template-name. However, if the next token is not `<', then we do not treat it as a template-name, since it is not being used as part of a template-id. This enables us to handle constructs like: template <typename T> struct S { S(); }; template <typename T> S<T>::S(); correctly. We would treat `S' as a template -- if it were `S<T>' -- but we do not if there is no `<'. */ if (processing_template_decl && cp_parser_nth_token_starts_template_argument_list_p (parser, 1)) { /* In a declaration, in a dependent context, we pretend that the "template" keyword was present in order to improve error recovery. For example, given: template <typename T> void f(T::X<int>); we want to treat "X<int>" as a template-id. 
*/ if (is_declaration && !template_keyword_p && parser->scope && TYPE_P (parser->scope) && check_dependency_p && dependent_scope_p (parser->scope) /* Do not do this for dtors (or ctors), since they never need the template keyword before their name. */ && !constructor_name_p (identifier, parser->scope)) { cp_token_position start = 0; /* Explain what went wrong. */ error_at (token->location, "non-template %qD used as template", identifier); inform (token->location, "use %<%T::template %D%> to indicate that it is a template", parser->scope, identifier); /* If parsing tentatively, find the location of the "<" token. */ if (cp_parser_simulate_error (parser)) start = cp_lexer_token_position (parser->lexer, true); /* Parse the template arguments so that we can issue error messages about them. */ cp_lexer_consume_token (parser->lexer); cp_parser_enclosed_template_argument_list (parser); /* Skip tokens until we find a good place from which to continue parsing. */ cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/true, /*consume_paren=*/false); /* If parsing tentatively, permanently remove the template argument list. That will prevent duplicate error messages from being issued about the missing "template" keyword. */ if (start) cp_lexer_purge_tokens_after (parser->lexer, start); if (is_identifier) *is_identifier = true; return identifier; } /* If the "template" keyword is present, then there is generally no point in doing name-lookup, so we just return IDENTIFIER. But, if the qualifying scope is non-dependent then we can (and must) do name-lookup normally. */ if (template_keyword_p && (!parser->scope || (TYPE_P (parser->scope) && dependent_type_p (parser->scope)))) return identifier; } /* Look up the name. */ decl = cp_parser_lookup_name (parser, identifier, tag_type, /*is_template=*/true, /*is_namespace=*/false, check_dependency_p, /*ambiguous_decls=*/NULL, token->location); decl = strip_using_decl (decl); /* If DECL is a template, then the name was a template-name. */ if (TREE_CODE (decl) == TEMPLATE_DECL) { if (TREE_DEPRECATED (decl) && deprecated_state != DEPRECATED_SUPPRESS) warn_deprecated_use (decl, NULL_TREE); } else { tree fn = NULL_TREE; /* The standard does not explicitly indicate whether a name that names a set of overloaded declarations, some of which are templates, is a template-name. However, such a name should be a template-name; otherwise, there is no way to form a template-id for the overloaded templates. */ fns = BASELINK_P (decl) ? BASELINK_FUNCTIONS (decl) : decl; if (TREE_CODE (fns) == OVERLOAD) for (fn = fns; fn; fn = OVL_NEXT (fn)) if (TREE_CODE (OVL_CURRENT (fn)) == TEMPLATE_DECL) break; if (!fn) { /* The name does not name a template. */ cp_parser_error (parser, "expected template-name"); return error_mark_node; } } /* If DECL is dependent, and refers to a function, then just return its name; we will look it up again during template instantiation. */ if (DECL_FUNCTION_TEMPLATE_P (decl) || !DECL_P (decl)) { tree scope = ovl_scope (decl); if (TYPE_P (scope) && dependent_type_p (scope)) return identifier; } return decl; } /* Parse a template-argument-list. template-argument-list: template-argument ... [opt] template-argument-list , template-argument ... [opt] Returns a TREE_VEC containing the arguments. 
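A hypothetical example, for illustration: for `S<int, Ts...>' the returned TREE_VEC has two elements, the second being the pack expansion built by make_pack_expansion below.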
*/ static tree cp_parser_template_argument_list (cp_parser* parser) { tree fixed_args[10]; unsigned n_args = 0; unsigned alloced = 10; tree *arg_ary = fixed_args; tree vec; bool saved_in_template_argument_list_p; bool saved_ice_p; bool saved_non_ice_p; saved_in_template_argument_list_p = parser->in_template_argument_list_p; parser->in_template_argument_list_p = true; /* Even if the template-id appears in an integral constant-expression, the contents of the argument list do not. */ saved_ice_p = parser->integral_constant_expression_p; parser->integral_constant_expression_p = false; saved_non_ice_p = parser->non_integral_constant_expression_p; parser->non_integral_constant_expression_p = false; /* Parse the arguments. */ do { tree argument; if (n_args) /* Consume the comma. */ cp_lexer_consume_token (parser->lexer); /* Parse the template-argument. */ argument = cp_parser_template_argument (parser); /* If the next token is an ellipsis, we're expanding a template argument pack. */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { if (argument == error_mark_node) { cp_token *token = cp_lexer_peek_token (parser->lexer); error_at (token->location, "expected parameter pack before %<...%>"); } /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); /* Make the argument into a TYPE_PACK_EXPANSION or EXPR_PACK_EXPANSION. */ argument = make_pack_expansion (argument); } if (n_args == alloced) { alloced *= 2; if (arg_ary == fixed_args) { arg_ary = XNEWVEC (tree, alloced); memcpy (arg_ary, fixed_args, sizeof (tree) * n_args); } else arg_ary = XRESIZEVEC (tree, arg_ary, alloced); } arg_ary[n_args++] = argument; } while (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)); vec = make_tree_vec (n_args); while (n_args--) TREE_VEC_ELT (vec, n_args) = arg_ary[n_args]; if (arg_ary != fixed_args) free (arg_ary); parser->non_integral_constant_expression_p = saved_non_ice_p; parser->integral_constant_expression_p = saved_ice_p; parser->in_template_argument_list_p = saved_in_template_argument_list_p; #ifdef ENABLE_CHECKING SET_NON_DEFAULT_TEMPLATE_ARGS_COUNT (vec, TREE_VEC_LENGTH (vec)); #endif return vec; } /* Parse a template-argument. template-argument: assignment-expression type-id id-expression The representation is that of an assignment-expression, type-id, or id-expression -- except that the qualified id-expression is evaluated, so that the value returned is either a DECL or an OVERLOAD. Although the standard says "assignment-expression", it forbids throw-expressions or assignments in the template argument. Therefore, we use "conditional-expression" instead. */ static tree cp_parser_template_argument (cp_parser* parser) { tree argument; bool template_p; bool address_p; bool maybe_type_id = false; cp_token *token = NULL, *argument_start_token = NULL; location_t loc = 0; cp_id_kind idk; /* There's really no way to know what we're looking at, so we just try each alternative in order. [temp.arg] In a template-argument, an ambiguity between a type-id and an expression is resolved to a type-id, regardless of the form of the corresponding template-parameter. Therefore, we try a type-id first. */ cp_parser_parse_tentatively (parser); argument = cp_parser_template_type_arg (parser); /* If there was no error parsing the type-id but the next token is a '>>', our behavior depends on which dialect of C++ we're parsing. In C++98, we probably found a typo for '> >'. But there are type-id which are also valid expressions. 
For instance: struct X { int operator >> (int); }; template <int V> struct Foo {}; Foo<X () >> 5> r; Here 'X()' is a valid type-id of a function type, but the user just wanted to write the expression "X() >> 5". Thus, we remember that we found a valid type-id, but we still try to parse the argument as an expression to see what happens. In C++0x, the '>>' will be considered two separate '>' tokens. */ if (!cp_parser_error_occurred (parser) && cxx_dialect == cxx98 && cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT)) { maybe_type_id = true; cp_parser_abort_tentative_parse (parser); } else { /* If the next token isn't a `,' or a `>', then this argument wasn't really finished. This means that the argument is not a valid type-id. */ if (!cp_parser_next_token_ends_template_argument_p (parser)) cp_parser_error (parser, "expected template-argument"); /* If that worked, we're done. */ if (cp_parser_parse_definitely (parser)) return argument; } /* We're still not sure what the argument will be. */ cp_parser_parse_tentatively (parser); /* Try a template. */ argument_start_token = cp_lexer_peek_token (parser->lexer); argument = cp_parser_id_expression (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, &template_p, /*declarator_p=*/false, /*optional_p=*/false); /* If the next token isn't a `,' or a `>', then this argument wasn't really finished. */ if (!cp_parser_next_token_ends_template_argument_p (parser)) cp_parser_error (parser, "expected template-argument"); if (!cp_parser_error_occurred (parser)) { /* Figure out what is being referred to. If the id-expression was for a class template specialization, then we will have a TYPE_DECL at this point. There is no need to do name lookup at this point in that case. */ if (TREE_CODE (argument) != TYPE_DECL) argument = cp_parser_lookup_name (parser, argument, none_type, /*is_template=*/template_p, /*is_namespace=*/false, /*check_dependency=*/true, /*ambiguous_decls=*/NULL, argument_start_token->location); if (TREE_CODE (argument) != TEMPLATE_DECL && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE) cp_parser_error (parser, "expected template-name"); } if (cp_parser_parse_definitely (parser)) { if (TREE_DEPRECATED (argument)) warn_deprecated_use (argument, NULL_TREE); return argument; } /* It must be a non-type argument. The permitted cases are given in [temp.arg.nontype]: -- an integral constant-expression of integral or enumeration type; or -- the name of a non-type template-parameter; or -- the name of an object or function with external linkage... -- the address of an object or function with external linkage... -- a pointer to member... */ /* Look for a non-type template parameter. */ if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { cp_parser_parse_tentatively (parser); argument = cp_parser_primary_expression (parser, /*address_p=*/false, /*cast_p=*/false, /*template_arg_p=*/true, &idk); if (TREE_CODE (argument) != TEMPLATE_PARM_INDEX || !cp_parser_next_token_ends_template_argument_p (parser)) cp_parser_simulate_error (parser); if (cp_parser_parse_definitely (parser)) return argument; } /* If the next token is "&", the argument must be the address of an object or function with external linkage. */ address_p = cp_lexer_next_token_is (parser->lexer, CPP_AND); if (address_p) { loc = cp_lexer_peek_token (parser->lexer)->location; cp_lexer_consume_token (parser->lexer); } /* See if we might have an id-expression.
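For illustration, hypothetical arguments such as `&f', `&S::m', or a named object reach this point; the primary-expression parsed below is then checked for external linkage or pointer-to-member form.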
*/ token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_NAME || token->keyword == RID_OPERATOR || token->type == CPP_SCOPE || token->type == CPP_TEMPLATE_ID || token->type == CPP_NESTED_NAME_SPECIFIER) { cp_parser_parse_tentatively (parser); argument = cp_parser_primary_expression (parser, address_p, /*cast_p=*/false, /*template_arg_p=*/true, &idk); if (cp_parser_error_occurred (parser) || !cp_parser_next_token_ends_template_argument_p (parser)) cp_parser_abort_tentative_parse (parser); else { tree probe; if (INDIRECT_REF_P (argument)) { /* Strip the dereference temporarily. */ gcc_assert (REFERENCE_REF_P (argument)); argument = TREE_OPERAND (argument, 0); } /* If we're in a template, we represent a qualified-id referring to a static data member as a SCOPE_REF even if the scope isn't dependent so that we can check access control later. */ probe = argument; if (TREE_CODE (probe) == SCOPE_REF) probe = TREE_OPERAND (probe, 1); if (VAR_P (probe)) { /* A variable without external linkage might still be a valid constant-expression, so no error is issued here if the external-linkage check fails. */ if (!address_p && !DECL_EXTERNAL_LINKAGE_P (probe)) cp_parser_simulate_error (parser); } else if (is_overloaded_fn (argument)) /* All overloaded functions are allowed; if the external linkage test does not pass, an error will be issued later. */ ; else if (address_p && (TREE_CODE (argument) == OFFSET_REF || TREE_CODE (argument) == SCOPE_REF)) /* A pointer-to-member. */ ; else if (TREE_CODE (argument) == TEMPLATE_PARM_INDEX) ; else cp_parser_simulate_error (parser); if (cp_parser_parse_definitely (parser)) { if (address_p) argument = build_x_unary_op (loc, ADDR_EXPR, argument, tf_warning_or_error); else argument = convert_from_reference (argument); return argument; } } } /* If the argument started with "&", there are no other valid alternatives at this point. */ if (address_p) { cp_parser_error (parser, "invalid non-type template argument"); return error_mark_node; } /* If the argument wasn't successfully parsed as a type-id followed by '>>', the argument can only be a constant expression now. Otherwise, we try parsing the constant-expression tentatively, because the argument could really be a type-id. */ if (maybe_type_id) cp_parser_parse_tentatively (parser); argument = cp_parser_constant_expression (parser); if (!maybe_type_id) return argument; if (!cp_parser_next_token_ends_template_argument_p (parser)) cp_parser_error (parser, "expected template-argument"); if (cp_parser_parse_definitely (parser)) return argument; /* We did our best to parse the argument as a non type-id, but that was the only alternative that matched (albeit with a '>' after it). We can assume it's just a typo from the user, and a diagnostic will then be issued. */ return cp_parser_template_type_arg (parser); } /* Parse an explicit-instantiation. explicit-instantiation: template declaration Although the standard says `declaration', what it really means is: explicit-instantiation: template decl-specifier-seq [opt] declarator [opt] ; Things like `template int S<int>::i = 5, int S<double>::j;' are not supposed to be allowed. A defect report has been filed about this issue. 
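For illustration, well-formed hypothetical examples include `template class S<int>;' and `template int f<int> (int);'.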
GNU Extension: explicit-instantiation: storage-class-specifier template decl-specifier-seq [opt] declarator [opt] ; function-specifier template decl-specifier-seq [opt] declarator [opt] ; */ static void cp_parser_explicit_instantiation (cp_parser* parser) { int declares_class_or_enum; cp_decl_specifier_seq decl_specifiers; tree extension_specifier = NULL_TREE; timevar_push (TV_TEMPLATE_INST); /* Look for an (optional) storage-class-specifier or function-specifier. */ if (cp_parser_allow_gnu_extensions_p (parser)) { extension_specifier = cp_parser_storage_class_specifier_opt (parser); if (!extension_specifier) extension_specifier = cp_parser_function_specifier_opt (parser, /*decl_specs=*/NULL); } /* Look for the `template' keyword. */ cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE); /* Let the front end know that we are processing an explicit instantiation. */ begin_explicit_instantiation (); /* [temp.explicit] says that we are supposed to ignore access control while processing explicit instantiation directives. */ push_deferring_access_checks (dk_no_check); /* Parse a decl-specifier-seq. */ cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_OPTIONAL, &decl_specifiers, &declares_class_or_enum); /* If there was exactly one decl-specifier, and it declared a class, and there's no declarator, then we have an explicit type instantiation. */ if (declares_class_or_enum && cp_parser_declares_only_class_p (parser)) { tree type; type = check_tag_decl (&decl_specifiers, /*explicit_type_instantiation_p=*/true); /* Turn access control back on for names used during template instantiation. */ pop_deferring_access_checks (); if (type) do_type_instantiation (type, extension_specifier, /*complain=*/tf_error); } else { cp_declarator *declarator; tree decl; /* Parse the declarator. */ declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, /*ctor_dtor_or_conv_p=*/NULL, /*parenthesized_p=*/NULL, /*member_p=*/false, /*friend_p=*/false); if (declares_class_or_enum & 2) cp_parser_check_for_definition_in_return_type (declarator, decl_specifiers.type, decl_specifiers.locations[ds_type_spec]); if (declarator != cp_error_declarator) { if (decl_spec_seq_has_spec_p (&decl_specifiers, ds_inline)) permerror (decl_specifiers.locations[ds_inline], "explicit instantiation shall not use" " %<inline%> specifier"); if (decl_spec_seq_has_spec_p (&decl_specifiers, ds_constexpr)) permerror (decl_specifiers.locations[ds_constexpr], "explicit instantiation shall not use" " %<constexpr%> specifier"); decl = grokdeclarator (declarator, &decl_specifiers, NORMAL, 0, &decl_specifiers.attributes); /* Turn access control back on for names used during template instantiation. */ pop_deferring_access_checks (); /* Do the explicit instantiation. */ do_decl_instantiation (decl, extension_specifier); } else { pop_deferring_access_checks (); /* Skip the body of the explicit instantiation. */ cp_parser_skip_to_end_of_statement (parser); } } /* We're done with the instantiation. */ end_explicit_instantiation (); cp_parser_consume_semicolon_at_end_of_statement (parser); timevar_pop (TV_TEMPLATE_INST); } /* Parse an explicit-specialization. 
explicit-specialization: template < > declaration Although the standard says `declaration', what it really means is: explicit-specialization: template <> decl-specifier [opt] init-declarator [opt] ; template <> function-definition template <> explicit-specialization template <> template-declaration */ static void cp_parser_explicit_specialization (cp_parser* parser) { bool need_lang_pop; cp_token *token = cp_lexer_peek_token (parser->lexer); /* Look for the `template' keyword. */ cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE); /* Look for the `<'. */ cp_parser_require (parser, CPP_LESS, RT_LESS); /* Look for the `>'. */ cp_parser_require (parser, CPP_GREATER, RT_GREATER); /* We have processed another parameter list. */ ++parser->num_template_parameter_lists; /* [temp] A template ... explicit specialization ... shall not have C linkage. */ if (current_lang_name == lang_name_c) { error_at (token->location, "template specialization with C linkage"); /* Give it C++ linkage to avoid confusing other parts of the front end. */ push_lang_context (lang_name_cplusplus); need_lang_pop = true; } else need_lang_pop = false; /* Let the front end know that we are beginning a specialization. */ if (!begin_specialization ()) { end_specialization (); return; } /* If the next keyword is `template', we need to figure out whether or not we're looking at a template-declaration. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE)) { if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS && cp_lexer_peek_nth_token (parser->lexer, 3)->type != CPP_GREATER) cp_parser_template_declaration_after_export (parser, /*member_p=*/false); else cp_parser_explicit_specialization (parser); } else /* Parse the dependent declaration. */ cp_parser_single_declaration (parser, /*checks=*/NULL, /*member_p=*/false, /*explicit_specialization_p=*/true, /*friend_p=*/NULL); /* We're done with the specialization. */ end_specialization (); /* For the erroneous case of a template with C linkage, we pushed an implicit C++ linkage scope; exit that scope now. */ if (need_lang_pop) pop_lang_context (); /* We're done with this parameter list. */ --parser->num_template_parameter_lists; } /* Parse a type-specifier. type-specifier: simple-type-specifier class-specifier enum-specifier elaborated-type-specifier cv-qualifier GNU Extension: type-specifier: __complex__ Returns a representation of the type-specifier. For a class-specifier, enum-specifier, or elaborated-type-specifier, a TREE_TYPE is returned; otherwise, a TYPE_DECL is returned. The parser flags FLAGS are used to control type-specifier parsing. If IS_DECLARATION is TRUE, then this type-specifier is appearing in a decl-specifier-seq. If DECLARES_CLASS_OR_ENUM is non-NULL, and the type-specifier is a class-specifier, enum-specifier, or elaborated-type-specifier, then *DECLARES_CLASS_OR_ENUM is set to a nonzero value. The value is 1 if a type is declared; 2 if it is defined. Otherwise, it is set to zero. If IS_CV_QUALIFIER is non-NULL, and the type-specifier is a cv-qualifier, then IS_CV_QUALIFIER is set to TRUE. Otherwise, it is set to FALSE. */ static tree cp_parser_type_specifier (cp_parser* parser, cp_parser_flags flags, cp_decl_specifier_seq *decl_specs, bool is_declaration, int* declares_class_or_enum, bool* is_cv_qualifier) { tree type_spec = NULL_TREE; cp_token *token; enum rid keyword; cp_decl_spec ds = ds_last; /* Assume this type-specifier does not declare a new type.
*/ if (declares_class_or_enum) *declares_class_or_enum = 0; /* And that it does not specify a cv-qualifier. */ if (is_cv_qualifier) *is_cv_qualifier = false; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If we're looking at a keyword, we can use that to guide the production we choose. */ keyword = token->keyword; switch (keyword) { case RID_ENUM: if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS)) goto elaborated_type_specifier; /* Look for the enum-specifier. */ type_spec = cp_parser_enum_specifier (parser); /* If that worked, we're done. */ if (type_spec) { if (declares_class_or_enum) *declares_class_or_enum = 2; if (decl_specs) cp_parser_set_decl_spec_type (decl_specs, type_spec, token, /*type_definition_p=*/true); return type_spec; } else goto elaborated_type_specifier; /* Any of these indicate either a class-specifier, or an elaborated-type-specifier. */ case RID_CLASS: case RID_STRUCT: case RID_UNION: if ((flags & CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS)) goto elaborated_type_specifier; /* Parse tentatively so that we can back up if we don't find a class-specifier. */ cp_parser_parse_tentatively (parser); /* Look for the class-specifier. */ type_spec = cp_parser_class_specifier (parser); invoke_plugin_callbacks (PLUGIN_FINISH_TYPE, type_spec); /* If that worked, we're done. */ if (cp_parser_parse_definitely (parser)) { if (declares_class_or_enum) *declares_class_or_enum = 2; if (decl_specs) cp_parser_set_decl_spec_type (decl_specs, type_spec, token, /*type_definition_p=*/true); return type_spec; } /* Fall through. */ elaborated_type_specifier: /* We're declaring (not defining) a class or enum. */ if (declares_class_or_enum) *declares_class_or_enum = 1; /* Fall through. */ case RID_TYPENAME: /* Look for an elaborated-type-specifier. */ type_spec = (cp_parser_elaborated_type_specifier (parser, decl_spec_seq_has_spec_p (decl_specs, ds_friend), is_declaration)); if (decl_specs) cp_parser_set_decl_spec_type (decl_specs, type_spec, token, /*type_definition_p=*/false); return type_spec; case RID_CONST: ds = ds_const; if (is_cv_qualifier) *is_cv_qualifier = true; break; case RID_VOLATILE: ds = ds_volatile; if (is_cv_qualifier) *is_cv_qualifier = true; break; case RID_RESTRICT: ds = ds_restrict; if (is_cv_qualifier) *is_cv_qualifier = true; break; case RID_COMPLEX: /* The `__complex__' keyword is a GNU extension. */ ds = ds_complex; break; default: break; } /* Handle simple keywords. */ if (ds != ds_last) { if (decl_specs) { set_and_check_decl_spec_loc (decl_specs, ds, token); decl_specs->any_specifiers_p = true; } return cp_lexer_consume_token (parser->lexer)->u.value; } /* If we do not already have a type-specifier, assume we are looking at a simple-type-specifier. */ type_spec = cp_parser_simple_type_specifier (parser, decl_specs, flags); /* If we didn't find a type-specifier, and a type-specifier was not optional in this context, issue an error message. */ if (!type_spec && !(flags & CP_PARSER_FLAGS_OPTIONAL)) { cp_parser_error (parser, "expected type specifier"); return error_mark_node; } return type_spec; } /* Parse a simple-type-specifier. 
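For example (illustrative): `char', `unsigned' and `long' (each keyword is handled separately, so `unsigned long' arrives here twice), `::std::size_t', or `decltype (expr)'.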
simple-type-specifier: :: [opt] nested-name-specifier [opt] type-name :: [opt] nested-name-specifier template template-id char wchar_t bool short int long signed unsigned float double void C++0x Extension: simple-type-specifier: auto decltype ( expression ) char16_t char32_t __underlying_type ( type-id ) GNU Extension: simple-type-specifier: __int128 __typeof__ unary-expression __typeof__ ( type-id ) __typeof__ ( type-id ) { initializer-list , [opt] } Returns the indicated TYPE_DECL. If DECL_SPECS is not NULL, it is appropriately updated. */ static tree cp_parser_simple_type_specifier (cp_parser* parser, cp_decl_specifier_seq *decl_specs, cp_parser_flags flags) { tree type = NULL_TREE; cp_token *token; int idx; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If we're looking at a keyword, things are easy. */ switch (token->keyword) { case RID_CHAR: if (decl_specs) decl_specs->explicit_char_p = true; type = char_type_node; break; case RID_CHAR16: type = char16_type_node; break; case RID_CHAR32: type = char32_type_node; break; case RID_WCHAR: type = wchar_type_node; break; case RID_BOOL: type = boolean_type_node; break; case RID_SHORT: set_and_check_decl_spec_loc (decl_specs, ds_short, token); type = short_integer_type_node; break; case RID_INT: if (decl_specs) decl_specs->explicit_int_p = true; type = integer_type_node; break; case RID_INT_N_0: case RID_INT_N_1: case RID_INT_N_2: case RID_INT_N_3: idx = token->keyword - RID_INT_N_0; if (! int_n_enabled_p [idx]) break; if (decl_specs) { decl_specs->explicit_intN_p = true; decl_specs->int_n_idx = idx; } type = int_n_trees [idx].signed_type; break; case RID_LONG: if (decl_specs) set_and_check_decl_spec_loc (decl_specs, ds_long, token); type = long_integer_type_node; break; case RID_SIGNED: set_and_check_decl_spec_loc (decl_specs, ds_signed, token); type = integer_type_node; break; case RID_UNSIGNED: set_and_check_decl_spec_loc (decl_specs, ds_unsigned, token); type = unsigned_type_node; break; case RID_FLOAT: type = float_type_node; break; case RID_DOUBLE: type = double_type_node; break; case RID_VOID: type = void_type_node; break; case RID_AUTO: maybe_warn_cpp0x (CPP0X_AUTO); if (parser->auto_is_implicit_function_template_parm_p) { if (cxx_dialect >= cxx14) type = synthesize_implicit_template_parm (parser); else type = error_mark_node; if (current_class_type && LAMBDA_TYPE_P (current_class_type)) { if (cxx_dialect < cxx14) error_at (token->location, "use of %<auto%> in lambda parameter declaration " "only available with " "-std=c++14 or -std=gnu++14"); } else if (cxx_dialect < cxx14) error_at (token->location, "use of %<auto%> in parameter declaration " "only available with " "-std=c++14 or -std=gnu++14"); else pedwarn (token->location, OPT_Wpedantic, "ISO C++ forbids use of %<auto%> in parameter " "declaration"); } else type = make_auto (); break; case RID_DECLTYPE: /* Since DR 743, decltype can either be a simple-type-specifier by itself or begin a nested-name-specifier. Parsing it will replace it with a CPP_DECLTYPE, so just rewind and let the CPP_DECLTYPE handling below decide what to do. */ cp_parser_decltype (parser); cp_lexer_set_token_position (parser->lexer, token); break; case RID_TYPEOF: /* Consume the `typeof' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the operand to `typeof'. */ type = cp_parser_sizeof_operand (parser, RID_TYPEOF); /* If it is not already a TYPE, take its type. 
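(For example, an illustrative note: `__typeof__ (1 + 2L)' parses its operand as the expression `1 + 2L', and finish_typeof below yields `long'; `__typeof__ (int)' already denotes a type.)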
*/ if (!TYPE_P (type)) type = finish_typeof (type); if (decl_specs) cp_parser_set_decl_spec_type (decl_specs, type, token, /*type_definition_p=*/false); return type; case RID_UNDERLYING_TYPE: type = cp_parser_trait_expr (parser, RID_UNDERLYING_TYPE); if (decl_specs) cp_parser_set_decl_spec_type (decl_specs, type, token, /*type_definition_p=*/false); return type; case RID_BASES: case RID_DIRECT_BASES: type = cp_parser_trait_expr (parser, token->keyword); if (decl_specs) cp_parser_set_decl_spec_type (decl_specs, type, token, /*type_definition_p=*/false); return type; default: break; } /* If token is an already-parsed decltype not followed by ::, it's a simple-type-specifier. */ if (token->type == CPP_DECLTYPE && cp_lexer_peek_nth_token (parser->lexer, 2)->type != CPP_SCOPE) { type = token->u.value; if (decl_specs) { cp_parser_set_decl_spec_type (decl_specs, type, token, /*type_definition_p=*/false); /* Remember that we are handling a decltype in order to implement the resolution of DR 1510 when the argument isn't instantiation dependent. */ decl_specs->decltype_p = true; } cp_lexer_consume_token (parser->lexer); return type; } /* If the type-specifier was for a built-in type, we're done. */ if (type) { /* Record the type. */ if (decl_specs && (token->keyword != RID_SIGNED && token->keyword != RID_UNSIGNED && token->keyword != RID_SHORT && token->keyword != RID_LONG)) cp_parser_set_decl_spec_type (decl_specs, type, token, /*type_definition_p=*/false); if (decl_specs) decl_specs->any_specifiers_p = true; /* Consume the token. */ cp_lexer_consume_token (parser->lexer); if (type == error_mark_node) return error_mark_node; /* There is no valid C++ program where a non-template type is followed by a "<". That usually indicates that the user thought that the type was a template. */ cp_parser_check_for_invalid_template_id (parser, type, none_type, token->location); return TYPE_NAME (type); } /* The type-specifier must be a user-defined type. */ if (!(flags & CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES)) { bool qualified_p; bool global_p; /* Don't gobble tokens or issue error messages if this is an optional type-specifier. */ if (flags & CP_PARSER_FLAGS_OPTIONAL) cp_parser_parse_tentatively (parser); /* Look for the optional `::' operator. */ global_p = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false) != NULL_TREE); /* Look for the nested-name specifier. */ qualified_p = (cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/false, /*check_dependency_p=*/true, /*type_p=*/false, /*is_declaration=*/false) != NULL_TREE); token = cp_lexer_peek_token (parser->lexer); /* If we have seen a nested-name-specifier, and the next token is `template', then we are using the template-id production. */ if (parser->scope && cp_parser_optional_template_keyword (parser)) { /* Look for the template-id. */ type = cp_parser_template_id (parser, /*template_keyword_p=*/true, /*check_dependency_p=*/true, none_type, /*is_declaration=*/false); /* If the template-id did not name a type, we are out of luck. */ if (TREE_CODE (type) != TYPE_DECL) { cp_parser_error (parser, "expected template-id for type"); type = NULL_TREE; } } /* Otherwise, look for a type-name. */ else type = cp_parser_type_name (parser); /* Keep track of all name-lookups performed in class scopes. */ if (type && !global_p && !qualified_p && TREE_CODE (type) == TYPE_DECL && identifier_p (DECL_NAME (type))) maybe_note_name_used_in_class (DECL_NAME (type), type); /* If it didn't work out, we don't have a TYPE. 
*/ if ((flags & CP_PARSER_FLAGS_OPTIONAL) && !cp_parser_parse_definitely (parser)) type = NULL_TREE; if (type && decl_specs) cp_parser_set_decl_spec_type (decl_specs, type, token, /*type_definition_p=*/false); } /* If we didn't get a type-name, issue an error message. */ if (!type && !(flags & CP_PARSER_FLAGS_OPTIONAL)) { cp_parser_error (parser, "expected type-name"); return error_mark_node; } if (type && type != error_mark_node) { /* See if TYPE is an Objective-C type, and if so, parse and accept any protocol references following it. Do this before the cp_parser_check_for_invalid_template_id() call, because Objective-C types can be followed by '<...>' which would enclose protocol names rather than template arguments, and so everything is fine. */ if (c_dialect_objc () && !parser->scope && (objc_is_id (type) || objc_is_class_name (type))) { tree protos = cp_parser_objc_protocol_refs_opt (parser); tree qual_type = objc_get_protocol_qualified_type (type, protos); /* Clobber the "unqualified" type previously entered into DECL_SPECS with the new, improved protocol-qualified version. */ if (decl_specs) decl_specs->type = qual_type; return qual_type; } /* There is no valid C++ program where a non-template type is followed by a "<". That usually indicates that the user thought that the type was a template. */ cp_parser_check_for_invalid_template_id (parser, TREE_TYPE (type), none_type, token->location); } return type; } /* Parse a type-name. type-name: class-name enum-name typedef-name simple-template-id [in c++0x] enum-name: identifier typedef-name: identifier Returns a TYPE_DECL for the type. */ static tree cp_parser_type_name (cp_parser* parser) { tree type_decl; /* We can't know yet whether it is a class-name or not. */ cp_parser_parse_tentatively (parser); /* Try a class-name. */ type_decl = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, none_type, /*check_dependency_p=*/true, /*class_head_p=*/false, /*is_declaration=*/false); /* If it's not a class-name, keep looking. */ if (!cp_parser_parse_definitely (parser)) { if (cxx_dialect < cxx11) /* It must be a typedef-name or an enum-name. */ return cp_parser_nonclass_name (parser); cp_parser_parse_tentatively (parser); /* It is either a simple-template-id representing an instantiation of an alias template... */ type_decl = cp_parser_template_id (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, none_type, /*is_declaration=*/false); /* Note that this must be an instantiation of an alias template because [temp.names]/6 says: A template-id that names an alias template specialization is a type-name. Whereas [temp.names]/7 says: A simple-template-id that names a class template specialization is a class-name. */ if (type_decl != NULL_TREE && TREE_CODE (type_decl) == TYPE_DECL && TYPE_DECL_ALIAS_P (type_decl)) gcc_assert (DECL_TEMPLATE_INSTANTIATION (type_decl)); else cp_parser_simulate_error (parser); if (!cp_parser_parse_definitely (parser)) /* ... Or a typedef-name or an enum-name. */ return cp_parser_nonclass_name (parser); } return type_decl; } /* Parse a non-class type-name, that is, either an enum-name or a typedef-name. enum-name: identifier typedef-name: identifier Returns a TYPE_DECL for the type. */ static tree cp_parser_nonclass_name (cp_parser* parser) { tree type_decl; tree identifier; cp_token *token = cp_lexer_peek_token (parser->lexer); identifier = cp_parser_identifier (parser); if (identifier == error_mark_node) return error_mark_node; /* Look up the type-name. 
*/ type_decl = cp_parser_lookup_name_simple (parser, identifier, token->location); type_decl = strip_using_decl (type_decl); if (TREE_CODE (type_decl) != TYPE_DECL && (objc_is_id (identifier) || objc_is_class_name (identifier))) { /* See if this is an Objective-C type. */ tree protos = cp_parser_objc_protocol_refs_opt (parser); tree type = objc_get_protocol_qualified_type (identifier, protos); if (type) type_decl = TYPE_NAME (type); } /* Issue an error if we did not find a type-name. */ if (TREE_CODE (type_decl) != TYPE_DECL /* In Objective-C, we have the complication that class names are normally type names and start declarations (eg, the "NSObject" in "NSObject *object;"), but can be used in an Objective-C 2.0 dot-syntax (as in "NSObject.version") which is an expression. So, a classname followed by a dot is not a valid type-name. */ || (objc_is_class_name (TREE_TYPE (type_decl)) && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT)) { if (!cp_parser_simulate_error (parser)) cp_parser_name_lookup_error (parser, identifier, type_decl, NLE_TYPE, token->location); return error_mark_node; } /* Remember that the name was used in the definition of the current class so that we can check later to see if the meaning would have been different after the class was entirely defined. */ else if (type_decl != error_mark_node && !parser->scope) maybe_note_name_used_in_class (identifier, type_decl); return type_decl; } /* Parse an elaborated-type-specifier. Note that the grammar given here incorporates the resolution to DR68. elaborated-type-specifier: class-key :: [opt] nested-name-specifier [opt] identifier class-key :: [opt] nested-name-specifier [opt] template [opt] template-id enum-key :: [opt] nested-name-specifier [opt] identifier typename :: [opt] nested-name-specifier identifier typename :: [opt] nested-name-specifier template [opt] template-id GNU extension: elaborated-type-specifier: class-key attributes :: [opt] nested-name-specifier [opt] identifier class-key attributes :: [opt] nested-name-specifier [opt] template [opt] template-id enum attributes :: [opt] nested-name-specifier [opt] identifier If IS_FRIEND is TRUE, then this elaborated-type-specifier is being declared `friend'. If IS_DECLARATION is TRUE, then this elaborated-type-specifier appears in a decl-specifiers-seq, i.e., something is being declared. Returns the TYPE specified. */ static tree cp_parser_elaborated_type_specifier (cp_parser* parser, bool is_friend, bool is_declaration) { enum tag_types tag_type; tree identifier; tree type = NULL_TREE; tree attributes = NULL_TREE; tree globalscope; cp_token *token = NULL; /* See if we're looking at the `enum' keyword. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ENUM)) { /* Consume the `enum' token. */ cp_lexer_consume_token (parser->lexer); /* Remember that it's an enumeration type. */ tag_type = enum_type; /* Issue a warning if the `struct' or `class' key (for C++0x scoped enums) is used here. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS) || cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT)) { pedwarn (input_location, 0, "elaborated-type-specifier " "for a scoped enum must not use the %<%D%> keyword", cp_lexer_peek_token (parser->lexer)->u.value); /* Consume the `struct' or `class' and parse it anyway. */ cp_lexer_consume_token (parser->lexer); } /* Parse the attributes. */ attributes = cp_parser_attributes_opt (parser); } /* Or, it might be `typename'. 
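For instance, the `typename' in `typename T::type x;', where T is a template parameter (example added for illustration).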
*/ else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME)) { /* Consume the `typename' token. */ cp_lexer_consume_token (parser->lexer); /* Remember that it's a `typename' type. */ tag_type = typename_type; } /* Otherwise it must be a class-key. */ else { tag_type = cp_parser_class_key (parser); if (tag_type == none_type) return error_mark_node; /* Parse the attributes. */ attributes = cp_parser_attributes_opt (parser); } /* Look for the `::' operator. */ globalscope = cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false); /* Look for the nested-name-specifier. */ if (tag_type == typename_type && !globalscope) { if (!cp_parser_nested_name_specifier (parser, /*typename_keyword_p=*/true, /*check_dependency_p=*/true, /*type_p=*/true, is_declaration)) return error_mark_node; } else /* Even though `typename' is not present, the proposed resolution to Core Issue 180 says that in `class A<T>::B', `B' should be considered a type-name, even if `A<T>' is dependent. */ cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/true, /*check_dependency_p=*/true, /*type_p=*/true, is_declaration); /* For everything but enumeration types, consider a template-id. For an enumeration type, consider only a plain identifier. */ if (tag_type != enum_type) { bool template_p = false; tree decl; /* Allow the `template' keyword. */ template_p = cp_parser_optional_template_keyword (parser); /* If we didn't see `template', we don't know if there's a template-id or not. */ if (!template_p) cp_parser_parse_tentatively (parser); /* Parse the template-id. */ token = cp_lexer_peek_token (parser->lexer); decl = cp_parser_template_id (parser, template_p, /*check_dependency_p=*/true, tag_type, is_declaration); /* If we didn't find a template-id, look for an ordinary identifier. */ if (!template_p && !cp_parser_parse_definitely (parser)) ; /* We can get here when cp_parser_template_id, called by cp_parser_class_name with tag_type == none_type, succeeds and caches a BASELINK. Then, when called again here, instead of failing and returning an error_mark_node returns it (see template/typename17.C in C++11). ??? Could we diagnose this earlier? */ else if (tag_type == typename_type && BASELINK_P (decl)) { cp_parser_diagnose_invalid_type_name (parser, decl, token->location); type = error_mark_node; } /* If DECL is a TEMPLATE_ID_EXPR, and the `typename' keyword is in effect, then we must assume that, upon instantiation, the template will correspond to a class. */ else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR && tag_type == typename_type) type = make_typename_type (parser->scope, decl, typename_type, /*complain=*/tf_error); /* If the `typename' keyword is in effect and DECL is not a type decl, then the type is non-existent. */ else if (tag_type == typename_type && TREE_CODE (decl) != TYPE_DECL) ; else if (TREE_CODE (decl) == TYPE_DECL) type = check_elaborated_type_specifier (tag_type, decl, /*allow_template_p=*/true); else if (decl == error_mark_node) type = error_mark_node; } if (!type) { token = cp_lexer_peek_token (parser->lexer); identifier = cp_parser_identifier (parser); if (identifier == error_mark_node) { parser->scope = NULL_TREE; return error_mark_node; } /* For a `typename', we needn't call xref_tag. */ if (tag_type == typename_type && TREE_CODE (parser->scope) != NAMESPACE_DECL) return cp_parser_make_typename_type (parser, identifier, token->location); /* Template parameter lists apply only if we are not within a function parameter list.
*/ bool template_parm_lists_apply = parser->num_template_parameter_lists; if (template_parm_lists_apply) for (cp_binding_level *s = current_binding_level; s && s->kind != sk_template_parms; s = s->level_chain) if (s->kind == sk_function_parms) template_parm_lists_apply = false; /* Look up a qualified name in the usual way. */ if (parser->scope) { tree decl; tree ambiguous_decls; decl = cp_parser_lookup_name (parser, identifier, tag_type, /*is_template=*/false, /*is_namespace=*/false, /*check_dependency=*/true, &ambiguous_decls, token->location); /* If the lookup was ambiguous, an error will already have been issued. */ if (ambiguous_decls) return error_mark_node; /* If we are parsing a friend declaration, DECL may be a TEMPLATE_DECL tree node here. However, we need to check whether this TEMPLATE_DECL results in valid code. Consider the following example: namespace N { template <class T> class C {}; } class X { template <class T> friend class N::C; // #1, valid code }; template <class T> class Y { friend class N::C; // #2, invalid code }; For both case #1 and #2, we arrive at a TEMPLATE_DECL after name lookup of `N::C'. We see that the friend declaration must be a template for the code to be valid. Note that processing_template_decl does not work here since it is always 1 for the above two cases. */ decl = (cp_parser_maybe_treat_template_as_class (decl, /*tag_name_p=*/is_friend && template_parm_lists_apply)); if (TREE_CODE (decl) != TYPE_DECL) { cp_parser_diagnose_invalid_type_name (parser, identifier, token->location); return error_mark_node; } if (TREE_CODE (TREE_TYPE (decl)) != TYPENAME_TYPE) { bool allow_template = (template_parm_lists_apply || DECL_SELF_REFERENCE_P (decl)); type = check_elaborated_type_specifier (tag_type, decl, allow_template); if (type == error_mark_node) return error_mark_node; } /* Forward declarations of nested types, such as class C1::C2; class C1::C2::C3; are invalid unless all components preceding the final '::' are complete. If all enclosing types are complete, these declarations become merely pointless. Invalid forward declarations of nested types are errors caught elsewhere in parsing. Those that are pointless arrive here. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON) && !is_friend && !processing_explicit_instantiation) warning (0, "declaration %qD does not declare anything", decl); type = TREE_TYPE (decl); } else { /* An elaborated-type-specifier sometimes introduces a new type and sometimes names an existing type. Normally, the rule is that it introduces a new type only if there is not an existing type of the same name already in scope. For example, given: struct S {}; void f() { struct S s; } the `struct S' in the body of `f' is the same `struct S' as in the global scope; the existing definition is used. However, if there were no global declaration, this would introduce a new local class named `S'. An exception to this rule applies to the following code: namespace N { struct S; } Here, the elaborated-type-specifier names a new type unconditionally; even if there is already an `S' in the containing scope this declaration names a new type. This exception only applies if the elaborated-type-specifier forms the complete declaration: [class.name] A declaration consisting solely of `class-key identifier ;' is either a redeclaration of the name in the current scope or a forward declaration of the identifier as a class name. It introduces the name into the current scope. We are in this situation precisely when the next token is a `;'.
An exception to the exception is that a `friend' declaration does *not* name a new type; i.e., given: struct S { friend struct T; }; `T' is not a new type in the scope of `S'. Also, `new struct S' or `sizeof (struct S)' never results in the definition of a new type; a new type can only be declared in a declaration context. */ tag_scope ts; bool template_p; if (is_friend) /* Friends have special name lookup rules. */ ts = ts_within_enclosing_non_class; else if (is_declaration && cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) /* This is a `class-key identifier ;' */ ts = ts_current; else ts = ts_global; template_p = (template_parm_lists_apply && (cp_parser_next_token_starts_class_definition_p (parser) || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))); /* An unqualified name was used to reference this type, so there were no qualifying templates. */ if (template_parm_lists_apply && !cp_parser_check_template_parameters (parser, /*num_templates=*/0, token->location, /*declarator=*/NULL)) return error_mark_node; type = xref_tag (tag_type, identifier, ts, template_p); } } if (type == error_mark_node) return error_mark_node; /* Allow attributes on forward declarations of classes. */ if (attributes) { if (TREE_CODE (type) == TYPENAME_TYPE) warning (OPT_Wattributes, "attributes ignored on uninstantiated type"); else if (tag_type != enum_type && CLASSTYPE_TEMPLATE_INSTANTIATION (type) && ! processing_explicit_instantiation) warning (OPT_Wattributes, "attributes ignored on template instantiation"); else if (is_declaration && cp_parser_declares_only_class_p (parser)) cplus_decl_attributes (&type, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); else warning (OPT_Wattributes, "attributes ignored on elaborated-type-specifier that is not a forward declaration"); } if (tag_type != enum_type) { /* Indicate whether this class was declared as a `class' or as a `struct'. */ if (TREE_CODE (type) == RECORD_TYPE) CLASSTYPE_DECLARED_CLASS (type) = (tag_type == class_type); cp_parser_check_class_key (tag_type, type); } /* A "<" cannot follow an elaborated type specifier. If that happens, the user was probably trying to form a template-id. */ cp_parser_check_for_invalid_template_id (parser, type, tag_type, token->location); return type; } /* Parse an enum-specifier. enum-specifier: enum-head { enumerator-list [opt] } enum-head { enumerator-list , } [C++0x] enum-head: enum-key identifier [opt] enum-base [opt] enum-key nested-name-specifier identifier enum-base [opt] enum-key: enum enum class [C++0x] enum struct [C++0x] enum-base: [C++0x] : type-specifier-seq opaque-enum-specifier: enum-key identifier enum-base [opt] ; GNU Extensions: enum-key attributes[opt] identifier [opt] enum-base [opt] { enumerator-list [opt] }attributes[opt] enum-key attributes[opt] identifier [opt] enum-base [opt] { enumerator-list, }attributes[opt] [C++0x] Returns an ENUM_TYPE representing the enumeration, or NULL_TREE if the token stream isn't an enum-specifier after all. 
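For illustration (examples added; the names are invented):

     enum color { red, green, blue };            // unscoped enumeration
     enum class status : unsigned char { ok };   // scoped enum with enum-base
     enum class status : unsigned char;          // opaque-enum-specifier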
*/ static tree cp_parser_enum_specifier (cp_parser* parser) { tree identifier; tree type = NULL_TREE; tree prev_scope; tree nested_name_specifier = NULL_TREE; tree attributes; bool scoped_enum_p = false; bool has_underlying_type = false; bool nested_being_defined = false; bool new_value_list = false; bool is_new_type = false; bool is_anonymous = false; tree underlying_type = NULL_TREE; cp_token *type_start_token = NULL; bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p; parser->colon_corrects_to_scope_p = false; /* Parse tentatively so that we can back up if we don't find an enum-specifier. */ cp_parser_parse_tentatively (parser); /* Caller guarantees that the current token is 'enum', an identifier possibly follows, and the token after that is an opening brace. If we don't have an identifier, fabricate an anonymous name for the enumeration being defined. */ cp_lexer_consume_token (parser->lexer); /* Parse the "class" or "struct", which indicates a scoped enumeration type in C++0x. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CLASS) || cp_lexer_next_token_is_keyword (parser->lexer, RID_STRUCT)) { if (cxx_dialect < cxx11) maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS); /* Consume the `struct' or `class' token. */ cp_lexer_consume_token (parser->lexer); scoped_enum_p = true; } attributes = cp_parser_attributes_opt (parser); /* Clear the qualification. */ parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; /* Figure out in what scope the declaration is being placed. */ prev_scope = current_scope (); type_start_token = cp_lexer_peek_token (parser->lexer); push_deferring_access_checks (dk_no_check); nested_name_specifier = cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/true, /*check_dependency_p=*/false, /*type_p=*/false, /*is_declaration=*/false); if (nested_name_specifier) { tree name; identifier = cp_parser_identifier (parser); name = cp_parser_lookup_name (parser, identifier, enum_type, /*is_template=*/false, /*is_namespace=*/false, /*check_dependency=*/true, /*ambiguous_decls=*/NULL, input_location); if (name && name != error_mark_node) { type = TREE_TYPE (name); if (TREE_CODE (type) == TYPENAME_TYPE) { /* Are template enums allowed in ISO? */ if (template_parm_scope_p ()) pedwarn (type_start_token->location, OPT_Wpedantic, "%qD is an enumeration template", name); /* Ignore a typename reference, for it will be resolved by name in start_enum. */ type = NULL_TREE; } } else if (nested_name_specifier == error_mark_node) /* We already issued an error. */; else error_at (type_start_token->location, "%qD is not an enumerator-name", identifier); } else { if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) identifier = cp_parser_identifier (parser); else { identifier = make_anon_name (); is_anonymous = true; if (scoped_enum_p) error_at (type_start_token->location, "anonymous scoped enum is not allowed"); } } pop_deferring_access_checks (); /* Check for the `:' that denotes a specified underlying type in C++0x. Note that a ':' could also indicate a bitfield width, however. */ if (cp_lexer_next_token_is (parser->lexer, CPP_COLON)) { cp_decl_specifier_seq type_specifiers; /* Consume the `:'. */ cp_lexer_consume_token (parser->lexer); /* Parse the type-specifier-seq. */ cp_parser_type_specifier_seq (parser, /*is_declaration=*/false, /*is_trailing_return=*/false, &type_specifiers); /* At this point this is surely not an elaborated-type-specifier.
*/ if (!cp_parser_parse_definitely (parser)) return NULL_TREE; if (cxx_dialect < cxx11) maybe_warn_cpp0x (CPP0X_SCOPED_ENUMS); has_underlying_type = true; /* If that didn't work, stop. */ if (type_specifiers.type != error_mark_node) { underlying_type = grokdeclarator (NULL, &type_specifiers, TYPENAME, /*initialized=*/0, NULL); if (underlying_type == error_mark_node || check_for_bare_parameter_packs (underlying_type)) underlying_type = NULL_TREE; } } /* Look for the `{' but don't consume it yet. */ if (!cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { if (cxx_dialect < cxx11 || (!scoped_enum_p && !underlying_type)) { cp_parser_error (parser, "expected %<{%>"); if (has_underlying_type) { type = NULL_TREE; goto out; } } /* An opaque-enum-specifier must have a ';' here. */ if ((scoped_enum_p || underlying_type) && cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { cp_parser_error (parser, "expected %<;%> or %<{%>"); if (has_underlying_type) { type = NULL_TREE; goto out; } } } if (!has_underlying_type && !cp_parser_parse_definitely (parser)) return NULL_TREE; if (nested_name_specifier) { if (CLASS_TYPE_P (nested_name_specifier)) { nested_being_defined = TYPE_BEING_DEFINED (nested_name_specifier); TYPE_BEING_DEFINED (nested_name_specifier) = 1; push_scope (nested_name_specifier); } else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL) { push_nested_namespace (nested_name_specifier); } } /* Issue an error message if type-definitions are forbidden here. */ if (!cp_parser_check_type_definition (parser)) type = error_mark_node; else /* Create the new type. We do this before consuming the opening brace so the enum will be recorded as being on the line of its tag (or the 'enum' keyword, if there is no tag). */ type = start_enum (identifier, type, underlying_type, scoped_enum_p, &is_new_type); /* If the next token is not '{' it is an opaque-enum-specifier or an elaborated-type-specifier. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { timevar_push (TV_PARSE_ENUM); if (nested_name_specifier && nested_name_specifier != error_mark_node) { /* The following catches invalid code such as: enum class S<int>::E { A, B, C }; */ if (!processing_specialization && CLASS_TYPE_P (nested_name_specifier) && CLASSTYPE_USE_TEMPLATE (nested_name_specifier)) error_at (type_start_token->location, "cannot add an enumerator " "list to a template instantiation"); if (TREE_CODE (nested_name_specifier) == TYPENAME_TYPE) { error_at (type_start_token->location, "%<%T::%E%> has not been declared", TYPE_CONTEXT (nested_name_specifier), nested_name_specifier); type = error_mark_node; } /* If that scope does not contain the scope in which the class was originally declared, the program is invalid. */ else if (prev_scope && !is_ancestor (prev_scope, nested_name_specifier)) { if (at_namespace_scope_p ()) error_at (type_start_token->location, "declaration of %qD in namespace %qD which does not " "enclose %qD", type, prev_scope, nested_name_specifier); else error_at (type_start_token->location, "declaration of %qD in %qD which does not " "enclose %qD", type, prev_scope, nested_name_specifier); type = error_mark_node; } } if (scoped_enum_p) begin_scope (sk_scoped_enum, type); /* Consume the opening brace. 
*/ cp_lexer_consume_token (parser->lexer); if (type == error_mark_node) ; /* Nothing to add */ else if (OPAQUE_ENUM_P (type) || (cxx_dialect > cxx98 && processing_specialization)) { new_value_list = true; SET_OPAQUE_ENUM_P (type, false); DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location; } else { error_at (type_start_token->location, "multiple definition of %q#T", type); inform (DECL_SOURCE_LOCATION (TYPE_MAIN_DECL (type)), "previous definition here"); type = error_mark_node; } if (type == error_mark_node) cp_parser_skip_to_end_of_block_or_statement (parser); /* If the next token is not '}', then there are some enumerators. */ else if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE)) { if (is_anonymous && !scoped_enum_p) pedwarn (type_start_token->location, OPT_Wpedantic, "ISO C++ forbids empty anonymous enum"); } else cp_parser_enumerator_list (parser, type); /* Consume the final '}'. */ cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); if (scoped_enum_p) finish_scope (); timevar_pop (TV_PARSE_ENUM); } else { /* If a ';' follows, then it is an opaque-enum-specifier and additional restrictions apply. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) { if (is_anonymous) error_at (type_start_token->location, "opaque-enum-specifier without name"); else if (nested_name_specifier) error_at (type_start_token->location, "opaque-enum-specifier must use a simple identifier"); } } /* Look for trailing attributes to apply to this enumeration, and apply them if appropriate. */ if (cp_parser_allow_gnu_extensions_p (parser)) { tree trailing_attr = cp_parser_gnu_attributes_opt (parser); trailing_attr = chainon (trailing_attr, attributes); cplus_decl_attributes (&type, trailing_attr, (int) ATTR_FLAG_TYPE_IN_PLACE); } /* Finish up the enumeration. */ if (type != error_mark_node) { if (new_value_list) finish_enum_value_list (type); if (is_new_type) finish_enum (type); } if (nested_name_specifier) { if (CLASS_TYPE_P (nested_name_specifier)) { TYPE_BEING_DEFINED (nested_name_specifier) = nested_being_defined; pop_scope (nested_name_specifier); } else if (TREE_CODE (nested_name_specifier) == NAMESPACE_DECL) { pop_nested_namespace (nested_name_specifier); } } out: parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; return type; } /* Parse an enumerator-list. The enumerators all have the indicated TYPE. enumerator-list: enumerator-definition enumerator-list , enumerator-definition */ static void cp_parser_enumerator_list (cp_parser* parser, tree type) { while (true) { /* Parse an enumerator-definition. */ cp_parser_enumerator_definition (parser, type); /* If the next token is not a ',', we've reached the end of the list. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Otherwise, consume the `,' and keep going. */ cp_lexer_consume_token (parser->lexer); /* If the next token is a `}', there is a trailing comma. */ if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE)) { if (cxx_dialect < cxx11 && !in_system_header_at (input_location)) pedwarn (input_location, OPT_Wpedantic, "comma at end of enumerator list"); break; } } } /* Parse an enumerator-definition. The enumerator has the indicated TYPE. 
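For example (illustrative), in `enum color { red = 4, green };' both `red = 4' and `green' are enumerator-definitions.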
enumerator-definition: enumerator enumerator = constant-expression enumerator: identifier */ static void cp_parser_enumerator_definition (cp_parser* parser, tree type) { tree identifier; tree value; location_t loc; /* Save the input location because we are interested in the location of the identifier and not the location of the explicit value. */ loc = cp_lexer_peek_token (parser->lexer)->location; /* Look for the identifier. */ identifier = cp_parser_identifier (parser); if (identifier == error_mark_node) return; /* If the next token is an '=', then there is an explicit value. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { /* Consume the `=' token. */ cp_lexer_consume_token (parser->lexer); /* Parse the value. */ value = cp_parser_constant_expression (parser); } else value = NULL_TREE; /* If we are processing a template, make sure the initializer of the enumerator doesn't contain any bare template parameter pack. */ if (check_for_bare_parameter_packs (value)) value = error_mark_node; /* Create the enumerator. */ build_enumerator (identifier, value, type, loc); } /* Parse a namespace-name. namespace-name: original-namespace-name namespace-alias Returns the NAMESPACE_DECL for the namespace. */ static tree cp_parser_namespace_name (cp_parser* parser) { tree identifier; tree namespace_decl; cp_token *token = cp_lexer_peek_token (parser->lexer); /* Get the name of the namespace. */ identifier = cp_parser_identifier (parser); if (identifier == error_mark_node) return error_mark_node; /* Look up the identifier in the currently active scope. Look only for namespaces, due to: [basic.lookup.udir] When looking up a namespace-name in a using-directive or alias definition, only namespace names are considered. And: [basic.lookup.qual] During the lookup of a name preceding the :: scope resolution operator, object, function, and enumerator names are ignored. (Note that cp_parser_qualifying_entity only calls this function if the token after the name is the scope resolution operator.) */ namespace_decl = cp_parser_lookup_name (parser, identifier, none_type, /*is_template=*/false, /*is_namespace=*/true, /*check_dependency=*/true, /*ambiguous_decls=*/NULL, token->location); /* If it's not a namespace, issue an error. */ if (namespace_decl == error_mark_node || TREE_CODE (namespace_decl) != NAMESPACE_DECL) { if (!cp_parser_uncommitted_to_tentative_parse_p (parser)) error_at (token->location, "%qD is not a namespace-name", identifier); cp_parser_error (parser, "expected namespace-name"); namespace_decl = error_mark_node; } return namespace_decl; } /* Parse a namespace-definition. namespace-definition: named-namespace-definition unnamed-namespace-definition named-namespace-definition: original-namespace-definition extension-namespace-definition original-namespace-definition: namespace identifier { namespace-body } extension-namespace-definition: namespace original-namespace-name { namespace-body } unnamed-namespace-definition: namespace { namespace-body } */ static void cp_parser_namespace_definition (cp_parser* parser) { tree identifier, attribs; bool has_visibility; bool is_inline; cp_ensure_no_omp_declare_simd (parser); if (cp_lexer_next_token_is_keyword (parser->lexer, RID_INLINE)) { maybe_warn_cpp0x (CPP0X_INLINE_NAMESPACES); is_inline = true; cp_lexer_consume_token (parser->lexer); } else is_inline = false; /* Look for the `namespace' keyword. */ cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE); /* Get the name of the namespace. 
We do not attempt to distinguish between an original-namespace-definition and an extension-namespace-definition at this point. The semantic analysis routines are responsible for that. */ if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) identifier = cp_parser_identifier (parser); else identifier = NULL_TREE; /* Parse any specified attributes. */ attribs = cp_parser_attributes_opt (parser); /* Look for the `{' to start the namespace. */ cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE); /* Start the namespace. */ push_namespace (identifier); /* "inline namespace" is equivalent to a stub namespace definition followed by a strong using directive. */ if (is_inline) { tree name_space = current_namespace; /* Set up namespace association. */ DECL_NAMESPACE_ASSOCIATIONS (name_space) = tree_cons (CP_DECL_CONTEXT (name_space), NULL_TREE, DECL_NAMESPACE_ASSOCIATIONS (name_space)); /* Import the contents of the inline namespace. */ pop_namespace (); do_using_directive (name_space); push_namespace (identifier); } has_visibility = handle_namespace_attrs (current_namespace, attribs); /* Parse the body of the namespace. */ cp_parser_namespace_body (parser); if (has_visibility) pop_visibility (1); /* Finish the namespace. */ pop_namespace (); /* Look for the final `}'. */ cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); } /* Parse a namespace-body. namespace-body: declaration-seq [opt] */ static void cp_parser_namespace_body (cp_parser* parser) { cp_parser_declaration_seq_opt (parser); } /* Parse a namespace-alias-definition. namespace-alias-definition: namespace identifier = qualified-namespace-specifier ; */ static void cp_parser_namespace_alias_definition (cp_parser* parser) { tree identifier; tree namespace_specifier; cp_token *token = cp_lexer_peek_token (parser->lexer); /* Look for the `namespace' keyword. */ cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE); /* Look for the identifier. */ identifier = cp_parser_identifier (parser); if (identifier == error_mark_node) return; /* Look for the `=' token. */ if (!cp_parser_uncommitted_to_tentative_parse_p (parser) && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { error_at (token->location, "%<namespace%> definition is not allowed here"); /* Skip the definition. */ cp_lexer_consume_token (parser->lexer); if (cp_parser_skip_to_closing_brace (parser)) cp_lexer_consume_token (parser->lexer); return; } cp_parser_require (parser, CPP_EQ, RT_EQ); /* Look for the qualified-namespace-specifier. */ namespace_specifier = cp_parser_qualified_namespace_specifier (parser); /* Look for the `;' token. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); /* Register the alias in the symbol table. */ do_namespace_alias (identifier, namespace_specifier); } /* Parse a qualified-namespace-specifier. qualified-namespace-specifier: :: [opt] nested-name-specifier [opt] namespace-name Returns a NAMESPACE_DECL corresponding to the specified namespace. */ static tree cp_parser_qualified_namespace_specifier (cp_parser* parser) { /* Look for the optional `::'. */ cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false); /* Look for the optional nested-name-specifier. */ cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/false, /*check_dependency_p=*/true, /*type_p=*/false, /*is_declaration=*/true); return cp_parser_namespace_name (parser); } /* Parse a using-declaration, or, if ACCESS_DECLARATION_P is true, an access declaration. 
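For example (illustrative), `using N::f;' is a using-declaration, while a bare `B::f;' inside a derived class body is a deprecated access declaration.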
using-declaration: using typename [opt] :: [opt] nested-name-specifier unqualified-id ; using :: unqualified-id ; access-declaration: qualified-id ; */ static bool cp_parser_using_declaration (cp_parser* parser, bool access_declaration_p) { cp_token *token; bool typename_p = false; bool global_scope_p; tree decl; tree identifier; tree qscope; int oldcount = errorcount; cp_token *diag_token = NULL; if (access_declaration_p) { diag_token = cp_lexer_peek_token (parser->lexer); cp_parser_parse_tentatively (parser); } else { /* Look for the `using' keyword. */ cp_parser_require_keyword (parser, RID_USING, RT_USING); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* See if it's `typename'. */ if (token->keyword == RID_TYPENAME) { /* Remember that we've seen it. */ typename_p = true; /* Consume the `typename' token. */ cp_lexer_consume_token (parser->lexer); } } /* Look for the optional global scope qualification. */ global_scope_p = (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false) != NULL_TREE); /* If we saw `typename', or didn't see `::', then there must be a nested-name-specifier present. */ if (typename_p || !global_scope_p) { qscope = cp_parser_nested_name_specifier (parser, typename_p, /*check_dependency_p=*/true, /*type_p=*/false, /*is_declaration=*/true); if (!qscope && !cp_parser_uncommitted_to_tentative_parse_p (parser)) { cp_parser_skip_to_end_of_block_or_statement (parser); return false; } } /* Otherwise, we could be in either of the two productions. In that case, treat the nested-name-specifier as optional. */ else qscope = cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/false, /*check_dependency_p=*/true, /*type_p=*/false, /*is_declaration=*/true); if (!qscope) qscope = global_namespace; else if (UNSCOPED_ENUM_P (qscope)) qscope = CP_TYPE_CONTEXT (qscope); if (access_declaration_p && cp_parser_error_occurred (parser)) /* Something has already gone wrong; there's no need to parse further. Since an error has occurred, the return value of cp_parser_parse_definitely will be false, as required. */ return cp_parser_parse_definitely (parser); token = cp_lexer_peek_token (parser->lexer); /* Parse the unqualified-id. */ identifier = cp_parser_unqualified_id (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, /*declarator_p=*/true, /*optional_p=*/false); if (access_declaration_p) { if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) cp_parser_simulate_error (parser); if (!cp_parser_parse_definitely (parser)) return false; } /* The function we call to handle a using-declaration is different depending on what scope we are in. */ if (qscope == error_mark_node || identifier == error_mark_node) ; else if (!identifier_p (identifier) && TREE_CODE (identifier) != BIT_NOT_EXPR) /* [namespace.udecl] A using declaration shall not name a template-id. */ error_at (token->location, "a template-id may not appear in a using-declaration"); else { if (at_class_scope_p ()) { /* Create the USING_DECL. */ decl = do_class_using_decl (parser->scope, identifier); if (decl && typename_p) USING_DECL_TYPENAME_P (decl) = 1; if (check_for_bare_parameter_packs (decl)) { cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); return false; } else /* Add it to the list of members in this class. 
*/ finish_member_declaration (decl); } else { decl = cp_parser_lookup_name_simple (parser, identifier, token->location); if (decl == error_mark_node) cp_parser_name_lookup_error (parser, identifier, decl, NLE_NULL, token->location); else if (check_for_bare_parameter_packs (decl)) { cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); return false; } else if (!at_namespace_scope_p ()) do_local_using_decl (decl, qscope, identifier); else do_toplevel_using_decl (decl, qscope, identifier); } } /* Look for the final `;'. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); if (access_declaration_p && errorcount == oldcount) warning_at (diag_token->location, OPT_Wdeprecated, "access declarations are deprecated " "in favour of using-declarations; " "suggestion: add the %<using%> keyword"); return true; } /* Parse an alias-declaration. alias-declaration: using identifier attribute-specifier-seq [opt] = type-id */ static tree cp_parser_alias_declaration (cp_parser* parser) { tree id, type, decl, pushed_scope = NULL_TREE, attributes; location_t id_location; cp_declarator *declarator; cp_decl_specifier_seq decl_specs; bool member_p; const char *saved_message = NULL; /* Look for the `using' keyword. */ cp_token *using_token = cp_parser_require_keyword (parser, RID_USING, RT_USING); if (using_token == NULL) return error_mark_node; id_location = cp_lexer_peek_token (parser->lexer)->location; id = cp_parser_identifier (parser); if (id == error_mark_node) return error_mark_node; cp_token *attrs_token = cp_lexer_peek_token (parser->lexer); attributes = cp_parser_attributes_opt (parser); if (attributes == error_mark_node) return error_mark_node; cp_parser_require (parser, CPP_EQ, RT_EQ); if (cp_parser_error_occurred (parser)) return error_mark_node; cp_parser_commit_to_tentative_parse (parser); /* Now we are going to parse the type-id of the declaration. */ /* [dcl.type]/3 says: "A type-specifier-seq shall not define a class or enumeration unless it appears in the type-id of an alias-declaration (7.1.3) that is not the declaration of a template-declaration." In other words, if we currently are in an alias template, the type-id should not define a type. So let's set parser->type_definition_forbidden_message in that case; cp_parser_check_type_definition (called by cp_parser_class_specifier) will then emit an error if a type is defined in the type-id. */ if (parser->num_template_parameter_lists) { saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in alias template declarations"); } type = cp_parser_type_id (parser); /* Restore the error message if need be. */ if (parser->num_template_parameter_lists) parser->type_definition_forbidden_message = saved_message; if (type == error_mark_node || !cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON)) { cp_parser_skip_to_end_of_block_or_statement (parser); return error_mark_node; } /* A typedef-name can also be introduced by an alias-declaration. The identifier following the using keyword becomes a typedef-name. It has the same semantics as if it were introduced by the typedef specifier. In particular, it does not define a new type and it shall not appear in the type-id. 
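For example (illustrative), `using index_t = long;' introduces the same typedef-name as `typedef long index_t;'.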
*/ clear_decl_specs (&decl_specs); decl_specs.type = type; if (attributes != NULL_TREE) { decl_specs.attributes = attributes; set_and_check_decl_spec_loc (&decl_specs, ds_attribute, attrs_token); } set_and_check_decl_spec_loc (&decl_specs, ds_typedef, using_token); set_and_check_decl_spec_loc (&decl_specs, ds_alias, using_token); declarator = make_id_declarator (NULL_TREE, id, sfk_none); declarator->id_loc = id_location; member_p = at_class_scope_p (); if (member_p) decl = grokfield (declarator, &decl_specs, NULL_TREE, false, NULL_TREE, attributes); else decl = start_decl (declarator, &decl_specs, 0, attributes, NULL_TREE, &pushed_scope); if (decl == error_mark_node) return decl; cp_finish_decl (decl, NULL_TREE, 0, NULL_TREE, 0); if (pushed_scope) pop_scope (pushed_scope); /* If decl is a template, return its TEMPLATE_DECL so that it gets added into the symbol table; otherwise, return the TYPE_DECL. */ if (DECL_LANG_SPECIFIC (decl) && DECL_TEMPLATE_INFO (decl) && PRIMARY_TEMPLATE_P (DECL_TI_TEMPLATE (decl))) { decl = DECL_TI_TEMPLATE (decl); if (member_p) check_member_template (decl); } return decl; } /* Parse a using-directive. using-directive: using namespace :: [opt] nested-name-specifier [opt] namespace-name ; */ static void cp_parser_using_directive (cp_parser* parser) { tree namespace_decl; tree attribs; /* Look for the `using' keyword. */ cp_parser_require_keyword (parser, RID_USING, RT_USING); /* And the `namespace' keyword. */ cp_parser_require_keyword (parser, RID_NAMESPACE, RT_NAMESPACE); /* Look for the optional `::' operator. */ cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false); /* And the optional nested-name-specifier. */ cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/false, /*check_dependency_p=*/true, /*type_p=*/false, /*is_declaration=*/true); /* Get the namespace being used. */ namespace_decl = cp_parser_namespace_name (parser); /* And any specified attributes. */ attribs = cp_parser_attributes_opt (parser); /* Update the symbol table. */ parse_using_directive (namespace_decl, attribs); /* Look for the final `;'. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); } /* Parse an asm-definition. asm-definition: asm ( string-literal ) ; GNU Extension: asm-definition: asm volatile [opt] ( string-literal ) ; asm volatile [opt] ( string-literal : asm-operand-list [opt] ) ; asm volatile [opt] ( string-literal : asm-operand-list [opt] : asm-operand-list [opt] ) ; asm volatile [opt] ( string-literal : asm-operand-list [opt] : asm-operand-list [opt] : asm-clobber-list [opt] ) ; asm volatile [opt] goto ( string-literal : : asm-operand-list [opt] : asm-clobber-list [opt] : asm-goto-list ) ; */ static void cp_parser_asm_definition (cp_parser* parser) { tree string; tree outputs = NULL_TREE; tree inputs = NULL_TREE; tree clobbers = NULL_TREE; tree labels = NULL_TREE; tree asm_stmt; bool volatile_p = false; bool extended_p = false; bool invalid_inputs_p = false; bool invalid_outputs_p = false; bool goto_p = false; required_token missing = RT_NONE; /* Look for the `asm' keyword. */ cp_parser_require_keyword (parser, RID_ASM, RT_ASM); if (parser->in_function_body && DECL_DECLARED_CONSTEXPR_P (current_function_decl)) { error ("%<asm%> in %<constexpr%> function"); cp_function_chain->invalid_constexpr = true; } /* See if the next token is `volatile'. */ if (cp_parser_allow_gnu_extensions_p (parser) && cp_lexer_next_token_is_keyword (parser->lexer, RID_VOLATILE)) { /* Remember that we saw the `volatile' keyword. 
*/ volatile_p = true; /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } if (cp_parser_allow_gnu_extensions_p (parser) && parser->in_function_body && cp_lexer_next_token_is_keyword (parser->lexer, RID_GOTO)) { /* Remember that we saw the `goto' keyword. */ goto_p = true; /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } /* Look for the opening `('. */ if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return; /* Look for the string. */ string = cp_parser_string_literal (parser, false, false); if (string == error_mark_node) { cp_parser_skip_to_closing_parenthesis (parser, true, false, /*consume_paren=*/true); return; } /* If we're allowing GNU extensions, check for the extended assembly syntax. Unfortunately, the `:' tokens need not be separated by a space in C, and so, for compatibility, we tolerate that here too. Doing that means that we have to treat the `::' operator as two `:' tokens. */ if (cp_parser_allow_gnu_extensions_p (parser) && parser->in_function_body && (cp_lexer_next_token_is (parser->lexer, CPP_COLON) || cp_lexer_next_token_is (parser->lexer, CPP_SCOPE))) { bool inputs_p = false; bool clobbers_p = false; bool labels_p = false; /* The extended syntax was used. */ extended_p = true; /* Look for outputs. */ if (cp_lexer_next_token_is (parser->lexer, CPP_COLON)) { /* Consume the `:'. */ cp_lexer_consume_token (parser->lexer); /* Parse the output-operands. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON) && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE) && cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN) && !goto_p) outputs = cp_parser_asm_operand_list (parser); if (outputs == error_mark_node) invalid_outputs_p = true; } /* If the next token is `::', there are no outputs, and the next token is the beginning of the inputs. */ else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)) /* The inputs are coming next. */ inputs_p = true; /* Look for inputs. */ if (inputs_p || cp_lexer_next_token_is (parser->lexer, CPP_COLON)) { /* Consume the `:' or `::'. */ cp_lexer_consume_token (parser->lexer); /* Parse the input-operands. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON) && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE) && cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)) inputs = cp_parser_asm_operand_list (parser); if (inputs == error_mark_node) invalid_inputs_p = true; } else if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)) /* The clobbers are coming next. */ clobbers_p = true; /* Look for clobbers. */ if (clobbers_p || cp_lexer_next_token_is (parser->lexer, CPP_COLON)) { clobbers_p = true; /* Consume the `:' or `::'. */ cp_lexer_consume_token (parser->lexer); /* Parse the clobbers. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON) && cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)) clobbers = cp_parser_asm_clobber_list (parser); } else if (goto_p && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)) /* The labels are coming next. */ labels_p = true; /* Look for labels. */ if (labels_p || (goto_p && cp_lexer_next_token_is (parser->lexer, CPP_COLON))) { labels_p = true; /* Consume the `:' or `::'. */ cp_lexer_consume_token (parser->lexer); /* Parse the labels. */ labels = cp_parser_asm_label_list (parser); } if (goto_p && !labels_p) missing = clobbers_p ? RT_COLON : RT_COLON_SCOPE; } else if (goto_p) missing = RT_COLON_SCOPE; /* Look for the closing `)'. */ if (!cp_parser_require (parser, missing ? CPP_COLON : CPP_CLOSE_PAREN, missing ?
missing : RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, true, false, /*consume_paren=*/true); cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); if (!invalid_inputs_p && !invalid_outputs_p) { /* Create the ASM_EXPR. */ if (parser->in_function_body) { asm_stmt = finish_asm_stmt (volatile_p, string, outputs, inputs, clobbers, labels); /* If the extended syntax was not used, mark the ASM_EXPR. */ if (!extended_p) { tree temp = asm_stmt; if (TREE_CODE (temp) == CLEANUP_POINT_EXPR) temp = TREE_OPERAND (temp, 0); ASM_INPUT_P (temp) = 1; } } else symtab->finalize_toplevel_asm (string); } } /* Declarators [gram.dcl.decl] */ /* Parse an init-declarator. init-declarator: declarator initializer [opt] GNU Extension: init-declarator: declarator asm-specification [opt] attributes [opt] initializer [opt] function-definition: decl-specifier-seq [opt] declarator ctor-initializer [opt] function-body decl-specifier-seq [opt] declarator function-try-block GNU Extension: function-definition: __extension__ function-definition TM Extension: function-definition: decl-specifier-seq [opt] declarator function-transaction-block The DECL_SPECIFIERS apply to this declarator. Returns a representation of the entity declared. If MEMBER_P is TRUE, then this declarator appears in a class scope. The new DECL created by this declarator is returned. The CHECKS are access checks that should be performed once we know what entity is being declared (and, therefore, what classes have befriended it). If FUNCTION_DEFINITION_ALLOWED_P then we handle the declarator and for a function-definition here as well. If the declarator is a declarator for a function-definition, *FUNCTION_DEFINITION_P will be TRUE upon return. By that point, the function-definition will have been completely parsed. FUNCTION_DEFINITION_P may be NULL if FUNCTION_DEFINITION_ALLOWED_P is FALSE. If MAYBE_RANGE_FOR_DECL is not NULL, the pointed tree will be set to the parsed declaration if it is an uninitialized single declarator not followed by a `;', or to error_mark_node otherwise. Either way, the trailing `;', if present, will not be consumed. If returned, this declarator will be created with SD_INITIALIZED but will not call cp_finish_decl. If INIT_LOC is not NULL, and *INIT_LOC is equal to UNKNOWN_LOCATION, and there is an initializer, the pointed location_t is set to the location of the '=' or `(', or '{' in C++11 token introducing the initializer. */ static tree cp_parser_init_declarator (cp_parser* parser, cp_decl_specifier_seq *decl_specifiers, vec<deferred_access_check, va_gc> *checks, bool function_definition_allowed_p, bool member_p, int declares_class_or_enum, bool* function_definition_p, tree* maybe_range_for_decl, location_t* init_loc) { cp_token *token = NULL, *asm_spec_start_token = NULL, *attributes_start_token = NULL; cp_declarator *declarator; tree prefix_attributes; tree attributes = NULL; tree asm_specification; tree initializer; tree decl = NULL_TREE; tree scope; int is_initialized; /* Only valid if IS_INITIALIZED is true. In that case, CPP_EQ if initialized with "= ..", CPP_OPEN_PAREN if initialized with "(...)". */ enum cpp_ttype initialization_kind; bool is_direct_init = false; bool is_non_constant_init; int ctor_dtor_or_conv_p; bool friend_p = cp_parser_friend_p (decl_specifiers); tree pushed_scope = NULL_TREE; bool range_for_decl_p = false; bool saved_default_arg_ok_p = parser->default_arg_ok_p; location_t tmp_init_loc = UNKNOWN_LOCATION; /* Gather the attributes that were provided with the decl-specifiers. 
*/ prefix_attributes = decl_specifiers->attributes; /* Assume that this is not the declarator for a function definition. */ if (function_definition_p) *function_definition_p = false; /* Default arguments are only permitted for function parameters. */ if (decl_spec_seq_has_spec_p (decl_specifiers, ds_typedef)) parser->default_arg_ok_p = false; /* Defer access checks while parsing the declarator; we cannot know what names are accessible until we know what is being declared. */ resume_deferring_access_checks (); /* Parse the declarator. */ token = cp_lexer_peek_token (parser->lexer); declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, &ctor_dtor_or_conv_p, /*parenthesized_p=*/NULL, member_p, friend_p); /* Gather up the deferred checks. */ stop_deferring_access_checks (); parser->default_arg_ok_p = saved_default_arg_ok_p; /* If the DECLARATOR was erroneous, there's no need to go further. */ if (declarator == cp_error_declarator) return error_mark_node; /* Check that the number of template-parameter-lists is OK. */ if (!cp_parser_check_declarator_template_parameters (parser, declarator, token->location)) return error_mark_node; if (declares_class_or_enum & 2) cp_parser_check_for_definition_in_return_type (declarator, decl_specifiers->type, decl_specifiers->locations[ds_type_spec]); /* Figure out what scope the entity declared by the DECLARATOR is located in. `grokdeclarator' sometimes changes the scope, so we compute it now. */ scope = get_scope_of_declarator (declarator); /* Perform any lookups in the declared type which were thought to be dependent, but are not in the scope of the declarator. */ decl_specifiers->type = maybe_update_decl_type (decl_specifiers->type, scope); /* If we're allowing GNU extensions, look for an asm-specification. */ if (cp_parser_allow_gnu_extensions_p (parser)) { /* Look for an asm-specification. */ asm_spec_start_token = cp_lexer_peek_token (parser->lexer); asm_specification = cp_parser_asm_specification_opt (parser); } else asm_specification = NULL_TREE; /* Look for attributes. */ attributes_start_token = cp_lexer_peek_token (parser->lexer); attributes = cp_parser_attributes_opt (parser); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); bool bogus_implicit_tmpl = false; if (function_declarator_p (declarator)) { /* Check to see if the token indicates the start of a function-definition. */ if (cp_parser_token_starts_function_definition_p (token)) { if (!function_definition_allowed_p) { /* If a function-definition should not appear here, issue an error message. */ cp_parser_error (parser, "a function-definition is not allowed here"); return error_mark_node; } location_t func_brace_location = cp_lexer_peek_token (parser->lexer)->location; /* Neither attributes nor an asm-specification are allowed on a function-definition. */ if (asm_specification) error_at (asm_spec_start_token->location, "an asm-specification is not allowed " "on a function-definition"); if (attributes) error_at (attributes_start_token->location, "attributes are not allowed " "on a function-definition"); /* This is a function-definition. */ *function_definition_p = true; /* Parse the function definition. */ if (member_p) decl = cp_parser_save_member_function_body (parser, decl_specifiers, declarator, prefix_attributes); else decl = (cp_parser_function_definition_from_specifiers_and_declarator (parser, decl_specifiers, prefix_attributes, declarator)); if (decl != error_mark_node && DECL_STRUCT_FUNCTION (decl)) { /* This is where the prologue starts... 
*/ DECL_STRUCT_FUNCTION (decl)->function_start_locus = func_brace_location; } return decl; } } else if (parser->fully_implicit_function_template_p) { /* A non-template declaration involving a function parameter list containing an implicit template parameter will be made into a template. If the resulting declaration is not going to be an actual function then finish the template scope here to prevent it. An error message will be issued once we have a decl to talk about. FIXME probably we should do type deduction rather than create an implicit template, but the standard currently doesn't allow it. */ bogus_implicit_tmpl = true; finish_fully_implicit_template (parser, NULL_TREE); } /* [dcl.dcl] Only in function declarations for constructors, destructors, and type conversions can the decl-specifier-seq be omitted. We explicitly postpone this check past the point where we handle function-definitions because we tolerate function-definitions that are missing their return types in some modes. */ if (!decl_specifiers->any_specifiers_p && ctor_dtor_or_conv_p <= 0) { cp_parser_error (parser, "expected constructor, destructor, or type conversion"); return error_mark_node; } /* An `=' or an `(', or an '{' in C++0x, indicates an initializer. */ if (token->type == CPP_EQ || token->type == CPP_OPEN_PAREN || token->type == CPP_OPEN_BRACE) { is_initialized = SD_INITIALIZED; initialization_kind = token->type; if (maybe_range_for_decl) *maybe_range_for_decl = error_mark_node; tmp_init_loc = token->location; if (init_loc && *init_loc == UNKNOWN_LOCATION) *init_loc = tmp_init_loc; if (token->type == CPP_EQ && function_declarator_p (declarator)) { cp_token *t2 = cp_lexer_peek_nth_token (parser->lexer, 2); if (t2->keyword == RID_DEFAULT) is_initialized = SD_DEFAULTED; else if (t2->keyword == RID_DELETE) is_initialized = SD_DELETED; } } else { /* If the init-declarator isn't initialized and isn't followed by a `,' or `;', it's not a valid init-declarator. */ if (token->type != CPP_COMMA && token->type != CPP_SEMICOLON) { if (maybe_range_for_decl && *maybe_range_for_decl != error_mark_node) range_for_decl_p = true; else { if (!maybe_range_for_decl) cp_parser_error (parser, "expected initializer"); return error_mark_node; } } is_initialized = SD_UNINITIALIZED; initialization_kind = CPP_EOF; } /* Because start_decl has side-effects, we should only call it if we know we're going ahead. By this point, we know that we cannot possibly be looking at any other construct. */ cp_parser_commit_to_tentative_parse (parser); /* Enter the newly declared entry in the symbol table. If we're processing a declaration in a class-specifier, we wait until after processing the initializer. */ if (!member_p) { if (parser->in_unbraced_linkage_specification_p) decl_specifiers->storage_class = sc_extern; decl = start_decl (declarator, decl_specifiers, range_for_decl_p? SD_INITIALIZED : is_initialized, attributes, prefix_attributes, &pushed_scope); cp_finalize_omp_declare_simd (parser, decl); /* Adjust location of decl if declarator->id_loc is more appropriate: set, and decl wasn't merged with another decl, in which case its location would be different from input_location, and more accurate. */ if (DECL_P (decl) && declarator->id_loc != UNKNOWN_LOCATION && DECL_SOURCE_LOCATION (decl) == input_location) DECL_SOURCE_LOCATION (decl) = declarator->id_loc; } else if (scope) /* Enter the SCOPE. That way unqualified names appearing in the initializer will be looked up in SCOPE. 
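For example (illustrative), given: struct S { static int i; static int j; }; int S::i = j; the unqualified `j' in the initializer finds S::j precisely because SCOPE (here, S) is entered.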
*/ pushed_scope = push_scope (scope); /* Perform deferred access control checks, now that we know in which SCOPE the declared entity resides. */ if (!member_p && decl) { tree saved_current_function_decl = NULL_TREE; /* If the entity being declared is a function, pretend that we are in its scope. If it is a `friend', it may have access to things that would not otherwise be accessible. */ if (TREE_CODE (decl) == FUNCTION_DECL) { saved_current_function_decl = current_function_decl; current_function_decl = decl; } /* Perform access checks for template parameters. */ cp_parser_perform_template_parameter_access_checks (checks); /* Perform the access control checks for the declarator and the decl-specifiers. */ perform_deferred_access_checks (tf_warning_or_error); /* Restore the saved value. */ if (TREE_CODE (decl) == FUNCTION_DECL) current_function_decl = saved_current_function_decl; } /* Parse the initializer. */ initializer = NULL_TREE; is_direct_init = false; is_non_constant_init = true; if (is_initialized) { if (function_declarator_p (declarator)) { if (initialization_kind == CPP_EQ) initializer = cp_parser_pure_specifier (parser); else { /* If the declaration was erroneous, we don't really know what the user intended, so just silently consume the initializer. */ if (decl != error_mark_node) error_at (tmp_init_loc, "initializer provided for function"); cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); } } else { /* We want to record the extra mangling scope for in-class initializers of class members and initializers of static data member templates. The former involves deferring parsing of the initializer until end of class as with default arguments. So right here we only handle the latter. */ if (!member_p && processing_template_decl) start_lambda_scope (decl); initializer = cp_parser_initializer (parser, &is_direct_init, &is_non_constant_init); if (!member_p && processing_template_decl) finish_lambda_scope (); if (initializer == error_mark_node) cp_parser_skip_to_end_of_statement (parser); } } /* The old parser allows attributes to appear after a parenthesized initializer. Mark Mitchell proposed removing this functionality on the GCC mailing lists on 2002-08-13. This parser accepts the attributes -- but ignores them. */ if (cp_parser_allow_gnu_extensions_p (parser) && initialization_kind == CPP_OPEN_PAREN) if (cp_parser_attributes_opt (parser)) warning (OPT_Wattributes, "attributes after parenthesized initializer ignored"); /* And now complain about a non-function implicit template. */ if (bogus_implicit_tmpl) error_at (DECL_SOURCE_LOCATION (decl), "non-function %qD declared as implicit template", decl); /* For an in-class declaration, use `grokfield' to create the declaration. */ if (member_p) { if (pushed_scope) { pop_scope (pushed_scope); pushed_scope = NULL_TREE; } decl = grokfield (declarator, decl_specifiers, initializer, !is_non_constant_init, /*asmspec=*/NULL_TREE, chainon (attributes, prefix_attributes)); if (decl && TREE_CODE (decl) == FUNCTION_DECL) cp_parser_save_default_args (parser, decl); cp_finalize_omp_declare_simd (parser, decl); } /* Finish processing the declaration. But, skip member declarations. */ if (!member_p && decl && decl != error_mark_node && !range_for_decl_p) { cp_finish_decl (decl, initializer, !is_non_constant_init, asm_specification, /* If the initializer is in parentheses, then this is a direct-initialization, which means that an `explicit' constructor is OK. 
Otherwise, an `explicit' constructor cannot be used. */ ((is_direct_init || !is_initialized) ? LOOKUP_NORMAL : LOOKUP_IMPLICIT)); } else if ((cxx_dialect != cxx98) && friend_p && decl && TREE_CODE (decl) == FUNCTION_DECL) /* Core issue #226 (C++0x only): A default template-argument shall not be specified in a friend class template declaration. */ check_default_tmpl_args (decl, current_template_parms, /*is_primary=*/true, /*is_partial=*/false, /*is_friend_decl=*/1); if (!friend_p && pushed_scope) pop_scope (pushed_scope); if (function_declarator_p (declarator) && parser->fully_implicit_function_template_p) { if (member_p) decl = finish_fully_implicit_template (parser, decl); else finish_fully_implicit_template (parser, /*member_decl_opt=*/0); } return decl; } /* Parse a declarator. declarator: direct-declarator ptr-operator declarator abstract-declarator: ptr-operator abstract-declarator [opt] direct-abstract-declarator GNU Extensions: declarator: attributes [opt] direct-declarator attributes [opt] ptr-operator declarator abstract-declarator: attributes [opt] ptr-operator abstract-declarator [opt] attributes [opt] direct-abstract-declarator If CTOR_DTOR_OR_CONV_P is not NULL, *CTOR_DTOR_OR_CONV_P is used to detect constructor, destructor or conversion operators. It is set to -1 if the declarator is a name, and +1 if it is a function. Otherwise it is set to zero. Usually you just want to test for >0, but internally the negative value is used. (The reason for CTOR_DTOR_OR_CONV_P is that a declaration must have a decl-specifier-seq unless it declares a constructor, destructor, or conversion. It might seem that we could check this condition in semantic analysis, rather than parsing, but that makes it difficult to handle something like `f()'. We want to notice that there are no decl-specifiers, and therefore realize that this is an expression, not a declaration.) If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to true iff the declarator is a direct-declarator of the form "(...)". MEMBER_P is true iff this declarator is a member-declarator. FRIEND_P is true iff this declarator is a friend. */ static cp_declarator * cp_parser_declarator (cp_parser* parser, cp_parser_declarator_kind dcl_kind, int* ctor_dtor_or_conv_p, bool* parenthesized_p, bool member_p, bool friend_p) { cp_declarator *declarator; enum tree_code code; cp_cv_quals cv_quals; tree class_type; tree gnu_attributes = NULL_TREE, std_attributes = NULL_TREE; /* Assume this is not a constructor, destructor, or type-conversion operator. */ if (ctor_dtor_or_conv_p) *ctor_dtor_or_conv_p = 0; if (cp_parser_allow_gnu_extensions_p (parser)) gnu_attributes = cp_parser_gnu_attributes_opt (parser); /* Check for the ptr-operator production. */ cp_parser_parse_tentatively (parser); /* Parse the ptr-operator. */ code = cp_parser_ptr_operator (parser, &class_type, &cv_quals, &std_attributes); /* If that worked, then we have a ptr-operator. */ if (cp_parser_parse_definitely (parser)) { /* If a ptr-operator was found, then this declarator was not parenthesized. */ if (parenthesized_p) *parenthesized_p = true; /* The dependent declarator is optional if we are parsing an abstract-declarator. */ if (dcl_kind != CP_PARSER_DECLARATOR_NAMED) cp_parser_parse_tentatively (parser); /* Parse the dependent declarator. 
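For example, in `int *const p;' the ptr-operator `*const' has just been parsed, and the dependent declarator parsed here is `p'.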
*/ declarator = cp_parser_declarator (parser, dcl_kind, /*ctor_dtor_or_conv_p=*/NULL, /*parenthesized_p=*/NULL, /*member_p=*/false, friend_p); /* If we are parsing an abstract-declarator, we must handle the case where the dependent declarator is absent. */ if (dcl_kind != CP_PARSER_DECLARATOR_NAMED && !cp_parser_parse_definitely (parser)) declarator = NULL; declarator = cp_parser_make_indirect_declarator (code, class_type, cv_quals, declarator, std_attributes); } /* Everything else is a direct-declarator. */ else { if (parenthesized_p) *parenthesized_p = cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN); declarator = cp_parser_direct_declarator (parser, dcl_kind, ctor_dtor_or_conv_p, member_p, friend_p); } if (gnu_attributes && declarator && declarator != cp_error_declarator) declarator->attributes = gnu_attributes; return declarator; } /* Parse a direct-declarator or direct-abstract-declarator. direct-declarator: declarator-id direct-declarator ( parameter-declaration-clause ) cv-qualifier-seq [opt] ref-qualifier [opt] exception-specification [opt] direct-declarator [ constant-expression [opt] ] ( declarator ) direct-abstract-declarator: direct-abstract-declarator [opt] ( parameter-declaration-clause ) cv-qualifier-seq [opt] ref-qualifier [opt] exception-specification [opt] direct-abstract-declarator [opt] [ constant-expression [opt] ] ( abstract-declarator ) Returns a representation of the declarator. DCL_KIND is CP_PARSER_DECLARATOR_ABSTRACT, if we are parsing a direct-abstract-declarator. It is CP_PARSER_DECLARATOR_NAMED, if we are parsing a direct-declarator. It is CP_PARSER_DECLARATOR_EITHER, if we can accept either - in the case of ambiguity we prefer an abstract declarator, as per [dcl.ambig.res]. CTOR_DTOR_OR_CONV_P, MEMBER_P, and FRIEND_P are as for cp_parser_declarator. */ static cp_declarator * cp_parser_direct_declarator (cp_parser* parser, cp_parser_declarator_kind dcl_kind, int* ctor_dtor_or_conv_p, bool member_p, bool friend_p) { cp_token *token; cp_declarator *declarator = NULL; tree scope = NULL_TREE; bool saved_default_arg_ok_p = parser->default_arg_ok_p; bool saved_in_declarator_p = parser->in_declarator_p; bool first = true; tree pushed_scope = NULL_TREE; while (true) { /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_OPEN_PAREN) { /* This is either a parameter-declaration-clause, or a parenthesized declarator. When we know we are parsing a named declarator, it must be a parenthesized declarator if FIRST is true. For instance, `(int)' is a parameter-declaration-clause, with an omitted direct-abstract-declarator. But `((*))', is a parenthesized abstract declarator. Finally, when T is a template parameter `(T)' is a parameter-declaration-clause, and not a parenthesized named declarator. We first try and parse a parameter-declaration-clause, and then try a nested declarator (if FIRST is true). It is not an error for it not to be a parameter-declaration-clause, even when FIRST is false. Consider, int i (int); int i (3); The first is the declaration of a function while the second is the definition of a variable, including its initializer. Having seen only the parenthesis, we cannot know which of these two alternatives should be selected. Even more complex are examples like: int i (int (a)); int i (int (3)); The former is a function-declaration; the latter is a variable initialization. Thus again, we try a parameter-declaration-clause, and if that fails, we back out and return. 
*/ if (!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED) { tree params; bool is_declarator = false; /* In a member-declarator, the only valid interpretation of a parenthesis is the start of a parameter-declaration-clause. (It is invalid to initialize a static data member with a parenthesized initializer; only the "=" form of initialization is permitted.) */ if (!member_p) cp_parser_parse_tentatively (parser); /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); if (first) { /* If this is going to be an abstract declarator, we're in a declarator and we can't have default args. */ parser->default_arg_ok_p = false; parser->in_declarator_p = true; } begin_scope (sk_function_parms, NULL_TREE); /* Parse the parameter-declaration-clause. */ params = cp_parser_parameter_declaration_clause (parser); /* Consume the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); /* If all went well, parse the cv-qualifier-seq, ref-qualifier and the exception-specification. */ if (member_p || cp_parser_parse_definitely (parser)) { cp_cv_quals cv_quals; cp_virt_specifiers virt_specifiers; cp_ref_qualifier ref_qual; tree exception_specification; tree late_return; tree attrs; bool memfn = (member_p || (pushed_scope && CLASS_TYPE_P (pushed_scope))); is_declarator = true; if (ctor_dtor_or_conv_p) *ctor_dtor_or_conv_p = *ctor_dtor_or_conv_p < 0; first = false; /* Parse the cv-qualifier-seq. */ cv_quals = cp_parser_cv_qualifier_seq_opt (parser); /* Parse the ref-qualifier. */ ref_qual = cp_parser_ref_qualifier_opt (parser); /* And the exception-specification. */ exception_specification = cp_parser_exception_specification_opt (parser); attrs = cp_parser_std_attribute_spec_seq (parser); /* In here, we handle cases where attribute is used after the function declaration. For example: void func (int x) __attribute__((vector(..))); */ if (flag_cilkplus && cp_next_tokens_can_be_gnu_attribute_p (parser)) { cp_parser_parse_tentatively (parser); tree attr = cp_parser_gnu_attributes_opt (parser); if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON) && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)) cp_parser_abort_tentative_parse (parser); else if (!cp_parser_parse_definitely (parser)) ; else attrs = chainon (attr, attrs); } late_return = (cp_parser_late_return_type_opt (parser, declarator, memfn ? cv_quals : -1)); /* Parse the virt-specifier-seq. */ virt_specifiers = cp_parser_virt_specifier_seq_opt (parser); /* Create the function-declarator. */ declarator = make_call_declarator (declarator, params, cv_quals, virt_specifiers, ref_qual, exception_specification, late_return); declarator->std_attributes = attrs; /* Any subsequent parameter lists are to do with return type, so are not those of the declared function. */ parser->default_arg_ok_p = false; } /* Remove the function parms from scope. */ pop_bindings_and_leave_scope (); if (is_declarator) /* Repeat the main loop. */ continue; } /* If this is the first, we can try a parenthesized declarator. */ if (first) { bool saved_in_type_id_in_expr_p; parser->default_arg_ok_p = saved_default_arg_ok_p; parser->in_declarator_p = saved_in_declarator_p; /* Consume the `('. */ cp_lexer_consume_token (parser->lexer); /* Parse the nested declarator. 
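For example, given `int (*fp)(double);', the `(' just consumed begins a parenthesized declarator, and the nested declarator parsed here is `*fp'.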
*/ saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; declarator = cp_parser_declarator (parser, dcl_kind, ctor_dtor_or_conv_p, /*parenthesized_p=*/NULL, member_p, friend_p); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; first = false; /* Expect a `)'. */ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) declarator = cp_error_declarator; if (declarator == cp_error_declarator) break; goto handle_declarator; } /* Otherwise, we must be done. */ else break; } else if ((!first || dcl_kind != CP_PARSER_DECLARATOR_NAMED) && token->type == CPP_OPEN_SQUARE && !cp_next_tokens_can_be_attribute_p (parser)) { /* Parse an array-declarator. */ tree bounds, attrs; if (ctor_dtor_or_conv_p) *ctor_dtor_or_conv_p = 0; first = false; parser->default_arg_ok_p = false; parser->in_declarator_p = true; /* Consume the `['. */ cp_lexer_consume_token (parser->lexer); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If the next token is `]', then there is no constant-expression. */ if (token->type != CPP_CLOSE_SQUARE) { bool non_constant_p; bounds = cp_parser_constant_expression (parser, /*allow_non_constant=*/true, &non_constant_p); if (!non_constant_p) /* OK */; else if (error_operand_p (bounds)) /* Already gave an error. */; else if (!parser->in_function_body || current_binding_level->kind == sk_function_parms) { /* Normally, the array bound must be an integral constant expression. However, as an extension, we allow VLAs in function scopes as long as they aren't part of a parameter declaration. */ cp_parser_error (parser, "array bound is not an integer constant"); bounds = error_mark_node; } else if (processing_template_decl && !type_dependent_expression_p (bounds)) { /* Remember this wasn't a constant-expression. */ bounds = build_nop (TREE_TYPE (bounds), bounds); TREE_SIDE_EFFECTS (bounds) = 1; } } else bounds = NULL_TREE; /* Look for the closing `]'. */ if (!cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE)) { declarator = cp_error_declarator; break; } attrs = cp_parser_std_attribute_spec_seq (parser); declarator = make_array_declarator (declarator, bounds); declarator->std_attributes = attrs; } else if (first && dcl_kind != CP_PARSER_DECLARATOR_ABSTRACT) { { tree qualifying_scope; tree unqualified_name; tree attrs; special_function_kind sfk; bool abstract_ok; bool pack_expansion_p = false; cp_token *declarator_id_start_token; /* Parse a declarator-id */ abstract_ok = (dcl_kind == CP_PARSER_DECLARATOR_EITHER); if (abstract_ok) { cp_parser_parse_tentatively (parser); /* If we see an ellipsis, we should be looking at a parameter pack. */ if (token->type == CPP_ELLIPSIS) { /* Consume the `...' */ cp_lexer_consume_token (parser->lexer); pack_expansion_p = true; } } declarator_id_start_token = cp_lexer_peek_token (parser->lexer); unqualified_name = cp_parser_declarator_id (parser, /*optional_p=*/abstract_ok); qualifying_scope = parser->scope; if (abstract_ok) { bool okay = false; if (!unqualified_name && pack_expansion_p) { /* Check whether an error occurred. */ okay = !cp_parser_error_occurred (parser); /* We already consumed the ellipsis to mark a parameter pack, but we have no way to report it, so abort the tentative parse. We will be exiting immediately anyway. 
*/ cp_parser_abort_tentative_parse (parser); } else okay = cp_parser_parse_definitely (parser); if (!okay) unqualified_name = error_mark_node; else if (unqualified_name && (qualifying_scope || (!identifier_p (unqualified_name)))) { cp_parser_error (parser, "expected unqualified-id"); unqualified_name = error_mark_node; } } if (!unqualified_name) return NULL; if (unqualified_name == error_mark_node) { declarator = cp_error_declarator; pack_expansion_p = false; declarator->parameter_pack_p = false; break; } attrs = cp_parser_std_attribute_spec_seq (parser); if (qualifying_scope && at_namespace_scope_p () && TREE_CODE (qualifying_scope) == TYPENAME_TYPE) { /* In the declaration of a member of a template class outside of the class itself, the SCOPE will sometimes be a TYPENAME_TYPE. For example, given: template <typename T> int S<T>::R::i = 3; the SCOPE will be a TYPENAME_TYPE for `S<T>::R'. In this context, we must resolve S<T>::R to an ordinary type, rather than a typename type. The reason we normally avoid resolving TYPENAME_TYPEs is that a specialization of `S' might render `S<T>::R' not a type. However, if `S' is specialized, then this `i' will not be used, so there is no harm in resolving the types here. */ tree type; /* Resolve the TYPENAME_TYPE. */ type = resolve_typename_type (qualifying_scope, /*only_current_p=*/false); /* If that failed, the declarator is invalid. */ if (TREE_CODE (type) == TYPENAME_TYPE) { if (typedef_variant_p (type)) error_at (declarator_id_start_token->location, "cannot define member of dependent typedef " "%qT", type); else error_at (declarator_id_start_token->location, "%<%T::%E%> is not a type", TYPE_CONTEXT (qualifying_scope), TYPE_IDENTIFIER (qualifying_scope)); } qualifying_scope = type; } sfk = sfk_none; if (unqualified_name) { tree class_type; if (qualifying_scope && CLASS_TYPE_P (qualifying_scope)) class_type = qualifying_scope; else class_type = current_class_type; if (TREE_CODE (unqualified_name) == TYPE_DECL) { tree name_type = TREE_TYPE (unqualified_name); if (class_type && same_type_p (name_type, class_type)) { if (qualifying_scope && CLASSTYPE_USE_TEMPLATE (name_type)) { error_at (declarator_id_start_token->location, "invalid use of constructor as a template"); inform (declarator_id_start_token->location, "use %<%T::%D%> instead of %<%T::%D%> to " "name the constructor in a qualified name", class_type, DECL_NAME (TYPE_TI_TEMPLATE (class_type)), class_type, name_type); declarator = cp_error_declarator; break; } else unqualified_name = constructor_name (class_type); } else { /* We do not attempt to print the declarator here because we do not have enough information about its original syntactic form. */ cp_parser_error (parser, "invalid declarator"); declarator = cp_error_declarator; break; } } if (class_type) { if (TREE_CODE (unqualified_name) == BIT_NOT_EXPR) sfk = sfk_destructor; else if (IDENTIFIER_TYPENAME_P (unqualified_name)) sfk = sfk_conversion; else if (/* There's no way to declare a constructor for an anonymous type, even if the type got a name for linkage purposes. 
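For example (illustrative), given: typedef struct { int i; } B; the name `B' denotes the anonymous struct for linkage purposes only, so no constructor can be declared with it.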
*/ !TYPE_WAS_ANONYMOUS (class_type) /* Handle correctly (c++/19200): struct S { struct T{}; friend void S(T); }; and also: namespace N { void S(); } struct S { friend void N::S(); }; */ && !(friend_p && class_type != qualifying_scope) && constructor_name_p (unqualified_name, class_type)) { unqualified_name = constructor_name (class_type); sfk = sfk_constructor; } else if (is_overloaded_fn (unqualified_name) && DECL_CONSTRUCTOR_P (get_first_fn (unqualified_name))) sfk = sfk_constructor; if (ctor_dtor_or_conv_p && sfk != sfk_none) *ctor_dtor_or_conv_p = -1; } } declarator = make_id_declarator (qualifying_scope, unqualified_name, sfk); declarator->std_attributes = attrs; declarator->id_loc = token->location; declarator->parameter_pack_p = pack_expansion_p; if (pack_expansion_p) maybe_warn_variadic_templates (); } handle_declarator:; scope = get_scope_of_declarator (declarator); if (scope) { /* Any names that appear after the declarator-id for a member are looked up in the containing scope. */ if (at_function_scope_p ()) { /* But declarations with qualified-ids can't appear in a function. */ cp_parser_error (parser, "qualified-id in declaration"); declarator = cp_error_declarator; break; } pushed_scope = push_scope (scope); } parser->in_declarator_p = true; if ((ctor_dtor_or_conv_p && *ctor_dtor_or_conv_p) || (declarator && declarator->kind == cdk_id)) /* Default args are only allowed on function declarations. */ parser->default_arg_ok_p = saved_default_arg_ok_p; else parser->default_arg_ok_p = false; first = false; } /* We're done. */ else break; } /* For an abstract declarator, we might wind up with nothing at this point. That's an error; the declarator is not optional. */ if (!declarator) cp_parser_error (parser, "expected declarator"); /* If we entered a scope, we must exit it now. */ if (pushed_scope) pop_scope (pushed_scope); parser->default_arg_ok_p = saved_default_arg_ok_p; parser->in_declarator_p = saved_in_declarator_p; return declarator; } /* Parse a ptr-operator. ptr-operator: * attribute-specifier-seq [opt] cv-qualifier-seq [opt] (C++11) * cv-qualifier-seq [opt] & :: [opt] nested-name-specifier * cv-qualifier-seq [opt] nested-name-specifier * attribute-specifier-seq [opt] cv-qualifier-seq [opt] (C++11) GNU Extension: ptr-operator: & cv-qualifier-seq [opt] Returns INDIRECT_REF if a pointer, or pointer-to-member, was used. Returns ADDR_EXPR if a reference was used, or NON_LVALUE_EXPR for an rvalue reference. In the case of a pointer-to-member, *TYPE is filled in with the TYPE containing the member. *CV_QUALS is filled in with the cv-qualifier-seq, or TYPE_UNQUALIFIED, if there are no cv-qualifiers. Returns ERROR_MARK if an error occurred. Note that the tree codes returned by this function have nothing to do with the types of trees that will be eventually be created to represent the pointer or reference type being parsed. They are just constants with suggestive names. */ static enum tree_code cp_parser_ptr_operator (cp_parser* parser, tree* type, cp_cv_quals *cv_quals, tree *attributes) { enum tree_code code = ERROR_MARK; cp_token *token; tree attrs = NULL_TREE; /* Assume that it's not a pointer-to-member. */ *type = NULL_TREE; /* And that there are no cv-qualifiers. */ *cv_quals = TYPE_UNQUALIFIED; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's a `*', `&' or `&&' we have a pointer or reference. 
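(A pointer-to-member, as in `int S::*pm;', instead begins with an optional `::' and a nested-name-specifier; that case is handled in the `else' branch below.)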
*/ if (token->type == CPP_MULT) code = INDIRECT_REF; else if (token->type == CPP_AND) code = ADDR_EXPR; else if ((cxx_dialect != cxx98) && token->type == CPP_AND_AND) /* C++0x only */ code = NON_LVALUE_EXPR; if (code != ERROR_MARK) { /* Consume the `*', `&' or `&&'. */ cp_lexer_consume_token (parser->lexer); /* A `*' can be followed by a cv-qualifier-seq, and so can a `&', if we are allowing GNU extensions. (The only qualifier that can legally appear after `&' is `restrict', but that is enforced during semantic analysis.) */ if (code == INDIRECT_REF || cp_parser_allow_gnu_extensions_p (parser)) *cv_quals = cp_parser_cv_qualifier_seq_opt (parser); attrs = cp_parser_std_attribute_spec_seq (parser); if (attributes != NULL) *attributes = attrs; } else { /* Try the pointer-to-member case. */ cp_parser_parse_tentatively (parser); /* Look for the optional `::' operator. */ cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false); /* Look for the nested-name-specifier. */ token = cp_lexer_peek_token (parser->lexer); cp_parser_nested_name_specifier (parser, /*typename_keyword_p=*/false, /*check_dependency_p=*/true, /*type_p=*/false, /*is_declaration=*/false); /* If we found it, and the next token is a `*', then we are indeed looking at a pointer-to-member operator. */ if (!cp_parser_error_occurred (parser) && cp_parser_require (parser, CPP_MULT, RT_MULT)) { /* Indicate that the `*' operator was used. */ code = INDIRECT_REF; if (TREE_CODE (parser->scope) == NAMESPACE_DECL) error_at (token->location, "%qD is a namespace", parser->scope); else if (TREE_CODE (parser->scope) == ENUMERAL_TYPE) error_at (token->location, "cannot form pointer to member of " "non-class %q#T", parser->scope); else { /* The type of which the member is a member is given by the current SCOPE. */ *type = parser->scope; /* The next name will not be qualified. */ parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; /* Look for optional c++11 attributes. */ attrs = cp_parser_std_attribute_spec_seq (parser); if (attributes != NULL) *attributes = attrs; /* Look for the optional cv-qualifier-seq. */ *cv_quals = cp_parser_cv_qualifier_seq_opt (parser); } } /* If that didn't work we don't have a ptr-operator. */ if (!cp_parser_parse_definitely (parser)) cp_parser_error (parser, "expected ptr-operator"); } return code; } /* Parse an (optional) cv-qualifier-seq. cv-qualifier-seq: cv-qualifier cv-qualifier-seq [opt] cv-qualifier: const volatile GNU Extension: cv-qualifier: __restrict__ Returns a bitmask representing the cv-qualifiers. */ static cp_cv_quals cp_parser_cv_qualifier_seq_opt (cp_parser* parser) { cp_cv_quals cv_quals = TYPE_UNQUALIFIED; while (true) { cp_token *token; cp_cv_quals cv_qualifier; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* See if it's a cv-qualifier. */ switch (token->keyword) { case RID_CONST: cv_qualifier = TYPE_QUAL_CONST; break; case RID_VOLATILE: cv_qualifier = TYPE_QUAL_VOLATILE; break; case RID_RESTRICT: cv_qualifier = TYPE_QUAL_RESTRICT; break; default: cv_qualifier = TYPE_UNQUALIFIED; break; } if (!cv_qualifier) break; if (cv_quals & cv_qualifier) { error_at (token->location, "duplicate cv-qualifier"); cp_lexer_purge_token (parser->lexer); } else { cp_lexer_consume_token (parser->lexer); cv_quals |= cv_qualifier; } } return cv_quals; } /* Parse an (optional) ref-qualifier ref-qualifier: & && Returns cp_ref_qualifier representing ref-qualifier.
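For example, in `void f() &;' the ref-qualifier is `&' (REF_QUAL_LVALUE), and in `void f() &&;' it is `&&' (REF_QUAL_RVALUE).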
*/ static cp_ref_qualifier cp_parser_ref_qualifier_opt (cp_parser* parser) { cp_ref_qualifier ref_qual = REF_QUAL_NONE; /* Don't try to parse bitwise '&' as a ref-qualifier (c++/57532). */ if (cxx_dialect < cxx11 && cp_parser_parsing_tentatively (parser)) return ref_qual; while (true) { cp_ref_qualifier curr_ref_qual = REF_QUAL_NONE; cp_token *token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_AND: curr_ref_qual = REF_QUAL_LVALUE; break; case CPP_AND_AND: curr_ref_qual = REF_QUAL_RVALUE; break; default: curr_ref_qual = REF_QUAL_NONE; break; } if (!curr_ref_qual) break; else if (ref_qual) { error_at (token->location, "multiple ref-qualifiers"); cp_lexer_purge_token (parser->lexer); } else { ref_qual = curr_ref_qual; cp_lexer_consume_token (parser->lexer); } } return ref_qual; } /* Parse an (optional) virt-specifier-seq. virt-specifier-seq: virt-specifier virt-specifier-seq [opt] virt-specifier: override final Returns a bitmask representing the virt-specifiers. */ static cp_virt_specifiers cp_parser_virt_specifier_seq_opt (cp_parser* parser) { cp_virt_specifiers virt_specifiers = VIRT_SPEC_UNSPECIFIED; while (true) { cp_token *token; cp_virt_specifiers virt_specifier; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* See if it's a virt-specifier-qualifier. */ if (token->type != CPP_NAME) break; if (!strcmp (IDENTIFIER_POINTER(token->u.value), "override")) { maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS); virt_specifier = VIRT_SPEC_OVERRIDE; } else if (!strcmp (IDENTIFIER_POINTER(token->u.value), "final")) { maybe_warn_cpp0x (CPP0X_OVERRIDE_CONTROLS); virt_specifier = VIRT_SPEC_FINAL; } else if (!strcmp (IDENTIFIER_POINTER(token->u.value), "__final")) { virt_specifier = VIRT_SPEC_FINAL; } else break; if (virt_specifiers & virt_specifier) { error_at (token->location, "duplicate virt-specifier"); cp_lexer_purge_token (parser->lexer); } else { cp_lexer_consume_token (parser->lexer); virt_specifiers |= virt_specifier; } } return virt_specifiers; } /* Used by handling of trailing-return-types and NSDMI, in which 'this' is in scope even though it isn't real. */ void inject_this_parameter (tree ctype, cp_cv_quals quals) { tree this_parm; if (current_class_ptr) { /* We don't clear this between NSDMIs. Is it already what we want? */ tree type = TREE_TYPE (TREE_TYPE (current_class_ptr)); if (same_type_ignoring_top_level_qualifiers_p (ctype, type) && cp_type_quals (type) == quals) return; } this_parm = build_this_parm (ctype, quals); /* Clear this first to avoid shortcut in cp_build_indirect_ref. */ current_class_ptr = NULL_TREE; current_class_ref = cp_build_indirect_ref (this_parm, RO_NULL, tf_warning_or_error); current_class_ptr = this_parm; } /* Return true iff our current scope is a non-static data member initializer. */ bool parsing_nsdmi (void) { /* We recognize NSDMI context by the context-less 'this' pointer set up by the function above. */ if (current_class_ptr && TREE_CODE (current_class_ptr) == PARM_DECL && DECL_CONTEXT (current_class_ptr) == NULL_TREE) return true; return false; } /* Parse a late-specified return type, if any. This is not a separate non-terminal, but part of a function declarator, which looks like -> trailing-type-specifier-seq abstract-declarator(opt) Returns the type indicated by the type-id. In addition to this, it parses any queued up omp declare simd clauses and Cilk Plus SIMD-enabled functions' vector attributes. QUALS is either a bitmask of cv_qualifiers or -1 for a non-member function.
*/ static tree cp_parser_late_return_type_opt (cp_parser* parser, cp_declarator *declarator, cp_cv_quals quals) { cp_token *token; tree type = NULL_TREE; bool declare_simd_p = (parser->omp_declare_simd && declarator && declarator->kind == cdk_id); bool cilk_simd_fn_vector_p = (parser->cilk_simd_fn_info && declarator && declarator->kind == cdk_id); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* A late-specified return type is indicated by an initial '->'. */ if (token->type != CPP_DEREF && !(declare_simd_p || cilk_simd_fn_vector_p)) return NULL_TREE; tree save_ccp = current_class_ptr; tree save_ccr = current_class_ref; if (quals >= 0) { /* DR 1207: 'this' is in scope in the trailing return type. */ inject_this_parameter (current_class_type, quals); } if (token->type == CPP_DEREF) { /* Consume the ->. */ cp_lexer_consume_token (parser->lexer); type = cp_parser_trailing_type_id (parser); } if (cilk_simd_fn_vector_p) declarator->std_attributes = cp_parser_late_parsing_cilk_simd_fn_info (parser, declarator->std_attributes); if (declare_simd_p) declarator->std_attributes = cp_parser_late_parsing_omp_declare_simd (parser, declarator->std_attributes); if (quals >= 0) { current_class_ptr = save_ccp; current_class_ref = save_ccr; } return type; } /* Parse a declarator-id. declarator-id: id-expression :: [opt] nested-name-specifier [opt] type-name In the `id-expression' case, the value returned is as for cp_parser_id_expression if the id-expression was an unqualified-id. If the id-expression was a qualified-id, then a SCOPE_REF is returned. The first operand is the scope (either a NAMESPACE_DECL or TREE_TYPE), but the second is still just a representation of an unqualified-id. */ static tree cp_parser_declarator_id (cp_parser* parser, bool optional_p) { tree id; /* The expression must be an id-expression. Assume that qualified names are the names of types so that: template <class T> int S<T>::R::i = 3; will work; we must treat `S<T>::R' as the name of a type. Similarly, assume that qualified names are templates, where required, so that: template <class T> int S<T>::R<T>::i = 3; will work, too. */ id = cp_parser_id_expression (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/false, /*template_p=*/NULL, /*declarator_p=*/true, optional_p); if (id && BASELINK_P (id)) id = BASELINK_FUNCTIONS (id); return id; } /* Parse a type-id. type-id: type-specifier-seq abstract-declarator [opt] Returns the TYPE specified. */ static tree cp_parser_type_id_1 (cp_parser* parser, bool is_template_arg, bool is_trailing_return) { cp_decl_specifier_seq type_specifier_seq; cp_declarator *abstract_declarator; /* Parse the type-specifier-seq. */ cp_parser_type_specifier_seq (parser, /*is_declaration=*/false, is_trailing_return, &type_specifier_seq); if (type_specifier_seq.type == error_mark_node) return error_mark_node; /* There might or might not be an abstract declarator. */ cp_parser_parse_tentatively (parser); /* Look for the declarator. */ abstract_declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_ABSTRACT, NULL, /*parenthesized_p=*/NULL, /*member_p=*/false, /*friend_p=*/false); /* Check to see if there really was a declarator. */ if (!cp_parser_parse_definitely (parser)) abstract_declarator = NULL; if (type_specifier_seq.type /* None of the valid uses of 'auto' in C++14 involve the type-id nonterminal, but it is valid in a trailing-return-type. 
*/ && !(cxx_dialect >= cxx14 && is_trailing_return) && type_uses_auto (type_specifier_seq.type)) { /* A type-id with type 'auto' is only ok if the abstract declarator is a function declarator with a late-specified return type. */ if (abstract_declarator && abstract_declarator->kind == cdk_function && abstract_declarator->u.function.late_return_type) /* OK */; else { error ("invalid use of %<auto%>"); return error_mark_node; } } return groktypename (&type_specifier_seq, abstract_declarator, is_template_arg); } static tree cp_parser_type_id (cp_parser *parser) { return cp_parser_type_id_1 (parser, false, false); } static tree cp_parser_template_type_arg (cp_parser *parser) { tree r; const char *saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in template arguments"); r = cp_parser_type_id_1 (parser, true, false); parser->type_definition_forbidden_message = saved_message; if (cxx_dialect >= cxx14 && type_uses_auto (r)) { error ("invalid use of %<auto%> in template argument"); r = error_mark_node; } return r; } static tree cp_parser_trailing_type_id (cp_parser *parser) { return cp_parser_type_id_1 (parser, false, true); } /* Parse a type-specifier-seq. type-specifier-seq: type-specifier type-specifier-seq [opt] GNU extension: type-specifier-seq: attributes type-specifier-seq [opt] If IS_DECLARATION is true, we are at the start of a "condition" or exception-declaration, so we might be followed by a declarator-id. If IS_TRAILING_RETURN is true, we are in a trailing-return-type, i.e. we've just seen "->". Sets *TYPE_SPECIFIER_SEQ to represent the sequence. */ static void cp_parser_type_specifier_seq (cp_parser* parser, bool is_declaration, bool is_trailing_return, cp_decl_specifier_seq *type_specifier_seq) { bool seen_type_specifier = false; cp_parser_flags flags = CP_PARSER_FLAGS_OPTIONAL; cp_token *start_token = NULL; /* Clear the TYPE_SPECIFIER_SEQ. */ clear_decl_specs (type_specifier_seq); /* In the context of a trailing return type, enum E { } is an elaborated-type-specifier followed by a function-body, not an enum-specifier. */ if (is_trailing_return) flags |= CP_PARSER_FLAGS_NO_TYPE_DEFINITIONS; /* Parse the type-specifiers and attributes. */ while (true) { tree type_specifier; bool is_cv_qualifier; /* Check for attributes first. */ if (cp_next_tokens_can_be_attribute_p (parser)) { type_specifier_seq->attributes = chainon (type_specifier_seq->attributes, cp_parser_attributes_opt (parser)); continue; } /* record the token of the beginning of the type specifier seq, for error reporting purposes*/ if (!start_token) start_token = cp_lexer_peek_token (parser->lexer); /* Look for the type-specifier. */ type_specifier = cp_parser_type_specifier (parser, flags, type_specifier_seq, /*is_declaration=*/false, NULL, &is_cv_qualifier); if (!type_specifier) { /* If the first type-specifier could not be found, this is not a type-specifier-seq at all. */ if (!seen_type_specifier) { /* Set in_declarator_p to avoid skipping to the semicolon. */ int in_decl = parser->in_declarator_p; parser->in_declarator_p = true; if (cp_parser_uncommitted_to_tentative_parse_p (parser) || !cp_parser_parse_and_diagnose_invalid_type_name (parser)) cp_parser_error (parser, "expected type-specifier"); parser->in_declarator_p = in_decl; type_specifier_seq->type = error_mark_node; return; } /* If subsequent type-specifiers could not be found, the type-specifier-seq is complete. 
*/ break; } seen_type_specifier = true; /* The standard says that a condition can be: type-specifier-seq declarator = assignment-expression However, given: struct S {}; if (int S = ...) we should treat the "S" as a declarator, not as a type-specifier. The standard doesn't say that explicitly for type-specifier-seq, but it does say that for decl-specifier-seq in an ordinary declaration. Perhaps it would be clearer just to allow a decl-specifier-seq here, and then add a semantic restriction that if any decl-specifiers that are not type-specifiers appear, the program is invalid. */ if (is_declaration && !is_cv_qualifier) flags |= CP_PARSER_FLAGS_NO_USER_DEFINED_TYPES; } } /* Return whether the function currently being declared has an associated template parameter list. */ static bool function_being_declared_is_template_p (cp_parser* parser) { if (!current_template_parms || processing_template_parmlist) return false; if (parser->implicit_template_scope) return true; if (at_class_scope_p () && TYPE_BEING_DEFINED (current_class_type)) return parser->num_template_parameter_lists != 0; return ((int) parser->num_template_parameter_lists > template_class_depth (current_class_type)); } /* Parse a parameter-declaration-clause. parameter-declaration-clause: parameter-declaration-list [opt] ... [opt] parameter-declaration-list , ... Returns a representation for the parameter declarations. A return value of NULL indicates a parameter-declaration-clause consisting only of an ellipsis. */ static tree cp_parser_parameter_declaration_clause (cp_parser* parser) { tree parameters; cp_token *token; bool ellipsis_p; bool is_error; struct cleanup { cp_parser* parser; int auto_is_implicit_function_template_parm_p; ~cleanup() { parser->auto_is_implicit_function_template_parm_p = auto_is_implicit_function_template_parm_p; } } cleanup = { parser, parser->auto_is_implicit_function_template_parm_p }; (void) cleanup; if (!processing_specialization && !processing_template_parmlist && !processing_explicit_instantiation) if (!current_function_decl || (current_class_type && LAMBDA_TYPE_P (current_class_type))) parser->auto_is_implicit_function_template_parm_p = true; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Check for trivial parameter-declaration-clauses. */ if (token->type == CPP_ELLIPSIS) { /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); return NULL_TREE; } else if (token->type == CPP_CLOSE_PAREN) /* There are no parameters. */ { #ifndef NO_IMPLICIT_EXTERN_C if (in_system_header_at (input_location) && current_class_type == NULL && current_lang_name == lang_name_c) return NULL_TREE; else #endif return void_list_node; } /* Check for `(void)', too, which is a special case. */ else if (token->keyword == RID_VOID && (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_CLOSE_PAREN)) { /* Consume the `void' token. */ cp_lexer_consume_token (parser->lexer); /* There are no parameters. */ return void_list_node; } /* Parse the parameter-declaration-list. */ parameters = cp_parser_parameter_declaration_list (parser, &is_error); /* If a parse error occurred while parsing the parameter-declaration-list, then the entire parameter-declaration-clause is erroneous. */ if (is_error) return NULL; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's a `,', the clause should terminate with an ellipsis. */ if (token->type == CPP_COMMA) { /* Consume the `,'. */ cp_lexer_consume_token (parser->lexer); /* Expect an ellipsis. 
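(This is the `parameter-declaration-list , ...' form, as in `int printf (const char *, ...);'.)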
*/ ellipsis_p = (cp_parser_require (parser, CPP_ELLIPSIS, RT_ELLIPSIS) != NULL); } /* It might also be `...' if the optional trailing `,' was omitted. */ else if (token->type == CPP_ELLIPSIS) { /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); /* And remember that we saw it. */ ellipsis_p = true; } else ellipsis_p = false; /* Finish the parameter list. */ if (!ellipsis_p) parameters = chainon (parameters, void_list_node); return parameters; } /* Parse a parameter-declaration-list. parameter-declaration-list: parameter-declaration parameter-declaration-list , parameter-declaration Returns a representation of the parameter-declaration-list, as for cp_parser_parameter_declaration_clause. However, the `void_list_node' is never appended to the list. Upon return, *IS_ERROR will be true iff an error occurred. */ static tree cp_parser_parameter_declaration_list (cp_parser* parser, bool *is_error) { tree parameters = NULL_TREE; tree *tail = &parameters; bool saved_in_unbraced_linkage_specification_p; int index = 0; /* Assume all will go well. */ *is_error = false; /* The special considerations that apply to a function within an unbraced linkage specifications do not apply to the parameters to the function. */ saved_in_unbraced_linkage_specification_p = parser->in_unbraced_linkage_specification_p; parser->in_unbraced_linkage_specification_p = false; /* Look for more parameters. */ while (true) { cp_parameter_declarator *parameter; tree decl = error_mark_node; bool parenthesized_p = false; int template_parm_idx = (function_being_declared_is_template_p (parser)? TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS (current_template_parms)) : 0); /* Parse the parameter. */ parameter = cp_parser_parameter_declaration (parser, /*template_parm_p=*/false, &parenthesized_p); /* We don't know yet if the enclosing context is deprecated, so wait and warn in grokparms if appropriate. */ deprecated_state = DEPRECATED_SUPPRESS; if (parameter) { /* If a function parameter pack was specified and an implicit template parameter was introduced during cp_parser_parameter_declaration, change any implicit parameters introduced into packs. */ if (parser->implicit_template_parms && parameter->declarator && parameter->declarator->parameter_pack_p) { int latest_template_parm_idx = TREE_VEC_LENGTH (INNERMOST_TEMPLATE_PARMS (current_template_parms)); if (latest_template_parm_idx != template_parm_idx) parameter->decl_specifiers.type = convert_generic_types_to_packs (parameter->decl_specifiers.type, template_parm_idx, latest_template_parm_idx); } decl = grokdeclarator (parameter->declarator, &parameter->decl_specifiers, PARM, parameter->default_argument != NULL_TREE, &parameter->decl_specifiers.attributes); } deprecated_state = DEPRECATED_NORMAL; /* If a parse error occurred parsing the parameter declaration, then the entire parameter-declaration-list is erroneous. */ if (decl == error_mark_node) { *is_error = true; parameters = error_mark_node; break; } if (parameter->decl_specifiers.attributes) cplus_decl_attributes (&decl, parameter->decl_specifiers.attributes, 0); if (DECL_NAME (decl)) decl = pushdecl (decl); if (decl != error_mark_node) { retrofit_lang_decl (decl); DECL_PARM_INDEX (decl) = ++index; DECL_PARM_LEVEL (decl) = function_parm_depth (); } /* Add the new parameter to the list. */ *tail = build_tree_list (parameter->default_argument, decl); tail = &TREE_CHAIN (*tail); /* Peek at the next token. 
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN) || cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS) /* These are for Objective-C++ */ || cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON) || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) /* The parameter-declaration-list is complete. */ break; else if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) { cp_token *token; /* Peek at the next token. */ token = cp_lexer_peek_nth_token (parser->lexer, 2); /* If it's an ellipsis, then the list is complete. */ if (token->type == CPP_ELLIPSIS) break; /* Otherwise, there must be more parameters. Consume the `,'. */ cp_lexer_consume_token (parser->lexer); /* When parsing something like: int i(float f, double d) we can tell after seeing the declaration for "f" that we are not looking at an initialization of a variable "i", but rather at the declaration of a function "i". Due to the fact that the parsing of template arguments (as specified to a template-id) requires backtracking we cannot use this technique when inside a template argument list. */ if (!parser->in_template_argument_list_p && !parser->in_type_id_in_expr_p && cp_parser_uncommitted_to_tentative_parse_p (parser) /* However, a parameter-declaration of the form "float(f)" (which is a valid declaration of a parameter "f") can also be interpreted as an expression (the conversion of "f" to "float"). */ && !parenthesized_p) cp_parser_commit_to_tentative_parse (parser); } else { cp_parser_error (parser, "expected %<,%> or %<...%>"); if (!cp_parser_uncommitted_to_tentative_parse_p (parser)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/false); break; } } parser->in_unbraced_linkage_specification_p = saved_in_unbraced_linkage_specification_p; /* Reset implicit_template_scope if we are about to leave the function parameter list that introduced it. Note that for out-of-line member definitions, there will be one or more class scopes before we get to the template parameter scope. */ if (cp_binding_level *its = parser->implicit_template_scope) if (cp_binding_level *maybe_its = current_binding_level->level_chain) { while (maybe_its->kind == sk_class) maybe_its = maybe_its->level_chain; if (maybe_its == its) { parser->implicit_template_parms = 0; parser->implicit_template_scope = 0; } } return parameters; } /* Parse a parameter declaration. parameter-declaration: decl-specifier-seq ... [opt] declarator decl-specifier-seq declarator = assignment-expression decl-specifier-seq ... [opt] abstract-declarator [opt] decl-specifier-seq abstract-declarator [opt] = assignment-expression If TEMPLATE_PARM_P is TRUE, then this parameter-declaration declares a template parameter. (In that case, a non-nested `>' token encountered during the parsing of the assignment-expression is not interpreted as a greater-than operator.) Returns a representation of the parameter, or NULL if an error occurs. If PARENTHESIZED_P is non-NULL, *PARENTHESIZED_P is set to true iff the declarator is of the form "(p)". */ static cp_parameter_declarator * cp_parser_parameter_declaration (cp_parser *parser, bool template_parm_p, bool *parenthesized_p) { int declares_class_or_enum; cp_decl_specifier_seq decl_specifiers; cp_declarator *declarator; tree default_argument; cp_token *token = NULL, *declarator_token_start = NULL; const char *saved_message; /* In a template parameter, `>' is not an operator. 
[temp.param] When parsing a default template-argument for a non-type template-parameter, the first non-nested `>' is taken as the end of the template parameter-list rather than a greater-than operator. */ /* Type definitions may not appear in parameter types. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in parameter types"); /* Parse the declaration-specifiers. */ cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_NONE, &decl_specifiers, &declares_class_or_enum); /* Complain about missing 'typename' or other invalid type names. */ if (!decl_specifiers.any_type_specifiers_p && cp_parser_parse_and_diagnose_invalid_type_name (parser)) decl_specifiers.type = error_mark_node; /* If an error occurred, there's no reason to attempt to parse the rest of the declaration. */ if (cp_parser_error_occurred (parser)) { parser->type_definition_forbidden_message = saved_message; return NULL; } /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If the next token is a `)', `,', `=', `>', or `...', then there is no declarator. However, when variadic templates are enabled, there may be a declarator following `...'. */ if (token->type == CPP_CLOSE_PAREN || token->type == CPP_COMMA || token->type == CPP_EQ || token->type == CPP_GREATER) { declarator = NULL; if (parenthesized_p) *parenthesized_p = false; } /* Otherwise, there should be a declarator. */ else { bool saved_default_arg_ok_p = parser->default_arg_ok_p; parser->default_arg_ok_p = false; /* After seeing a decl-specifier-seq, if the next token is not a "(", there is no possibility that the code is a valid expression. Therefore, if parsing tentatively, we commit at this point. */ if (!parser->in_template_argument_list_p /* In an expression context, having seen: (int((char ... we cannot be sure whether we are looking at a function-type (taking a "char" as a parameter) or a cast of some object of type "char" to "int". */ && !parser->in_type_id_in_expr_p && cp_parser_uncommitted_to_tentative_parse_p (parser) && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE) && cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_PAREN)) cp_parser_commit_to_tentative_parse (parser); /* Parse the declarator. */ declarator_token_start = token; declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_EITHER, /*ctor_dtor_or_conv_p=*/NULL, parenthesized_p, /*member_p=*/false, /*friend_p=*/false); parser->default_arg_ok_p = saved_default_arg_ok_p; /* After the declarator, allow more attributes. */ decl_specifiers.attributes = chainon (decl_specifiers.attributes, cp_parser_attributes_opt (parser)); } /* If the next token is an ellipsis, and we have not seen a declarator name, and the type of the declarator contains parameter packs but it is not a TYPE_PACK_EXPANSION, then we actually have a parameter pack expansion expression. Otherwise, leave the ellipsis for a C-style variadic function. */ token = cp_lexer_peek_token (parser->lexer); if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { tree type = decl_specifiers.type; if (type && DECL_P (type)) type = TREE_TYPE (type); if (type && TREE_CODE (type) != TYPE_PACK_EXPANSION && declarator_can_be_parameter_pack (declarator) && (!declarator || !declarator->parameter_pack_p) && uses_parameter_packs (type)) { /* Consume the `...'. 
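   For example (illustrative addition, not part of the original
   source):

     template <typename... T> void f (T...);  // "..." is a pack
                                              // expansion; consumed here
     void g (int...);                         // no packs in "int": the
                                              // "..." is left for the
                                              // C-style variadic case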
*/ cp_lexer_consume_token (parser->lexer); maybe_warn_variadic_templates (); /* Build a pack expansion type */ if (declarator) declarator->parameter_pack_p = true; else decl_specifiers.type = make_pack_expansion (type); } } /* The restriction on defining new types applies only to the type of the parameter, not to the default argument. */ parser->type_definition_forbidden_message = saved_message; /* If the next token is `=', then process a default argument. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { token = cp_lexer_peek_token (parser->lexer); /* If we are defining a class, then the tokens that make up the default argument must be saved and processed later. */ if (!template_parm_p && at_class_scope_p () && TYPE_BEING_DEFINED (current_class_type) && !LAMBDA_TYPE_P (current_class_type)) default_argument = cp_parser_cache_defarg (parser, /*nsdmi=*/false); /* Outside of a class definition, we can just parse the assignment-expression. */ else default_argument = cp_parser_default_argument (parser, template_parm_p); if (!parser->default_arg_ok_p) { if (flag_permissive) warning (0, "deprecated use of default argument for parameter of non-function"); else { error_at (token->location, "default arguments are only " "permitted for function parameters"); default_argument = NULL_TREE; } } else if ((declarator && declarator->parameter_pack_p) || (decl_specifiers.type && PACK_EXPANSION_P (decl_specifiers.type))) { /* Find the name of the parameter pack. */ cp_declarator *id_declarator = declarator; while (id_declarator && id_declarator->kind != cdk_id) id_declarator = id_declarator->declarator; if (id_declarator && id_declarator->kind == cdk_id) error_at (declarator_token_start->location, template_parm_p ? G_("template parameter pack %qD " "cannot have a default argument") : G_("parameter pack %qD cannot have " "a default argument"), id_declarator->u.id.unqualified_name); else error_at (declarator_token_start->location, template_parm_p ? G_("template parameter pack cannot have " "a default argument") : G_("parameter pack cannot have a " "default argument")); default_argument = NULL_TREE; } } else default_argument = NULL_TREE; return make_parameter_declarator (&decl_specifiers, declarator, default_argument); } /* Parse a default argument and return it. TEMPLATE_PARM_P is true if this is a default argument for a non-type template parameter. */ static tree cp_parser_default_argument (cp_parser *parser, bool template_parm_p) { tree default_argument = NULL_TREE; bool saved_greater_than_is_operator_p; bool saved_local_variables_forbidden_p; bool non_constant_p, is_direct_init; /* Make sure that PARSER->GREATER_THAN_IS_OPERATOR_P is set correctly. */ saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = !template_parm_p; /* Local variable names (and the `this' keyword) may not appear in a default argument. */ saved_local_variables_forbidden_p = parser->local_variables_forbidden_p; parser->local_variables_forbidden_p = true; /* Parse the assignment-expression. */ if (template_parm_p) push_deferring_access_checks (dk_no_deferred); tree saved_class_ptr = NULL_TREE; tree saved_class_ref = NULL_TREE; /* The "this" pointer is not valid in a default argument. 
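   For example (illustrative addition, not part of the original
   source):

     struct S {
       int i;
       void f (int j = this->i);   // error: "this" may not be used
     };                            // in a default argument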
*/ if (cfun) { saved_class_ptr = current_class_ptr; cp_function_chain->x_current_class_ptr = NULL_TREE; saved_class_ref = current_class_ref; cp_function_chain->x_current_class_ref = NULL_TREE; } default_argument = cp_parser_initializer (parser, &is_direct_init, &non_constant_p); /* Restore the "this" pointer. */ if (cfun) { cp_function_chain->x_current_class_ptr = saved_class_ptr; cp_function_chain->x_current_class_ref = saved_class_ref; } if (BRACE_ENCLOSED_INITIALIZER_P (default_argument)) maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); if (template_parm_p) pop_deferring_access_checks (); parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; parser->local_variables_forbidden_p = saved_local_variables_forbidden_p; return default_argument; } /* Parse a function-body. function-body: compound-statement */ static void cp_parser_function_body (cp_parser *parser, bool in_function_try_block) { cp_parser_compound_statement (parser, NULL, in_function_try_block, true); } /* Parse a ctor-initializer-opt followed by a function-body. Return true if a ctor-initializer was present. When IN_FUNCTION_TRY_BLOCK is true we are parsing a function-try-block. */ static bool cp_parser_ctor_initializer_opt_and_function_body (cp_parser *parser, bool in_function_try_block) { tree body, list; bool ctor_initializer_p; const bool check_body_p = DECL_CONSTRUCTOR_P (current_function_decl) && DECL_DECLARED_CONSTEXPR_P (current_function_decl); tree last = NULL; /* Begin the function body. */ body = begin_function_body (); /* Parse the optional ctor-initializer. */ ctor_initializer_p = cp_parser_ctor_initializer_opt (parser); /* If we're parsing a constexpr constructor definition, we need to check that the constructor body is indeed empty. However, before we get to cp_parser_function_body a lot of junk has been generated, so we can't just check that we have an empty block. Rather we take a snapshot of the outermost block, and check whether cp_parser_function_body changed its state. */ if (check_body_p) { list = cur_stmt_list; if (STATEMENT_LIST_TAIL (list)) last = STATEMENT_LIST_TAIL (list)->stmt; } /* Parse the function-body. */ cp_parser_function_body (parser, in_function_try_block); if (check_body_p) check_constexpr_ctor_body (last, list, /*complain=*/true); /* Finish the function body. */ finish_function_body (body); return ctor_initializer_p; } /* Parse an initializer. initializer: = initializer-clause ( expression-list ) Returns an expression representing the initializer. If no initializer is present, NULL_TREE is returned. *IS_DIRECT_INIT is set to FALSE if the `= initializer-clause' production is used, and TRUE otherwise. *IS_DIRECT_INIT is set to TRUE if there is no initializer present. If there is an initializer, and it is not a constant-expression, *NON_CONSTANT_P is set to true; otherwise it is set to false. */ static tree cp_parser_initializer (cp_parser* parser, bool* is_direct_init, bool* non_constant_p) { cp_token *token; tree init; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Let our caller know whether or not this initializer was parenthesized. */ *is_direct_init = (token->type != CPP_EQ); /* Assume that the initializer is constant. */ *non_constant_p = false; if (token->type == CPP_EQ) { /* Consume the `='. */ cp_lexer_consume_token (parser->lexer); /* Parse the initializer-clause. 
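   (Illustrative addition, not part of the original source -- the
   alternatives cp_parser_initializer distinguishes:

      int i = 42;     // "= initializer-clause" -- this branch
      int j (42);     // "( expression-list )" -- next branch
      int k { 42 };   // braced-init-list -- last branch
   )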
*/ init = cp_parser_initializer_clause (parser, non_constant_p); } else if (token->type == CPP_OPEN_PAREN) { vec<tree, va_gc> *vec; vec = cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, non_constant_p); if (vec == NULL) return error_mark_node; init = build_tree_list_vec (vec); release_tree_vector (vec); } else if (token->type == CPP_OPEN_BRACE) { cp_lexer_set_source_position (parser->lexer); maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); init = cp_parser_braced_list (parser, non_constant_p); CONSTRUCTOR_IS_DIRECT_INIT (init) = 1; } else { /* Anything else is an error. */ cp_parser_error (parser, "expected initializer"); init = error_mark_node; } return init; } /* Parse an initializer-clause. initializer-clause: assignment-expression braced-init-list Returns an expression representing the initializer. If the `assignment-expression' production is used the value returned is simply a representation for the expression. Otherwise, calls cp_parser_braced_list. */ static tree cp_parser_initializer_clause (cp_parser* parser, bool* non_constant_p) { tree initializer; /* Assume the expression is constant. */ *non_constant_p = false; /* If it is not a `{', then we are looking at an assignment-expression. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)) { initializer = cp_parser_constant_expression (parser, /*allow_non_constant_p=*/true, non_constant_p); } else initializer = cp_parser_braced_list (parser, non_constant_p); return initializer; } /* Parse a brace-enclosed initializer list. braced-init-list: { initializer-list , [opt] } { } Returns a CONSTRUCTOR. The CONSTRUCTOR_ELTS will be the elements of the initializer-list (or NULL, if the last production is used). The TREE_TYPE for the CONSTRUCTOR will be NULL_TREE. There is no way to detect whether or not the optional trailing `,' was provided. NON_CONSTANT_P is as for cp_parser_initializer. */ static tree cp_parser_braced_list (cp_parser* parser, bool* non_constant_p) { tree initializer; /* Consume the `{' token. */ cp_lexer_consume_token (parser->lexer); /* Create a CONSTRUCTOR to represent the braced-initializer. */ initializer = make_node (CONSTRUCTOR); /* If it's not a `}', then there is a non-trivial initializer. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_BRACE)) { /* Parse the initializer list. */ CONSTRUCTOR_ELTS (initializer) = cp_parser_initializer_list (parser, non_constant_p); /* A trailing `,' token is allowed. */ if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); } else *non_constant_p = false; /* Now, there should be a trailing `}'. */ cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); TREE_TYPE (initializer) = init_list_type_node; return initializer; } /* Consume tokens up to, and including, the next non-nested closing `]'. Returns true iff we found a closing `]'. */ static bool cp_parser_skip_to_closing_square_bracket (cp_parser *parser) { unsigned square_depth = 0; while (true) { cp_token * token = cp_lexer_peek_token (parser->lexer); switch (token->type) { case CPP_EOF: case CPP_PRAGMA_EOL: /* If we've run out of tokens, then there is no closing `]'. */ return false; case CPP_OPEN_SQUARE: ++square_depth; break; case CPP_CLOSE_SQUARE: if (!square_depth--) { cp_lexer_consume_token (parser->lexer); return true; } break; default: break; } /* Consume the token. */ cp_lexer_consume_token (parser->lexer); } } /* Return true if we are looking at an array-designator, false otherwise. 
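   For example (illustrative addition, not part of the original
   source):

     int a[4] = { [2] = 7 };            // "[2] =" is an array designator
     void (*fp[1]) () = { [] () {} };   // "[" starts a lambda-introducer
                                        // instead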
*/ static bool cp_parser_array_designator_p (cp_parser *parser) { /* Consume the `['. */ cp_lexer_consume_token (parser->lexer); cp_lexer_save_tokens (parser->lexer); /* Skip tokens until the next token is a closing square bracket. If we find the closing `]', and the next token is a `=', then we are looking at an array designator. */ bool array_designator_p = (cp_parser_skip_to_closing_square_bracket (parser) && cp_lexer_next_token_is (parser->lexer, CPP_EQ)); /* Roll back the tokens we skipped. */ cp_lexer_rollback_tokens (parser->lexer); return array_designator_p; } /* Parse an initializer-list. initializer-list: initializer-clause ... [opt] initializer-list , initializer-clause ... [opt] GNU Extension: initializer-list: designation initializer-clause ...[opt] initializer-list , designation initializer-clause ...[opt] designation: . identifier = identifier : [ constant-expression ] = Returns a vec of constructor_elt. The VALUE of each elt is an expression for the initializer. If the INDEX of the elt is non-NULL, it is the IDENTIFIER_NODE naming the field to initialize. NON_CONSTANT_P is as for cp_parser_initializer. */ static vec<constructor_elt, va_gc> * cp_parser_initializer_list (cp_parser* parser, bool* non_constant_p) { vec<constructor_elt, va_gc> *v = NULL; /* Assume all of the expressions are constant. */ *non_constant_p = false; /* Parse the rest of the list. */ while (true) { cp_token *token; tree designator; tree initializer; bool clause_non_constant_p; /* If the next token is an identifier and the following one is a colon, we are looking at the GNU designated-initializer syntax. */ if (cp_parser_allow_gnu_extensions_p (parser) && cp_lexer_next_token_is (parser->lexer, CPP_NAME) && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON) { /* Warn the user that they are using an extension. */ pedwarn (input_location, OPT_Wpedantic, "ISO C++ does not allow designated initializers"); /* Consume the identifier. */ designator = cp_lexer_consume_token (parser->lexer)->u.value; /* Consume the `:'. */ cp_lexer_consume_token (parser->lexer); } /* Also handle the C99 syntax, '. id ='. */ else if (cp_parser_allow_gnu_extensions_p (parser) && cp_lexer_next_token_is (parser->lexer, CPP_DOT) && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_NAME && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_EQ) { /* Warn the user that they are using an extension. */ pedwarn (input_location, OPT_Wpedantic, "ISO C++ does not allow C99 designated initializers"); /* Consume the `.'. */ cp_lexer_consume_token (parser->lexer); /* Consume the identifier. */ designator = cp_lexer_consume_token (parser->lexer)->u.value; /* Consume the `='. */ cp_lexer_consume_token (parser->lexer); } /* Also handle C99 array designators, '[ const ] ='. */ else if (cp_parser_allow_gnu_extensions_p (parser) && !c_dialect_objc () && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE)) { /* In C++11, [ could start a lambda-introducer. */ bool non_const = false; cp_parser_parse_tentatively (parser); if (!cp_parser_array_designator_p (parser)) { cp_parser_simulate_error (parser); designator = NULL_TREE; } else { designator = cp_parser_constant_expression (parser, true, &non_const); cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); cp_parser_require (parser, CPP_EQ, RT_EQ); } if (!cp_parser_parse_definitely (parser)) designator = NULL_TREE; else if (non_const) require_potential_rvalue_constant_expression (designator); } else designator = NULL_TREE; /* Parse the initializer. 
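   (Illustrative addition, not part of the original source -- the three
   designator forms recognized above:

      struct P { int x; };
      P p1 = { x: 1 };        // GNU "identifier :" form
      P p2 = { .x = 1 };      // C99 ". identifier =" form
      int a[] = { [0] = 1 };  // C99 array-designator form
   )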
*/ initializer = cp_parser_initializer_clause (parser, &clause_non_constant_p); /* If any clause is non-constant, so is the entire initializer. */ if (clause_non_constant_p) *non_constant_p = true; /* If we have an ellipsis, this is an initializer pack expansion. */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { /* Consume the `...'. */ cp_lexer_consume_token (parser->lexer); /* Turn the initializer into an initializer expansion. */ initializer = make_pack_expansion (initializer); } /* Add it to the vector. */ CONSTRUCTOR_APPEND_ELT (v, designator, initializer); /* If the next token is not a comma, we have reached the end of the list. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Peek at the next token. */ token = cp_lexer_peek_nth_token (parser->lexer, 2); /* If the next token is a `}', then we're still done. An initializer-clause can have a trailing `,' after the initializer-list and before the closing `}'. */ if (token->type == CPP_CLOSE_BRACE) break; /* Consume the `,' token. */ cp_lexer_consume_token (parser->lexer); } return v; } /* Classes [gram.class] */ /* Parse a class-name. class-name: identifier template-id TYPENAME_KEYWORD_P is true iff the `typename' keyword has been used to indicate that names looked up in dependent types should be assumed to be types. TEMPLATE_KEYWORD_P is true iff the `template' keyword has been used to indicate that the name that appears next is a template. TAG_TYPE indicates the explicit tag given before the type name, if any. If CHECK_DEPENDENCY_P is FALSE, names are looked up in dependent scopes. If CLASS_HEAD_P is TRUE, this class is the class being defined in a class-head. Returns the TYPE_DECL representing the class. */ static tree cp_parser_class_name (cp_parser *parser, bool typename_keyword_p, bool template_keyword_p, enum tag_types tag_type, bool check_dependency_p, bool class_head_p, bool is_declaration) { tree decl; tree scope; bool typename_p; cp_token *token; tree identifier = NULL_TREE; /* All class-names start with an identifier. */ token = cp_lexer_peek_token (parser->lexer); if (token->type != CPP_NAME && token->type != CPP_TEMPLATE_ID) { cp_parser_error (parser, "expected class-name"); return error_mark_node; } /* PARSER->SCOPE can be cleared when parsing the template-arguments to a template-id, so we save it here. */ scope = parser->scope; if (scope == error_mark_node) return error_mark_node; /* Any name names a type if we're following the `typename' keyword in a qualified name where the enclosing scope is type-dependent. */ typename_p = (typename_keyword_p && scope && TYPE_P (scope) && dependent_type_p (scope)); /* Handle the common case (an identifier, but not a template-id) efficiently. */ if (token->type == CPP_NAME && !cp_parser_nth_token_starts_template_argument_list_p (parser, 2)) { cp_token *identifier_token; bool ambiguous_p; /* Look for the identifier. */ identifier_token = cp_lexer_peek_token (parser->lexer); ambiguous_p = identifier_token->error_reported; identifier = cp_parser_identifier (parser); /* If the next token isn't an identifier, we are certainly not looking at a class-name. */ if (identifier == error_mark_node) decl = error_mark_node; /* If we know this is a type-name, there's no need to look it up. */ else if (typename_p) decl = identifier; else { tree ambiguous_decls; /* If we already know that this lookup is ambiguous, then we've already issued an error message; there's no reason to check again. 
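   (Illustrative addition, not part of the original source: e.g. after

      namespace A { struct T; }
      namespace B { struct T; }
      using namespace A;
      using namespace B;

   a lookup of "T" as a class-name is ambiguous.)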
*/ if (ambiguous_p) { cp_parser_simulate_error (parser); return error_mark_node; } /* If the next token is a `::', then the name must be a type name. [basic.lookup.qual] During the lookup for a name preceding the :: scope resolution operator, object, function, and enumerator names are ignored. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)) tag_type = typename_type; /* Look up the name. */ decl = cp_parser_lookup_name (parser, identifier, tag_type, /*is_template=*/false, /*is_namespace=*/false, check_dependency_p, &ambiguous_decls, identifier_token->location); if (ambiguous_decls) { if (cp_parser_parsing_tentatively (parser)) cp_parser_simulate_error (parser); return error_mark_node; } } } else { /* Try a template-id. */ decl = cp_parser_template_id (parser, template_keyword_p, check_dependency_p, tag_type, is_declaration); if (decl == error_mark_node) return error_mark_node; } decl = cp_parser_maybe_treat_template_as_class (decl, class_head_p); /* If this is a typename, create a TYPENAME_TYPE. */ if (typename_p && decl != error_mark_node) { decl = make_typename_type (scope, decl, typename_type, /*complain=*/tf_error); if (decl != error_mark_node) decl = TYPE_NAME (decl); } decl = strip_using_decl (decl); /* Check to see that it is really the name of a class. */ if (TREE_CODE (decl) == TEMPLATE_ID_EXPR && identifier_p (TREE_OPERAND (decl, 0)) && cp_lexer_next_token_is (parser->lexer, CPP_SCOPE)) /* Situations like this: template <typename T> struct A { typename T::template X<int>::I i; }; are problematic. Is `T::template X<int>' a class-name? The standard does not seem to be definitive, but there is no other valid interpretation of the following `::'. Therefore, those names are considered class-names. */ { decl = make_typename_type (scope, decl, tag_type, tf_error); if (decl != error_mark_node) decl = TYPE_NAME (decl); } else if (TREE_CODE (decl) != TYPE_DECL || TREE_TYPE (decl) == error_mark_node || !MAYBE_CLASS_TYPE_P (TREE_TYPE (decl)) /* In Objective-C 2.0, a classname followed by '.' starts a dot-syntax expression, and it's not a type-name. */ || (c_dialect_objc () && cp_lexer_peek_token (parser->lexer)->type == CPP_DOT && objc_is_class_name (decl))) decl = error_mark_node; if (decl == error_mark_node) cp_parser_error (parser, "expected class-name"); else if (identifier && !parser->scope) maybe_note_name_used_in_class (identifier, decl); return decl; } /* Parse a class-specifier. class-specifier: class-head { member-specification [opt] } Returns the TREE_TYPE representing the class. */ static tree cp_parser_class_specifier_1 (cp_parser* parser) { tree type; tree attributes = NULL_TREE; bool nested_name_specifier_p; unsigned saved_num_template_parameter_lists; bool saved_in_function_body; unsigned char in_statement; bool in_switch_statement_p; bool saved_in_unbraced_linkage_specification_p; tree old_scope = NULL_TREE; tree scope = NULL_TREE; cp_token *closing_brace; push_deferring_access_checks (dk_no_deferred); /* Parse the class-head. */ type = cp_parser_class_head (parser, &nested_name_specifier_p); /* If the class-head was a semantic disaster, skip the entire body of the class. */ if (!type) { cp_parser_skip_to_end_of_block_or_statement (parser); pop_deferring_access_checks (); return error_mark_node; } /* Look for the `{'. */ if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE)) { pop_deferring_access_checks (); return error_mark_node; } cp_ensure_no_omp_declare_simd (parser); /* Issue an error message if type-definitions are forbidden here. 
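   For example (illustrative addition, not part of the original
   source):

     void f (struct S { int i; } s);   // error: types may not be
                                       // defined in parameter types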
*/ cp_parser_check_type_definition (parser); /* Remember that we are defining one more class. */ ++parser->num_classes_being_defined; /* Inside the class, surrounding template-parameter-lists do not apply. */ saved_num_template_parameter_lists = parser->num_template_parameter_lists; parser->num_template_parameter_lists = 0; /* We are not in a function body. */ saved_in_function_body = parser->in_function_body; parser->in_function_body = false; /* Or in a loop. */ in_statement = parser->in_statement; parser->in_statement = 0; /* Or in a switch. */ in_switch_statement_p = parser->in_switch_statement_p; parser->in_switch_statement_p = false; /* We are not immediately inside an extern "lang" block. */ saved_in_unbraced_linkage_specification_p = parser->in_unbraced_linkage_specification_p; parser->in_unbraced_linkage_specification_p = false; /* Start the class. */ if (nested_name_specifier_p) { scope = CP_DECL_CONTEXT (TYPE_MAIN_DECL (type)); old_scope = push_inner_scope (scope); } type = begin_class_definition (type); if (type == error_mark_node) /* If the type is erroneous, skip the entire body of the class. */ cp_parser_skip_to_closing_brace (parser); else /* Parse the member-specification. */ cp_parser_member_specification_opt (parser); /* Look for the trailing `}'. */ closing_brace = cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); /* Look for trailing attributes to apply to this class. */ if (cp_parser_allow_gnu_extensions_p (parser)) attributes = cp_parser_gnu_attributes_opt (parser); if (type != error_mark_node) type = finish_struct (type, attributes); if (nested_name_specifier_p) pop_inner_scope (old_scope, scope); /* We've finished a type definition. Check for the common syntax error of forgetting a semicolon after the definition. We need to be careful, as we can't just check for not-a-semicolon and be done with it; the user might have typed: class X { } c = ...; class X { } *p = ...; and so forth. Instead, enumerate all the possible tokens that might follow this production; if we don't see one of them, then complain and silently insert the semicolon. */ { cp_token *token = cp_lexer_peek_token (parser->lexer); bool want_semicolon = true; if (cp_next_tokens_can_be_std_attribute_p (parser)) /* Don't try to parse c++11 attributes here. As per the grammar, that should be a task for cp_parser_decl_specifier_seq. */ want_semicolon = false; switch (token->type) { case CPP_NAME: case CPP_SEMICOLON: case CPP_MULT: case CPP_AND: case CPP_OPEN_PAREN: case CPP_CLOSE_PAREN: case CPP_COMMA: want_semicolon = false; break; /* While it's legal for type qualifiers and storage class specifiers to follow type definitions in the grammar, only compiler testsuites contain code like that. Assume that if we see such code, then what we're really seeing is a case like: class X { } const <type> var = ...; or class Y { } static <type> func (...) ... i.e. the qualifier or specifier applies to the next declaration. To do so, however, we need to look ahead one more token to see if *that* token is a type specifier. This code could be improved to handle: class Z { } static const <type> var = ...; */ case CPP_KEYWORD: if (keyword_is_decl_specifier (token->keyword)) { cp_token *lookahead = cp_lexer_peek_nth_token (parser->lexer, 2); /* Handling user-defined types here would be nice, but very tricky. 
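   (Illustrative addition, not part of the original source: the
   unhandled case is e.g.

      class Z { } static my_type_t var = ...;

   where recognizing "my_type_t" as a type-specifier would require name
   lookup rather than a simple keyword check.)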
*/ want_semicolon = (lookahead->type == CPP_KEYWORD && keyword_begins_type_specifier (lookahead->keyword)); } break; default: break; } /* If we don't have a type, then something is very wrong and we shouldn't try to do anything clever. Likewise for not seeing the closing brace. */ if (closing_brace && TYPE_P (type) && want_semicolon) { cp_token_position prev = cp_lexer_previous_token_position (parser->lexer); cp_token *prev_token = cp_lexer_token_at (parser->lexer, prev); location_t loc = prev_token->location; if (CLASSTYPE_DECLARED_CLASS (type)) error_at (loc, "expected %<;%> after class definition"); else if (TREE_CODE (type) == RECORD_TYPE) error_at (loc, "expected %<;%> after struct definition"); else if (TREE_CODE (type) == UNION_TYPE) error_at (loc, "expected %<;%> after union definition"); else gcc_unreachable (); /* Unget one token and smash it to look as though we encountered a semicolon in the input stream. */ cp_lexer_set_token_position (parser->lexer, prev); token = cp_lexer_peek_token (parser->lexer); token->type = CPP_SEMICOLON; token->keyword = RID_MAX; } } /* If this class is not itself within the scope of another class, then we need to parse the bodies of all of the queued function definitions. Note that the queued functions defined in a class are not always processed immediately following the class-specifier for that class. Consider: struct A { struct B { void f() { sizeof (A); } }; }; If `f' were processed before the processing of `A' were completed, there would be no way to compute the size of `A'. Note that the nesting we are interested in here is lexical -- not the semantic nesting given by TYPE_CONTEXT. In particular, for: struct A { struct B; }; struct A::B { void f() { } }; there is no need to delay the parsing of `A::B::f'. */ if (--parser->num_classes_being_defined == 0) { tree decl; tree class_type = NULL_TREE; tree pushed_scope = NULL_TREE; unsigned ix; cp_default_arg_entry *e; tree save_ccp, save_ccr; /* In a first pass, parse default arguments to the functions. Then, in a second pass, parse the bodies of the functions. This two-phased approach handles cases like: struct S { void f() { g(); } void g(int i = 3); }; */ FOR_EACH_VEC_SAFE_ELT (unparsed_funs_with_default_args, ix, e) { decl = e->decl; /* If there are default arguments that have not yet been processed, take care of them now. */ if (class_type != e->class_type) { if (pushed_scope) pop_scope (pushed_scope); class_type = e->class_type; pushed_scope = push_scope (class_type); } /* Make sure that any template parameters are in scope. */ maybe_begin_member_template_processing (decl); /* Parse the default argument expressions. */ cp_parser_late_parsing_default_args (parser, decl); /* Remove any template parameters from the symbol table. */ maybe_end_member_template_processing (); } vec_safe_truncate (unparsed_funs_with_default_args, 0); /* Now parse any NSDMIs. */ save_ccp = current_class_ptr; save_ccr = current_class_ref; FOR_EACH_VEC_SAFE_ELT (unparsed_nsdmis, ix, decl) { if (class_type != DECL_CONTEXT (decl)) { if (pushed_scope) pop_scope (pushed_scope); class_type = DECL_CONTEXT (decl); pushed_scope = push_scope (class_type); } inject_this_parameter (class_type, TYPE_UNQUALIFIED); cp_parser_late_parsing_nsdmi (parser, decl); } vec_safe_truncate (unparsed_nsdmis, 0); current_class_ptr = save_ccp; current_class_ref = save_ccr; if (pushed_scope) pop_scope (pushed_scope); /* Now do some post-NSDMI bookkeeping. 
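   (Illustrative addition, not part of the original source: an NSDMI
   whose parsing was deferred above:

      struct S {
        int i = j + 1;           // parsed only after the whole class
        static const int j = 2;  // is known, so that "j" is in scope
      };
   )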
*/ FOR_EACH_VEC_SAFE_ELT (unparsed_classes, ix, class_type) after_nsdmi_defaulted_late_checks (class_type); vec_safe_truncate (unparsed_classes, 0); after_nsdmi_defaulted_late_checks (type); /* Now parse the body of the functions. */ if (flag_openmp) { /* OpenMP UDRs need to be parsed before all other functions. */ FOR_EACH_VEC_SAFE_ELT (unparsed_funs_with_definitions, ix, decl) if (DECL_OMP_DECLARE_REDUCTION_P (decl)) cp_parser_late_parsing_for_member (parser, decl); FOR_EACH_VEC_SAFE_ELT (unparsed_funs_with_definitions, ix, decl) if (!DECL_OMP_DECLARE_REDUCTION_P (decl)) cp_parser_late_parsing_for_member (parser, decl); } else FOR_EACH_VEC_SAFE_ELT (unparsed_funs_with_definitions, ix, decl) cp_parser_late_parsing_for_member (parser, decl); vec_safe_truncate (unparsed_funs_with_definitions, 0); } else vec_safe_push (unparsed_classes, type); /* Put back any saved access checks. */ pop_deferring_access_checks (); /* Restore saved state. */ parser->in_switch_statement_p = in_switch_statement_p; parser->in_statement = in_statement; parser->in_function_body = saved_in_function_body; parser->num_template_parameter_lists = saved_num_template_parameter_lists; parser->in_unbraced_linkage_specification_p = saved_in_unbraced_linkage_specification_p; return type; } static tree cp_parser_class_specifier (cp_parser* parser) { tree ret; timevar_push (TV_PARSE_STRUCT); ret = cp_parser_class_specifier_1 (parser); timevar_pop (TV_PARSE_STRUCT); return ret; } /* Parse a class-head. class-head: class-key identifier [opt] base-clause [opt] class-key nested-name-specifier identifier class-virt-specifier [opt] base-clause [opt] class-key nested-name-specifier [opt] template-id base-clause [opt] class-virt-specifier: final GNU Extensions: class-key attributes identifier [opt] base-clause [opt] class-key attributes nested-name-specifier identifier base-clause [opt] class-key attributes nested-name-specifier [opt] template-id base-clause [opt] Upon return BASES is initialized to the list of base classes (or NULL, if there are none) in the same form returned by cp_parser_base_clause. Returns the TYPE of the indicated class. Sets *NESTED_NAME_SPECIFIER_P to TRUE iff one of the productions involving a nested-name-specifier was used, and FALSE otherwise. Returns error_mark_node if this is not a class-head. Returns NULL_TREE if the class-head is syntactically valid, but semantically invalid in a way that means we should skip the entire body of the class. */ static tree cp_parser_class_head (cp_parser* parser, bool* nested_name_specifier_p) { tree nested_name_specifier; enum tag_types class_key; tree id = NULL_TREE; tree type = NULL_TREE; tree attributes; tree bases; cp_virt_specifiers virt_specifiers = VIRT_SPEC_UNSPECIFIED; bool template_id_p = false; bool qualified_p = false; bool invalid_nested_name_p = false; bool invalid_explicit_specialization_p = false; bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p; tree pushed_scope = NULL_TREE; unsigned num_templates; cp_token *type_start_token = NULL, *nested_name_specifier_token_start = NULL; /* Assume no nested-name-specifier will be present. */ *nested_name_specifier_p = false; /* Assume no template parameter lists will be used in defining the type. */ num_templates = 0; parser->colon_corrects_to_scope_p = false; /* Look for the class-key. */ class_key = cp_parser_class_key (parser); if (class_key == none_type) return error_mark_node; /* Parse the attributes. 
*/ attributes = cp_parser_attributes_opt (parser); /* If the next token is `::', that is invalid -- but sometimes people do try to write: struct ::S {}; Handle this gracefully by accepting the extra qualifier, and then issuing an error about it later if this really is a class-head. If it turns out just to be an elaborated type specifier, remain silent. */ if (cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false)) qualified_p = true; push_deferring_access_checks (dk_no_check); /* Determine the name of the class. Begin by looking for an optional nested-name-specifier. */ nested_name_specifier_token_start = cp_lexer_peek_token (parser->lexer); nested_name_specifier = cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/false, /*check_dependency_p=*/false, /*type_p=*/true, /*is_declaration=*/false); /* If there was a nested-name-specifier, then there *must* be an identifier. */ if (nested_name_specifier) { type_start_token = cp_lexer_peek_token (parser->lexer); /* Although the grammar says `identifier', it really means `class-name' or `template-name'. You are only allowed to define a class that has already been declared with this syntax. The proposed resolution for Core Issue 180 says that wherever you see `class T::X' you should treat `X' as a type-name. It is OK to define an inaccessible class; for example: class A { class B; }; class A::B {}; We do not know if we will see a class-name, or a template-name. We look for a class-name first, in case the class-name is a template-id; if we looked for the template-name first we would stop after the template-name. */ cp_parser_parse_tentatively (parser); type = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, class_type, /*check_dependency_p=*/false, /*class_head_p=*/true, /*is_declaration=*/false); /* If that didn't work, ignore the nested-name-specifier. */ if (!cp_parser_parse_definitely (parser)) { invalid_nested_name_p = true; type_start_token = cp_lexer_peek_token (parser->lexer); id = cp_parser_identifier (parser); if (id == error_mark_node) id = NULL_TREE; } /* If we could not find a corresponding TYPE, treat this declaration like an unqualified declaration. */ if (type == error_mark_node) nested_name_specifier = NULL_TREE; /* Otherwise, count the number of templates used in TYPE and its containing scopes. */ else { tree scope; for (scope = TREE_TYPE (type); scope && TREE_CODE (scope) != NAMESPACE_DECL; scope = get_containing_scope (scope)) if (TYPE_P (scope) && CLASS_TYPE_P (scope) && CLASSTYPE_TEMPLATE_INFO (scope) && PRIMARY_TEMPLATE_P (CLASSTYPE_TI_TEMPLATE (scope)) && (!CLASSTYPE_TEMPLATE_SPECIALIZATION (scope) || uses_template_parms (CLASSTYPE_TI_ARGS (scope)))) ++num_templates; } } /* Otherwise, the identifier is optional. */ else { /* We don't know whether what comes next is a template-id, an identifier, or nothing at all. */ cp_parser_parse_tentatively (parser); /* Check for a template-id. */ type_start_token = cp_lexer_peek_token (parser->lexer); id = cp_parser_template_id (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/true, class_key, /*is_declaration=*/true); /* If that didn't work, it could still be an identifier. 
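   For example (illustrative addition, not part of the original
   source):

     template <typename T> struct S { };  // class-head name: the
                                          // identifier "S"
     template <> struct S<int> { };       // class-head name: the
                                          // template-id "S<int>"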
*/ if (!cp_parser_parse_definitely (parser)) { if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { type_start_token = cp_lexer_peek_token (parser->lexer); id = cp_parser_identifier (parser); } else id = NULL_TREE; } else { template_id_p = true; ++num_templates; } } pop_deferring_access_checks (); if (id) { cp_parser_check_for_invalid_template_id (parser, id, class_key, type_start_token->location); } virt_specifiers = cp_parser_virt_specifier_seq_opt (parser); /* If it's not a `:' or a `{' then we can't really be looking at a class-head, since a class-head only appears as part of a class-specifier. We have to detect this situation before calling xref_tag, since that has irreversible side-effects. */ if (!cp_parser_next_token_starts_class_definition_p (parser)) { cp_parser_error (parser, "expected %<{%> or %<:%>"); type = error_mark_node; goto out; } /* At this point, we're going ahead with the class-specifier, even if some other problem occurs. */ cp_parser_commit_to_tentative_parse (parser); if (virt_specifiers & VIRT_SPEC_OVERRIDE) { cp_parser_error (parser, "cannot specify %<override%> for a class"); type = error_mark_node; goto out; } /* Issue the error about the overly-qualified name now. */ if (qualified_p) { cp_parser_error (parser, "global qualification of class name is invalid"); type = error_mark_node; goto out; } else if (invalid_nested_name_p) { cp_parser_error (parser, "qualified name does not name a class"); type = error_mark_node; goto out; } else if (nested_name_specifier) { tree scope; /* Reject typedef-names in class heads. */ if (!DECL_IMPLICIT_TYPEDEF_P (type)) { error_at (type_start_token->location, "invalid class name in declaration of %qD", type); type = NULL_TREE; goto done; } /* Figure out in what scope the declaration is being placed. */ scope = current_scope (); /* If that scope does not contain the scope in which the class was originally declared, the program is invalid. */ if (scope && !is_ancestor (scope, nested_name_specifier)) { if (at_namespace_scope_p ()) error_at (type_start_token->location, "declaration of %qD in namespace %qD which does not " "enclose %qD", type, scope, nested_name_specifier); else error_at (type_start_token->location, "declaration of %qD in %qD which does not enclose %qD", type, scope, nested_name_specifier); type = NULL_TREE; goto done; } /* [dcl.meaning] A declarator-id shall not be qualified except for the definition of a ... nested class outside of its class ... [or] the definition or explicit instantiation of a class member of a namespace outside of its namespace. */ if (scope == nested_name_specifier) { permerror (nested_name_specifier_token_start->location, "extra qualification not allowed"); nested_name_specifier = NULL_TREE; num_templates = 0; } } /* An explicit-specialization must be preceded by "template <>". If it is not, try to recover gracefully. */ if (at_namespace_scope_p () && parser->num_template_parameter_lists == 0 && template_id_p) { error_at (type_start_token->location, "an explicit specialization must be preceded by %<template <>%>"); invalid_explicit_specialization_p = true; /* Take the same action that would have been taken by cp_parser_explicit_specialization. */ ++parser->num_template_parameter_lists; begin_specialization (); } /* There must be no "return" statements between this point and the end of this function; set "type" to the correct return value and use "goto done;" to return. */ /* Make sure that the right number of template parameters were present. 
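   For example (illustrative addition, not part of the original
   source):

     template <typename T> struct A { template <typename U> struct B; };

     template <typename T>   // two parameter lists are required
     template <typename U>   // to define the member template
     struct A<T>::B { };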
*/ if (!cp_parser_check_template_parameters (parser, num_templates, type_start_token->location, /*declarator=*/NULL)) { /* If something went wrong, there is no point in even trying to process the class-definition. */ type = NULL_TREE; goto done; } /* Look up the type. */ if (template_id_p) { if (TREE_CODE (id) == TEMPLATE_ID_EXPR && (DECL_FUNCTION_TEMPLATE_P (TREE_OPERAND (id, 0)) || TREE_CODE (TREE_OPERAND (id, 0)) == OVERLOAD)) { error_at (type_start_token->location, "function template %qD redeclared as a class template", id); type = error_mark_node; } else { type = TREE_TYPE (id); type = maybe_process_partial_specialization (type); } if (nested_name_specifier) pushed_scope = push_scope (nested_name_specifier); } else if (nested_name_specifier) { tree class_type; /* Given: template <typename T> struct S { struct T }; template <typename T> struct S<T>::T { }; we will get a TYPENAME_TYPE when processing the definition of `S::T'. We need to resolve it to the actual type before we try to define it. */ if (TREE_CODE (TREE_TYPE (type)) == TYPENAME_TYPE) { class_type = resolve_typename_type (TREE_TYPE (type), /*only_current_p=*/false); if (TREE_CODE (class_type) != TYPENAME_TYPE) type = TYPE_NAME (class_type); else { cp_parser_error (parser, "could not resolve typename type"); type = error_mark_node; } } if (maybe_process_partial_specialization (TREE_TYPE (type)) == error_mark_node) { type = NULL_TREE; goto done; } class_type = current_class_type; /* Enter the scope indicated by the nested-name-specifier. */ pushed_scope = push_scope (nested_name_specifier); /* Get the canonical version of this type. */ type = TYPE_MAIN_DECL (TREE_TYPE (type)); /* Call push_template_decl if it seems like we should be defining a template either from the template headers or the type we're defining, so that we diagnose both extra and missing headers. */ if ((PROCESSING_REAL_TEMPLATE_DECL_P () || CLASSTYPE_TEMPLATE_INFO (TREE_TYPE (type))) && !CLASSTYPE_TEMPLATE_SPECIALIZATION (TREE_TYPE (type))) { type = push_template_decl (type); if (type == error_mark_node) { type = NULL_TREE; goto done; } } type = TREE_TYPE (type); *nested_name_specifier_p = true; } else /* The name is not a nested name. */ { /* If the class was unnamed, create a dummy name. */ if (!id) id = make_anon_name (); type = xref_tag (class_key, id, /*tag_scope=*/ts_current, parser->num_template_parameter_lists); } /* Indicate whether this class was declared as a `class' or as a `struct'. */ if (TREE_CODE (type) == RECORD_TYPE) CLASSTYPE_DECLARED_CLASS (type) = (class_key == class_type); cp_parser_check_class_key (class_key, type); /* If this type was already complete, and we see another definition, that's an error. */ if (type != error_mark_node && COMPLETE_TYPE_P (type)) { error_at (type_start_token->location, "redefinition of %q#T", type); error_at (type_start_token->location, "previous definition of %q+#T", type); type = NULL_TREE; goto done; } else if (type == error_mark_node) type = NULL_TREE; if (type) { /* Apply attributes now, before any use of the class as a template argument in its base list. */ cplus_decl_attributes (&type, attributes, (int)ATTR_FLAG_TYPE_IN_PLACE); fixup_attribute_variants (type); } /* We will have entered the scope containing the class; the names of base classes should be looked up in that context. For example: struct A { struct B {}; struct C; }; struct A::C : B {}; is valid. */ /* Get the list of base-classes, if there is one. 
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_COLON)) { /* PR59482: enter the class scope so that base-specifiers are looked up correctly. */ if (type) pushclass (type); bases = cp_parser_base_clause (parser); /* PR59482: get out of the previously pushed class scope so that the subsequent pops pop the right thing. */ if (type) popclass (); } else bases = NULL_TREE; /* If we're really defining a class, process the base classes. If they're invalid, fail. */ if (type && cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE) && !xref_basetypes (type, bases)) type = NULL_TREE; done: /* Leave the scope given by the nested-name-specifier. We will enter the class scope itself while processing the members. */ if (pushed_scope) pop_scope (pushed_scope); if (invalid_explicit_specialization_p) { end_specialization (); --parser->num_template_parameter_lists; } if (type) DECL_SOURCE_LOCATION (TYPE_NAME (type)) = type_start_token->location; if (type && (virt_specifiers & VIRT_SPEC_FINAL)) CLASSTYPE_FINAL (type) = 1; out: parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; return type; } /* Parse a class-key. class-key: class struct union Returns the kind of class-key specified, or none_type to indicate error. */ static enum tag_types cp_parser_class_key (cp_parser* parser) { cp_token *token; enum tag_types tag_type; /* Look for the class-key. */ token = cp_parser_require (parser, CPP_KEYWORD, RT_CLASS_KEY); if (!token) return none_type; /* Check to see if the TOKEN is a class-key. */ tag_type = cp_parser_token_is_class_key (token); if (!tag_type) cp_parser_error (parser, "expected class-key"); return tag_type; } /* Parse a type-parameter-key. type-parameter-key: class typename */ static void cp_parser_type_parameter_key (cp_parser* parser) { /* Look for the type-parameter-key. */ enum tag_types tag_type = none_type; cp_token *token = cp_lexer_peek_token (parser->lexer); if ((tag_type = cp_parser_token_is_type_parameter_key (token)) != none_type) { cp_lexer_consume_token (parser->lexer); if (pedantic && tag_type == typename_type && cxx_dialect < cxx1z) /* typename is not allowed in a template template parameter by the standard until C++1Z. */ pedwarn (token->location, OPT_Wpedantic, "ISO C++ forbids typename key in template template parameter;" " use -std=c++1z or -std=gnu++1z"); } else cp_parser_error (parser, "expected %<class%> or %<typename%>"); return; } /* Parse an (optional) member-specification. member-specification: member-declaration member-specification [opt] access-specifier : member-specification [opt] */ static void cp_parser_member_specification_opt (cp_parser* parser) { while (true) { cp_token *token; enum rid keyword; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's a `}', or EOF then we've seen all the members. */ if (token->type == CPP_CLOSE_BRACE || token->type == CPP_EOF || token->type == CPP_PRAGMA_EOL) break; /* See if this token is a keyword. */ keyword = token->keyword; switch (keyword) { case RID_PUBLIC: case RID_PROTECTED: case RID_PRIVATE: /* Consume the access-specifier. */ cp_lexer_consume_token (parser->lexer); /* Remember which access-specifier is active. */ current_access_specifier = token->u.value; /* Look for the `:'. */ cp_parser_require (parser, CPP_COLON, RT_COLON); break; default: /* Accept #pragmas at class scope. */ if (token->type == CPP_PRAGMA) { cp_parser_pragma (parser, pragma_member); break; } /* Otherwise, the next construction must be a member-declaration. 
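   For example (illustrative addition, not part of the original
   source):

     class C {
     public:      // access-specifier: handled by the case above
       int i;     // member-declaration: handled here
     };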
*/ cp_parser_member_declaration (parser); } } } /* Parse a member-declaration. member-declaration: decl-specifier-seq [opt] member-declarator-list [opt] ; function-definition ; [opt] :: [opt] nested-name-specifier template [opt] unqualified-id ; using-declaration template-declaration alias-declaration member-declarator-list: member-declarator member-declarator-list , member-declarator member-declarator: declarator pure-specifier [opt] declarator constant-initializer [opt] identifier [opt] : constant-expression GNU Extensions: member-declaration: __extension__ member-declaration member-declarator: declarator attributes [opt] pure-specifier [opt] declarator attributes [opt] constant-initializer [opt] identifier [opt] attributes [opt] : constant-expression C++0x Extensions: member-declaration: static_assert-declaration */ static void cp_parser_member_declaration (cp_parser* parser) { cp_decl_specifier_seq decl_specifiers; tree prefix_attributes; tree decl; int declares_class_or_enum; bool friend_p; cp_token *token = NULL; cp_token *decl_spec_token_start = NULL; cp_token *initializer_token_start = NULL; int saved_pedantic; bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p; /* Check for the `__extension__' keyword. */ if (cp_parser_extension_opt (parser, &saved_pedantic)) { /* Recurse. */ cp_parser_member_declaration (parser); /* Restore the old value of the PEDANTIC flag. */ pedantic = saved_pedantic; return; } /* Check for a template-declaration. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE)) { /* An explicit specialization here is an error condition, and we expect the specialization handler to detect and report this. */ if (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_LESS && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_GREATER) cp_parser_explicit_specialization (parser); else cp_parser_template_declaration (parser, /*member_p=*/true); return; } /* Check for a using-declaration. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_USING)) { if (cxx_dialect < cxx11) { /* Parse the using-declaration. */ cp_parser_using_declaration (parser, /*access_declaration_p=*/false); return; } else { tree decl; bool alias_decl_expected; cp_parser_parse_tentatively (parser); decl = cp_parser_alias_declaration (parser); /* Note that if we actually see the '=' token after the identifier, cp_parser_alias_declaration commits the tentative parse. In that case, we really expect an alias-declaration. Otherwise, we expect a using declaration. */ alias_decl_expected = !cp_parser_uncommitted_to_tentative_parse_p (parser); cp_parser_parse_definitely (parser); if (alias_decl_expected) finish_member_declaration (decl); else cp_parser_using_declaration (parser, /*access_declaration_p=*/false); return; } } /* Check for @defs. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_DEFS)) { tree ivar, member; tree ivar_chains = cp_parser_objc_defs_expression (parser); ivar = ivar_chains; while (ivar) { member = ivar; ivar = TREE_CHAIN (member); TREE_CHAIN (member) = NULL_TREE; finish_member_declaration (member); } return; } /* If the next token is `static_assert' we have a static assertion. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC_ASSERT)) { cp_parser_static_assert (parser, /*member_p=*/true); return; } parser->colon_corrects_to_scope_p = false; if (cp_parser_using_declaration (parser, /*access_declaration_p=*/true)) goto out; /* Parse the decl-specifier-seq. 
*/ decl_spec_token_start = cp_lexer_peek_token (parser->lexer); cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_OPTIONAL, &decl_specifiers, &declares_class_or_enum); /* Check for an invalid type-name. */ if (!decl_specifiers.any_type_specifiers_p && cp_parser_parse_and_diagnose_invalid_type_name (parser)) goto out; /* If there is no declarator, then the decl-specifier-seq should specify a type. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) { /* If there was no decl-specifier-seq, and the next token is a `;', then we have something like: struct S { ; }; [class.mem] Each member-declaration shall declare at least one member name of the class. */ if (!decl_specifiers.any_specifiers_p) { cp_token *token = cp_lexer_peek_token (parser->lexer); if (!in_system_header_at (token->location)) pedwarn (token->location, OPT_Wpedantic, "extra %<;%>"); } else { tree type; /* See if this declaration is a friend. */ friend_p = cp_parser_friend_p (&decl_specifiers); /* If there were decl-specifiers, check to see if there was a class-declaration. */ type = check_tag_decl (&decl_specifiers, /*explicit_type_instantiation_p=*/false); /* Nested classes have already been added to the class, but a `friend' needs to be explicitly registered. */ if (friend_p) { /* If the `friend' keyword was present, the friend must be introduced with a class-key. */ if (!declares_class_or_enum && cxx_dialect < cxx11) pedwarn (decl_spec_token_start->location, OPT_Wpedantic, "in C++03 a class-key must be used " "when declaring a friend"); /* In this case: template <typename T> struct A { friend struct A<T>::B; }; A<T>::B will be represented by a TYPENAME_TYPE, and therefore not recognized by check_tag_decl. */ if (!type) { type = decl_specifiers.type; if (type && TREE_CODE (type) == TYPE_DECL) type = TREE_TYPE (type); } if (!type || !TYPE_P (type)) error_at (decl_spec_token_start->location, "friend declaration does not name a class or " "function"); else make_friend_class (current_class_type, type, /*complain=*/true); } /* If there is no TYPE, an error message will already have been issued. */ else if (!type || type == error_mark_node) ; /* An anonymous aggregate has to be handled specially; such a declaration really declares a data member (with a particular type), as opposed to a nested class. */ else if (ANON_AGGR_TYPE_P (type)) { /* C++11 9.5/6. */ if (decl_specifiers.storage_class != sc_none) error_at (decl_spec_token_start->location, "a storage class on an anonymous aggregate " "in class scope is not allowed"); /* Remove constructors and such from TYPE, now that we know it is an anonymous aggregate. */ fixup_anonymous_aggr (type); /* And make the corresponding data member. */ decl = build_decl (decl_spec_token_start->location, FIELD_DECL, NULL_TREE, type); /* Add it to the class. */ finish_member_declaration (decl); } else cp_parser_check_access_in_redeclaration (TYPE_NAME (type), decl_spec_token_start->location); } } else { bool assume_semicolon = false; /* Clear attributes from the decl_specifiers but keep them around as prefix attributes that apply them to the entity being declared. */ prefix_attributes = decl_specifiers.attributes; decl_specifiers.attributes = NULL_TREE; /* See if these declarations will be friends. */ friend_p = cp_parser_friend_p (&decl_specifiers); /* Keep going until we hit the `;' at the end of the declaration. */ while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { tree attributes = NULL_TREE; tree first_attribute; /* Peek at the next token. 
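   (Illustrative addition, not part of the original source -- the
   distinction made just below:

      struct S {
        unsigned a : 3;   // "a" followed by ":": a bitfield
        unsigned b;       // otherwise an ordinary member-declarator
      };
   )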
*/ token = cp_lexer_peek_token (parser->lexer); /* Check for a bitfield declaration. */ if (token->type == CPP_COLON || (token->type == CPP_NAME && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON)) { tree identifier; tree width; /* Get the name of the bitfield. Note that we cannot just check TOKEN here because it may have been invalidated by the call to cp_lexer_peek_nth_token above. */ if (cp_lexer_peek_token (parser->lexer)->type != CPP_COLON) identifier = cp_parser_identifier (parser); else identifier = NULL_TREE; /* Consume the `:' token. */ cp_lexer_consume_token (parser->lexer); /* Get the width of the bitfield. */ width = cp_parser_constant_expression (parser); /* Look for attributes that apply to the bitfield. */ attributes = cp_parser_attributes_opt (parser); /* Remember which attributes are prefix attributes and which are not. */ first_attribute = attributes; /* Combine the attributes. */ attributes = chainon (prefix_attributes, attributes); /* Create the bitfield declaration. */ decl = grokbitfield (identifier ? make_id_declarator (NULL_TREE, identifier, sfk_none) : NULL, &decl_specifiers, width, attributes); } else { cp_declarator *declarator; tree initializer; tree asm_specification; int ctor_dtor_or_conv_p; /* Parse the declarator. */ declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, &ctor_dtor_or_conv_p, /*parenthesized_p=*/NULL, /*member_p=*/true, friend_p); /* If something went wrong parsing the declarator, make sure that we at least consume some tokens. */ if (declarator == cp_error_declarator) { /* Skip to the end of the statement. */ cp_parser_skip_to_end_of_statement (parser); /* If the next token is not a semicolon, that is probably because we just skipped over the body of a function. So, we consume a semicolon if present, but do not issue an error message if it is not present. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); goto out; } if (declares_class_or_enum & 2) cp_parser_check_for_definition_in_return_type (declarator, decl_specifiers.type, decl_specifiers.locations[ds_type_spec]); /* Look for an asm-specification. */ asm_specification = cp_parser_asm_specification_opt (parser); /* Look for attributes that apply to the declaration. */ attributes = cp_parser_attributes_opt (parser); /* Remember which attributes are prefix attributes and which are not. */ first_attribute = attributes; /* Combine the attributes. */ attributes = chainon (prefix_attributes, attributes); /* If it's an `=', then we have a constant-initializer or a pure-specifier. It is not correct to parse the initializer before registering the member declaration since the member declaration should be in scope while its initializer is processed. However, the rest of the front end does not yet provide an interface that allows us to handle this correctly. */ if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { /* In [class.mem]: A pure-specifier shall be used only in the declaration of a virtual function. A member-declarator can contain a constant-initializer only if it declares a static member of integral or enumeration type. Therefore, if the DECLARATOR is for a function, we look for a pure-specifier; otherwise, we look for a constant-initializer. When we call `grokfield', it will perform more stringent semantics checks. 
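		 As a brief (illustrative) contrast:

		   struct B {
		     virtual void f () = 0;   // function declarator: pure-specifier
		     static const int i = 7;  // static data member: constant-initializer
		   };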
	      */
	      initializer_token_start = cp_lexer_peek_token (parser->lexer);
	      if (function_declarator_p (declarator)
		  || (decl_specifiers.type
		      && TREE_CODE (decl_specifiers.type) == TYPE_DECL
		      && declarator->kind == cdk_id
		      && (TREE_CODE (TREE_TYPE (decl_specifiers.type))
			  == FUNCTION_TYPE)))
		initializer = cp_parser_pure_specifier (parser);
	      else if (decl_specifiers.storage_class != sc_static)
		initializer = cp_parser_save_nsdmi (parser);
	      else if (cxx_dialect >= cxx11)
		{
		  bool nonconst;
		  /* Don't require a constant rvalue in C++11, since we
		     might want a reference constant.  We'll enforce
		     constancy later.  */
		  cp_lexer_consume_token (parser->lexer);
		  /* Parse the initializer.  */
		  initializer = cp_parser_initializer_clause (parser,
							      &nonconst);
		}
	      else
		/* Parse the initializer.  */
		initializer = cp_parser_constant_initializer (parser);
	    }
	  else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)
		   && !function_declarator_p (declarator))
	    {
	      bool x;
	      if (decl_specifiers.storage_class != sc_static)
		initializer = cp_parser_save_nsdmi (parser);
	      else
		initializer = cp_parser_initializer (parser, &x, &x);
	    }
	  /* Otherwise, there is no initializer.  */
	  else
	    initializer = NULL_TREE;

	  /* See if we are probably looking at a function
	     definition.  We are certainly not looking at a
	     member-declarator.  Calling `grokfield' has
	     side-effects, so we must not do it unless we are sure
	     that we are looking at a member-declarator.  */
	  if (cp_parser_token_starts_function_definition_p
	      (cp_lexer_peek_token (parser->lexer)))
	    {
	      /* The grammar does not allow a pure-specifier to be
		 used when a member function is defined.  (It is
		 possible that this fact is an oversight in the
		 standard, since a pure function may be defined
		 outside of the class-specifier.)  */
	      if (initializer && initializer_token_start)
		error_at (initializer_token_start->location,
			  "pure-specifier on function-definition");
	      decl = cp_parser_save_member_function_body (parser,
							  &decl_specifiers,
							  declarator,
							  attributes);
	      if (parser->fully_implicit_function_template_p)
		decl = finish_fully_implicit_template (parser, decl);
	      /* If the member was not a friend, declare it here.  */
	      if (!friend_p)
		finish_member_declaration (decl);
	      /* Peek at the next token.  */
	      token = cp_lexer_peek_token (parser->lexer);
	      /* If the next token is a semicolon, consume it.  */
	      if (token->type == CPP_SEMICOLON)
		cp_lexer_consume_token (parser->lexer);
	      goto out;
	    }
	  else if (declarator->kind == cdk_function)
	    declarator->id_loc = token->location;
	  /* Create the declaration.  */
	  decl = grokfield (declarator, &decl_specifiers,
			    initializer, /*init_const_expr_p=*/true,
			    asm_specification, attributes);
	  if (parser->fully_implicit_function_template_p)
	    {
	      if (friend_p)
		finish_fully_implicit_template (parser, 0);
	      else
		decl = finish_fully_implicit_template (parser, decl);
	    }
	}

      cp_finalize_omp_declare_simd (parser, decl);

      /* Reset PREFIX_ATTRIBUTES.  */
      while (attributes && TREE_CHAIN (attributes) != first_attribute)
	attributes = TREE_CHAIN (attributes);
      if (attributes)
	TREE_CHAIN (attributes) = NULL_TREE;

      /* If there is any qualification still in effect, clear it
	 now; we will be starting fresh with the next declarator.  */
      parser->scope = NULL_TREE;
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;

      /* If it's a `,', then there are more declarators.
      */
      if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA))
	{
	  cp_lexer_consume_token (parser->lexer);
	  if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON))
	    {
	      cp_token *token = cp_lexer_previous_token (parser->lexer);
	      error_at (token->location,
			"stray %<,%> at end of member declaration");
	    }
	}
      /* If the next token isn't a `;', then we have a parse error.  */
      else if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
	{
	  /* The next token might be a ways away from where the
	     actual semicolon is missing.  Find the previous token
	     and use that for our error position.  */
	  cp_token *token = cp_lexer_previous_token (parser->lexer);
	  error_at (token->location,
		    "expected %<;%> at end of member declaration");

	  /* Assume that the user meant to provide a semicolon.  If
	     we were to cp_parser_skip_to_end_of_statement, we might
	     skip to a semicolon inside a member function definition
	     and issue nonsensical error messages.  */
	  assume_semicolon = true;
	}

      if (decl)
	{
	  /* Add DECL to the list of members.  */
	  if (!friend_p
	      /* Explicitly include, e.g., NSDMIs, for better error
		 recovery (c++/58650).  */
	      || !DECL_DECLARES_FUNCTION_P (decl))
	    finish_member_declaration (decl);

	  if (TREE_CODE (decl) == FUNCTION_DECL)
	    cp_parser_save_default_args (parser, decl);
	  else if (TREE_CODE (decl) == FIELD_DECL
		   && !DECL_C_BIT_FIELD (decl)
		   && DECL_INITIAL (decl))
	    /* Add DECL to the queue of NSDMI to be parsed later.  */
	    vec_safe_push (unparsed_nsdmis, decl);
	}

      if (assume_semicolon)
	goto out;
    }
  }

  cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON);
 out:
  parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p;
}

/* Parse a pure-specifier.

   pure-specifier:
     = 0

   Returns INTEGER_ZERO_NODE if a pure specifier is found.
   Otherwise, ERROR_MARK_NODE is returned.  */

static tree
cp_parser_pure_specifier (cp_parser* parser)
{
  cp_token *token;

  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;
  /* Look for the `0' token.  */
  token = cp_lexer_peek_token (parser->lexer);

  if (token->type == CPP_EOF
      || token->type == CPP_PRAGMA_EOL)
    return error_mark_node;

  cp_lexer_consume_token (parser->lexer);

  /* Accept = default or = delete in c++0x mode.  */
  if (token->keyword == RID_DEFAULT
      || token->keyword == RID_DELETE)
    {
      maybe_warn_cpp0x (CPP0X_DEFAULTED_DELETED);
      return token->u.value;
    }

  /* c_lex_with_flags marks a single digit '0' with PURE_ZERO.  */
  if (token->type != CPP_NUMBER || !(token->flags & PURE_ZERO))
    {
      cp_parser_error (parser,
		       "invalid pure specifier (only %<= 0%> is allowed)");
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }
  if (PROCESSING_REAL_TEMPLATE_DECL_P ())
    {
      error_at (token->location, "templates may not be %<virtual%>");
      return error_mark_node;
    }

  return integer_zero_node;
}

/* Parse a constant-initializer.

   constant-initializer:
     = constant-expression

   Returns a representation of the constant-expression.  */

static tree
cp_parser_constant_initializer (cp_parser* parser)
{
  /* Look for the `=' token.  */
  if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
    return error_mark_node;

  /* It is invalid to write:

       struct S { static const int i = { 7 }; };

   */
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE))
    {
      cp_parser_error (parser,
		       "a brace-enclosed initializer is not allowed here");
      /* Consume the opening brace.  */
      cp_lexer_consume_token (parser->lexer);
      /* Skip the initializer.  */
      cp_parser_skip_to_closing_brace (parser);
      /* Look for the trailing `}'.
      */
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
      return error_mark_node;
    }

  return cp_parser_constant_expression (parser);
}

/* Derived classes [gram.class.derived] */

/* Parse a base-clause.

   base-clause:
     : base-specifier-list

   base-specifier-list:
     base-specifier ... [opt]
     base-specifier-list , base-specifier ... [opt]

   Returns a TREE_LIST representing the base-classes, in the order in
   which they were declared.  The representation of each node is as
   described by cp_parser_base_specifier.

   In the case that no bases are specified, this function will return
   NULL_TREE, not ERROR_MARK_NODE.  */

static tree
cp_parser_base_clause (cp_parser* parser)
{
  tree bases = NULL_TREE;

  /* Look for the `:' that begins the list.  */
  cp_parser_require (parser, CPP_COLON, RT_COLON);

  /* Scan the base-specifier-list.  */
  while (true)
    {
      cp_token *token;
      tree base;
      bool pack_expansion_p = false;

      /* Look for the base-specifier.  */
      base = cp_parser_base_specifier (parser);
      /* Look for the (optional) ellipsis.  */
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
	{
	  /* Consume the `...'.  */
	  cp_lexer_consume_token (parser->lexer);

	  pack_expansion_p = true;
	}

      /* Add BASE to the front of the list.  */
      if (base && base != error_mark_node)
	{
	  if (pack_expansion_p)
	    /* Make this a pack expansion type.  */
	    TREE_VALUE (base) = make_pack_expansion (TREE_VALUE (base));

	  if (!check_for_bare_parameter_packs (TREE_VALUE (base)))
	    {
	      TREE_CHAIN (base) = bases;
	      bases = base;
	    }
	}
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* If it's not a comma, then the list is complete.  */
      if (token->type != CPP_COMMA)
	break;

      /* Consume the `,'.  */
      cp_lexer_consume_token (parser->lexer);
    }

  /* PARSER->SCOPE may still be non-NULL at this point, if the last
     base class had a qualified name.  However, the next name that
     appears is certainly not qualified.  */
  parser->scope = NULL_TREE;
  parser->qualifying_scope = NULL_TREE;
  parser->object_scope = NULL_TREE;

  return nreverse (bases);
}

/* Parse a base-specifier.

   base-specifier:
     :: [opt] nested-name-specifier [opt] class-name
     virtual access-specifier [opt] :: [opt] nested-name-specifier
       [opt] class-name
     access-specifier virtual [opt] :: [opt] nested-name-specifier
       [opt] class-name

   Returns a TREE_LIST.  The TREE_PURPOSE will be one of
   ACCESS_{DEFAULT,PUBLIC,PROTECTED,PRIVATE}_[VIRTUAL]_NODE to
   indicate the specifiers provided.  The TREE_VALUE will be a TYPE
   (or the ERROR_MARK_NODE) indicating the type that was specified.  */

static tree
cp_parser_base_specifier (cp_parser* parser)
{
  cp_token *token;
  bool done = false;
  bool virtual_p = false;
  bool duplicate_virtual_error_issued_p = false;
  bool duplicate_access_error_issued_p = false;
  bool class_scope_p, template_p;
  tree access = access_default_node;
  tree type;

  /* Process the optional `virtual' and `access-specifier'.  */
  while (!done)
    {
      /* Peek at the next token.  */
      token = cp_lexer_peek_token (parser->lexer);
      /* Process `virtual'.  */
      switch (token->keyword)
	{
	case RID_VIRTUAL:
	  /* If `virtual' appears more than once, issue an error.  */
	  if (virtual_p && !duplicate_virtual_error_issued_p)
	    {
	      cp_parser_error (parser,
			       "%<virtual%> specified more than once "
			       "in base-specifier");
	      duplicate_virtual_error_issued_p = true;
	    }

	  virtual_p = true;
	  /* Consume the `virtual' token.  */
	  cp_lexer_consume_token (parser->lexer);

	  break;

	case RID_PUBLIC:
	case RID_PROTECTED:
	case RID_PRIVATE:
	  /* If more than one access specifier appears, issue an
	     error.
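	     For example, in the (deliberately ill-formed, illustrative)
	     clause

	       struct D : public private B { };

	     both `public' and `private' precede the same base.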
	  */
	  if (access != access_default_node
	      && !duplicate_access_error_issued_p)
	    {
	      cp_parser_error (parser,
			       "more than one access specifier "
			       "in base-specifier");
	      duplicate_access_error_issued_p = true;
	    }

	  access = ridpointers[(int) token->keyword];
	  /* Consume the access-specifier.  */
	  cp_lexer_consume_token (parser->lexer);

	  break;

	default:
	  done = true;
	  break;
	}
    }
  /* It is not uncommon to see programs mechanically, erroneously, use
     the 'typename' keyword to denote (dependent) qualified types
     as base classes.  */
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TYPENAME))
    {
      token = cp_lexer_peek_token (parser->lexer);
      if (!processing_template_decl)
	error_at (token->location,
		  "keyword %<typename%> not allowed outside of templates");
      else
	error_at (token->location,
		  "keyword %<typename%> not allowed in this context "
		  "(the base class is implicitly a type)");
      cp_lexer_consume_token (parser->lexer);
    }

  /* Look for the optional `::' operator.  */
  cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false);
  /* Look for the nested-name-specifier.  The simplest way to
     implement:

       [temp.res]

       The keyword `typename' is not permitted in a base-specifier or
       mem-initializer; in these contexts a qualified name that
       depends on a template-parameter is implicitly assumed to be a
       type name.

     is to pretend that we have seen the `typename' keyword at this
     point.  */
  cp_parser_nested_name_specifier_opt (parser,
				       /*typename_keyword_p=*/true,
				       /*check_dependency_p=*/true,
				       typename_type,
				       /*is_declaration=*/true);
  /* If the base class is given by a qualified name, assume that names
     we see are type names or templates, as appropriate.  */
  class_scope_p = (parser->scope && TYPE_P (parser->scope));
  template_p = class_scope_p && cp_parser_optional_template_keyword (parser);

  if (!parser->scope
      && cp_lexer_next_token_is_decltype (parser->lexer))
    /* DR 950 allows decltype as a base-specifier.  */
    type = cp_parser_decltype (parser);
  else
    {
      /* Otherwise, look for the class-name.  */
      type = cp_parser_class_name (parser,
				   class_scope_p,
				   template_p,
				   typename_type,
				   /*check_dependency_p=*/true,
				   /*class_head_p=*/false,
				   /*is_declaration=*/true);
      type = TREE_TYPE (type);
    }

  if (type == error_mark_node)
    return error_mark_node;

  return finish_base_specifier (type, access, virtual_p);
}

/* Exception handling [gram.exception] */

/* Parse an (optional) noexcept-specification.

   noexcept-specification:
     noexcept ( constant-expression ) [opt]

   If no noexcept-specification is present, returns NULL_TREE.
   Otherwise, if REQUIRE_CONSTEXPR is false, then either parse and
   return any expression if parentheses follow noexcept, or return
   BOOLEAN_TRUE_NODE if there are no parentheses.  CONSUMED_EXPR will
   be set accordingly.  Otherwise, returns a noexcept specification
   unless RETURN_COND is true, in which case a boolean condition is
   returned instead.  */

static tree
cp_parser_noexcept_specification_opt (cp_parser* parser,
				      bool require_constexpr,
				      bool* consumed_expr,
				      bool return_cond)
{
  cp_token *token;
  const char *saved_message;

  /* Peek at the next token.  */
  token = cp_lexer_peek_token (parser->lexer);

  /* Is it a noexcept-specification?  */
  if (cp_parser_is_keyword (token, RID_NOEXCEPT))
    {
      tree expr;
      cp_lexer_consume_token (parser->lexer);

      if (cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN)
	{
	  cp_lexer_consume_token (parser->lexer);

	  if (require_constexpr)
	    {
	      /* Types may not be defined in an exception-specification.
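		 For instance, a (contrived, illustrative) declaration
		 such as

		   void f () noexcept (sizeof (struct Local { int i; }) != 0);

		 would be diagnosed while the constant-expression is
		 parsed.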
*/ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in an exception-specification"); expr = cp_parser_constant_expression (parser); /* Restore the saved message. */ parser->type_definition_forbidden_message = saved_message; } else { expr = cp_parser_expression (parser); *consumed_expr = true; } cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); } else { expr = boolean_true_node; if (!require_constexpr) *consumed_expr = false; } /* We cannot build a noexcept-spec right away because this will check that expr is a constexpr. */ if (!return_cond) return build_noexcept_spec (expr, tf_warning_or_error); else return expr; } else return NULL_TREE; } /* Parse an (optional) exception-specification. exception-specification: throw ( type-id-list [opt] ) Returns a TREE_LIST representing the exception-specification. The TREE_VALUE of each node is a type. */ static tree cp_parser_exception_specification_opt (cp_parser* parser) { cp_token *token; tree type_id_list; const char *saved_message; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* Is it a noexcept-specification? */ type_id_list = cp_parser_noexcept_specification_opt(parser, true, NULL, false); if (type_id_list != NULL_TREE) return type_id_list; /* If it's not `throw', then there's no exception-specification. */ if (!cp_parser_is_keyword (token, RID_THROW)) return NULL_TREE; #if 0 /* Enable this once a lot of code has transitioned to noexcept? */ if (cxx_dialect >= cxx11 && !in_system_header_at (input_location)) warning (OPT_Wdeprecated, "dynamic exception specifications are " "deprecated in C++0x; use %<noexcept%> instead"); #endif /* Consume the `throw'. */ cp_lexer_consume_token (parser->lexer); /* Look for the `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's not a `)', then there is a type-id-list. */ if (token->type != CPP_CLOSE_PAREN) { /* Types may not be defined in an exception-specification. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in an exception-specification"); /* Parse the type-id-list. */ type_id_list = cp_parser_type_id_list (parser); /* Restore the saved message. */ parser->type_definition_forbidden_message = saved_message; } else type_id_list = empty_except_spec; /* Look for the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); return type_id_list; } /* Parse an (optional) type-id-list. type-id-list: type-id ... [opt] type-id-list , type-id ... [opt] Returns a TREE_LIST. The TREE_VALUE of each node is a TYPE, in the order that the types were presented. */ static tree cp_parser_type_id_list (cp_parser* parser) { tree types = NULL_TREE; while (true) { cp_token *token; tree type; /* Get the next type-id. */ type = cp_parser_type_id (parser); /* Parse the optional ellipsis. */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { /* Consume the `...'. */ cp_lexer_consume_token (parser->lexer); /* Turn the type into a pack expansion expression. */ type = make_pack_expansion (type); } /* Add it to the list. */ types = add_exception_specifier (types, type, /*complain=*/1); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it is not a `,', we are done. */ if (token->type != CPP_COMMA) break; /* Consume the `,'. 
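	 as in the (illustrative) dynamic exception-specification

	   void f () throw (int, double);

	 where each type-id after the first is introduced by a `,'.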
*/ cp_lexer_consume_token (parser->lexer); } return nreverse (types); } /* Parse a try-block. try-block: try compound-statement handler-seq */ static tree cp_parser_try_block (cp_parser* parser) { tree try_block; cp_parser_require_keyword (parser, RID_TRY, RT_TRY); if (parser->in_function_body && DECL_DECLARED_CONSTEXPR_P (current_function_decl)) error ("%<try%> in %<constexpr%> function"); try_block = begin_try_block (); cp_parser_compound_statement (parser, NULL, true, false); finish_try_block (try_block); cp_parser_handler_seq (parser); finish_handler_sequence (try_block); return try_block; } /* Parse a function-try-block. function-try-block: try ctor-initializer [opt] function-body handler-seq */ static bool cp_parser_function_try_block (cp_parser* parser) { tree compound_stmt; tree try_block; bool ctor_initializer_p; /* Look for the `try' keyword. */ if (!cp_parser_require_keyword (parser, RID_TRY, RT_TRY)) return false; /* Let the rest of the front end know where we are. */ try_block = begin_function_try_block (&compound_stmt); /* Parse the function-body. */ ctor_initializer_p = cp_parser_ctor_initializer_opt_and_function_body (parser, /*in_function_try_block=*/true); /* We're done with the `try' part. */ finish_function_try_block (try_block); /* Parse the handlers. */ cp_parser_handler_seq (parser); /* We're done with the handlers. */ finish_function_handler_sequence (try_block, compound_stmt); return ctor_initializer_p; } /* Parse a handler-seq. handler-seq: handler handler-seq [opt] */ static void cp_parser_handler_seq (cp_parser* parser) { while (true) { cp_token *token; /* Parse the handler. */ cp_parser_handler (parser); /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's not `catch' then there are no more handlers. */ if (!cp_parser_is_keyword (token, RID_CATCH)) break; } } /* Parse a handler. handler: catch ( exception-declaration ) compound-statement */ static void cp_parser_handler (cp_parser* parser) { tree handler; tree declaration; cp_parser_require_keyword (parser, RID_CATCH, RT_CATCH); handler = begin_handler (); cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); declaration = cp_parser_exception_declaration (parser); finish_handler_parms (declaration, handler); cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); cp_parser_compound_statement (parser, NULL, false, false); finish_handler (handler); } /* Parse an exception-declaration. exception-declaration: type-specifier-seq declarator type-specifier-seq abstract-declarator type-specifier-seq ... Returns a VAR_DECL for the declaration, or NULL_TREE if the ellipsis variant is used. */ static tree cp_parser_exception_declaration (cp_parser* parser) { cp_decl_specifier_seq type_specifiers; cp_declarator *declarator; const char *saved_message; /* If it's an ellipsis, it's easy to handle. */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) { /* Consume the `...' token. */ cp_lexer_consume_token (parser->lexer); return NULL_TREE; } /* Types may not be defined in exception-declarations. */ saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in exception-declarations"); /* Parse the type-specifier-seq. */ cp_parser_type_specifier_seq (parser, /*is_declaration=*/true, /*is_trailing_return=*/false, &type_specifiers); /* If it's a `)', then there is no declarator. 
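     Compare the (illustrative) handlers

       catch (int) { }    // no declarator follows the type-specifier-seq
       catch (int e) { }  // a declarator, `e', is present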
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)) declarator = NULL; else declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_EITHER, /*ctor_dtor_or_conv_p=*/NULL, /*parenthesized_p=*/NULL, /*member_p=*/false, /*friend_p=*/false); /* Restore the saved message. */ parser->type_definition_forbidden_message = saved_message; if (!type_specifiers.any_specifiers_p) return error_mark_node; return grokdeclarator (declarator, &type_specifiers, CATCHPARM, 1, NULL); } /* Parse a throw-expression. throw-expression: throw assignment-expression [opt] Returns a THROW_EXPR representing the throw-expression. */ static tree cp_parser_throw_expression (cp_parser* parser) { tree expression; cp_token* token; cp_parser_require_keyword (parser, RID_THROW, RT_THROW); token = cp_lexer_peek_token (parser->lexer); /* Figure out whether or not there is an assignment-expression following the "throw" keyword. */ if (token->type == CPP_COMMA || token->type == CPP_SEMICOLON || token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_SQUARE || token->type == CPP_CLOSE_BRACE || token->type == CPP_COLON) expression = NULL_TREE; else expression = cp_parser_assignment_expression (parser); return build_throw (expression); } /* GNU Extensions */ /* Parse an (optional) asm-specification. asm-specification: asm ( string-literal ) If the asm-specification is present, returns a STRING_CST corresponding to the string-literal. Otherwise, returns NULL_TREE. */ static tree cp_parser_asm_specification_opt (cp_parser* parser) { cp_token *token; tree asm_specification; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If the next token isn't the `asm' keyword, then there's no asm-specification. */ if (!cp_parser_is_keyword (token, RID_ASM)) return NULL_TREE; /* Consume the `asm' token. */ cp_lexer_consume_token (parser->lexer); /* Look for the `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Look for the string-literal. */ asm_specification = cp_parser_string_literal (parser, false, false); /* Look for the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); return asm_specification; } /* Parse an asm-operand-list. asm-operand-list: asm-operand asm-operand-list , asm-operand asm-operand: string-literal ( expression ) [ string-literal ] string-literal ( expression ) Returns a TREE_LIST representing the operands. The TREE_VALUE of each node is the expression. The TREE_PURPOSE is itself a TREE_LIST whose TREE_PURPOSE is a STRING_CST for the bracketed string-literal (or NULL_TREE if not present) and whose TREE_VALUE is a STRING_CST for the string literal before the parenthesis. Returns ERROR_MARK_NODE if any of the operands are invalid. */ static tree cp_parser_asm_operand_list (cp_parser* parser) { tree asm_operands = NULL_TREE; bool invalid_operands = false; while (true) { tree string_literal; tree expression; tree name; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE)) { /* Consume the `[' token. */ cp_lexer_consume_token (parser->lexer); /* Read the operand name. */ name = cp_parser_identifier (parser); if (name != error_mark_node) name = build_string (IDENTIFIER_LENGTH (name), IDENTIFIER_POINTER (name)); /* Look for the closing `]'. */ cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); } else name = NULL_TREE; /* Look for the string-literal. */ string_literal = cp_parser_string_literal (parser, false, false); /* Look for the `('. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Parse the expression. 
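	 i.e. the parenthesized operand, as in the (illustrative)
	 extended asm statement

	   asm ("movl %1, %0" : "=r" (dst) : "r" (src));

	 where `dst' and `src' (hypothetical variables) are the
	 expressions parsed here, one per operand.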
*/ expression = cp_parser_expression (parser); /* Look for the `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); if (name == error_mark_node || string_literal == error_mark_node || expression == error_mark_node) invalid_operands = true; /* Add this operand to the list. */ asm_operands = tree_cons (build_tree_list (name, string_literal), expression, asm_operands); /* If the next token is not a `,', there are no more operands. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Consume the `,'. */ cp_lexer_consume_token (parser->lexer); } return invalid_operands ? error_mark_node : nreverse (asm_operands); } /* Parse an asm-clobber-list. asm-clobber-list: string-literal asm-clobber-list , string-literal Returns a TREE_LIST, indicating the clobbers in the order that they appeared. The TREE_VALUE of each node is a STRING_CST. */ static tree cp_parser_asm_clobber_list (cp_parser* parser) { tree clobbers = NULL_TREE; while (true) { tree string_literal; /* Look for the string literal. */ string_literal = cp_parser_string_literal (parser, false, false); /* Add it to the list. */ clobbers = tree_cons (NULL_TREE, string_literal, clobbers); /* If the next token is not a `,', then the list is complete. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Consume the `,' token. */ cp_lexer_consume_token (parser->lexer); } return clobbers; } /* Parse an asm-label-list. asm-label-list: identifier asm-label-list , identifier Returns a TREE_LIST, indicating the labels in the order that they appeared. The TREE_VALUE of each node is a label. */ static tree cp_parser_asm_label_list (cp_parser* parser) { tree labels = NULL_TREE; while (true) { tree identifier, label, name; /* Look for the identifier. */ identifier = cp_parser_identifier (parser); if (!error_operand_p (identifier)) { label = lookup_label (identifier); if (TREE_CODE (label) == LABEL_DECL) { TREE_USED (label) = 1; check_goto (label); name = build_string (IDENTIFIER_LENGTH (identifier), IDENTIFIER_POINTER (identifier)); labels = tree_cons (name, label, labels); } } /* If the next token is not a `,', then the list is complete. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; /* Consume the `,' token. */ cp_lexer_consume_token (parser->lexer); } return nreverse (labels); } /* Return TRUE iff the next tokens in the stream are possibly the beginning of a GNU extension attribute. */ static bool cp_next_tokens_can_be_gnu_attribute_p (cp_parser *parser) { return cp_nth_tokens_can_be_gnu_attribute_p (parser, 1); } /* Return TRUE iff the next tokens in the stream are possibly the beginning of a standard C++-11 attribute specifier. */ static bool cp_next_tokens_can_be_std_attribute_p (cp_parser *parser) { return cp_nth_tokens_can_be_std_attribute_p (parser, 1); } /* Return TRUE iff the next Nth tokens in the stream are possibly the beginning of a standard C++-11 attribute specifier. */ static bool cp_nth_tokens_can_be_std_attribute_p (cp_parser *parser, size_t n) { cp_token *token = cp_lexer_peek_nth_token (parser->lexer, n); return (cxx_dialect >= cxx11 && ((token->type == CPP_KEYWORD && token->keyword == RID_ALIGNAS) || (token->type == CPP_OPEN_SQUARE && (token = cp_lexer_peek_nth_token (parser->lexer, n + 1)) && token->type == CPP_OPEN_SQUARE))); } /* Return TRUE iff the next Nth tokens in the stream are possibly the beginning of a GNU extension attribute. 
*/ static bool cp_nth_tokens_can_be_gnu_attribute_p (cp_parser *parser, size_t n) { cp_token *token = cp_lexer_peek_nth_token (parser->lexer, n); return token->type == CPP_KEYWORD && token->keyword == RID_ATTRIBUTE; } /* Return true iff the next tokens can be the beginning of either a GNU attribute list, or a standard C++11 attribute sequence. */ static bool cp_next_tokens_can_be_attribute_p (cp_parser *parser) { return (cp_next_tokens_can_be_gnu_attribute_p (parser) || cp_next_tokens_can_be_std_attribute_p (parser)); } /* Return true iff the next Nth tokens can be the beginning of either a GNU attribute list, or a standard C++11 attribute sequence. */ static bool cp_nth_tokens_can_be_attribute_p (cp_parser *parser, size_t n) { return (cp_nth_tokens_can_be_gnu_attribute_p (parser, n) || cp_nth_tokens_can_be_std_attribute_p (parser, n)); } /* Parse either a standard C++-11 attribute-specifier-seq, or a series of GNU attributes, or return NULL. */ static tree cp_parser_attributes_opt (cp_parser *parser) { if (cp_next_tokens_can_be_gnu_attribute_p (parser)) return cp_parser_gnu_attributes_opt (parser); return cp_parser_std_attribute_spec_seq (parser); } #define CILK_SIMD_FN_CLAUSE_MASK \ ((OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_VECTORLENGTH) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_LINEAR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_UNIFORM) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_MASK) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_CILK_CLAUSE_NOMASK)) /* Parses the Cilk Plus SIMD-enabled function's attribute. Syntax: vector [(<clauses>)] */ static void cp_parser_cilk_simd_fn_vector_attrs (cp_parser *parser, cp_token *v_token) { bool first_p = parser->cilk_simd_fn_info == NULL; cp_token *token = v_token; if (first_p) { parser->cilk_simd_fn_info = XNEW (cp_omp_declare_simd_data); parser->cilk_simd_fn_info->error_seen = false; parser->cilk_simd_fn_info->fndecl_seen = false; parser->cilk_simd_fn_info->tokens = vNULL; } int paren_scope = 0; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { cp_lexer_consume_token (parser->lexer); v_token = cp_lexer_peek_token (parser->lexer); paren_scope++; } while (paren_scope > 0) { token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_OPEN_PAREN) paren_scope++; else if (token->type == CPP_CLOSE_PAREN) paren_scope--; /* Do not push the last ')' */ if (!(token->type == CPP_CLOSE_PAREN && paren_scope == 0)) cp_lexer_consume_token (parser->lexer); } token->type = CPP_PRAGMA_EOL; parser->lexer->next_token = token; cp_lexer_consume_token (parser->lexer); struct cp_token_cache *cp = cp_token_cache_new (v_token, cp_lexer_peek_token (parser->lexer)); parser->cilk_simd_fn_info->tokens.safe_push (cp); } /* Parse an (optional) series of attributes. attributes: attributes attribute attribute: __attribute__ (( attribute-list [opt] )) The return value is as for cp_parser_gnu_attribute_list. */ static tree cp_parser_gnu_attributes_opt (cp_parser* parser) { tree attributes = NULL_TREE; while (true) { cp_token *token; tree attribute_list; bool ok = true; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If it's not `__attribute__', then we're done. */ if (token->keyword != RID_ATTRIBUTE) break; /* Consume the `__attribute__' keyword. */ cp_lexer_consume_token (parser->lexer); /* Look for the two `(' tokens. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); /* Peek at the next token. 
      */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type != CPP_CLOSE_PAREN)
	/* Parse the attribute-list.  */
	attribute_list = cp_parser_gnu_attribute_list (parser);
      else
	/* If the next token is a `)', then there is no attribute
	   list.  */
	attribute_list = NULL;

      /* Look for the two `)' tokens.  */
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	ok = false;
      if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
	ok = false;
      if (!ok)
	cp_parser_skip_to_end_of_statement (parser);

      /* Add these new attributes to the list.  */
      attributes = chainon (attributes, attribute_list);
    }

  return attributes;
}

/* Returns true if NAME is an IDENTIFIER_NODE with identifier "vector",
   "__vector" or "__vector__".  */

static inline bool
is_cilkplus_vector_p (tree name)
{
  if (flag_cilkplus && is_attribute_p ("vector", name))
    return true;
  return false;
}

/* Parse a GNU attribute-list.

   attribute-list:
     attribute
     attribute-list , attribute

   attribute:
     identifier
     identifier ( identifier )
     identifier ( identifier , expression-list )
     identifier ( expression-list )

   Returns a TREE_LIST, or NULL_TREE on error.  Each node corresponds
   to an attribute.  The TREE_PURPOSE of each node is the identifier
   indicating which attribute is in use.  The TREE_VALUE represents
   the arguments, if any.  */

static tree
cp_parser_gnu_attribute_list (cp_parser* parser)
{
  tree attribute_list = NULL_TREE;
  bool save_translate_strings_p = parser->translate_strings_p;

  parser->translate_strings_p = false;
  while (true)
    {
      cp_token *token;
      tree identifier;
      tree attribute;

      /* Look for the identifier.  We also allow keywords here; for
	 example `__attribute__ ((const))' is legal.  */
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type == CPP_NAME
	  || token->type == CPP_KEYWORD)
	{
	  tree arguments = NULL_TREE;

	  /* Consume the token, but save it since we need it for the
	     SIMD enabled function parsing.  */
	  cp_token *id_token = cp_lexer_consume_token (parser->lexer);

	  /* Save away the identifier that indicates which attribute
	     this is.  */
	  identifier = (token->type == CPP_KEYWORD)
	    /* For keywords, use the canonical spelling, not the
	       parsed identifier.  */
	    ? ridpointers[(int) token->keyword]
	    : id_token->u.value;

	  attribute = build_tree_list (identifier, NULL_TREE);

	  /* Peek at the next token.  */
	  token = cp_lexer_peek_token (parser->lexer);
	  /* If it's an `(', then parse the attribute arguments.  */
	  if (token->type == CPP_OPEN_PAREN)
	    {
	      vec<tree, va_gc> *vec;
	      int attr_flag = (attribute_takes_identifier_p (identifier)
			       ? id_attr : normal_attr);
	      if (is_cilkplus_vector_p (identifier))
		{
		  cp_parser_cilk_simd_fn_vector_attrs (parser, id_token);
		  continue;
		}
	      else
		vec = cp_parser_parenthesized_expression_list
		      (parser, attr_flag, /*cast_p=*/false,
		       /*allow_expansion_p=*/false,
		       /*non_constant_p=*/NULL);
	      if (vec == NULL)
		arguments = error_mark_node;
	      else
		{
		  arguments = build_tree_list_vec (vec);
		  release_tree_vector (vec);
		}
	      /* Save the arguments away.  */
	      TREE_VALUE (attribute) = arguments;
	    }
	  else if (is_cilkplus_vector_p (identifier))
	    {
	      cp_parser_cilk_simd_fn_vector_attrs (parser, id_token);
	      continue;
	    }

	  if (arguments != error_mark_node)
	    {
	      /* Add this attribute to the list.  */
	      TREE_CHAIN (attribute) = attribute_list;
	      attribute_list = attribute;
	    }

	  token = cp_lexer_peek_token (parser->lexer);
	}
      /* Now, look for more attributes.  If the next token isn't a
	 `,', we're done.  */
      if (token->type != CPP_COMMA)
	break;

      /* Consume the comma and keep going.
      */
      cp_lexer_consume_token (parser->lexer);
    }
  parser->translate_strings_p = save_translate_strings_p;

  /* We built up the list in reverse order.  */
  return nreverse (attribute_list);
}

/* Parse a standard C++11 attribute.

   The returned representation is a TREE_LIST whose TREE_PURPOSE is
   the scoped name of the attribute, and whose TREE_VALUE is its
   arguments list.

   Note that the scoped name of the attribute is itself a TREE_LIST
   whose TREE_PURPOSE is the namespace of the attribute, and whose
   TREE_VALUE is its name.  This is unlike a GNU attribute -- as
   parsed by cp_parser_gnu_attribute_list -- that doesn't have any
   namespace and whose TREE_PURPOSE is directly the attribute name.

   Clients of the attribute code should use get_attribute_namespace
   and get_attribute_name to get the actual namespace and name of
   attributes, regardless of their being GNU or C++11 attributes.

   attribute:
     attribute-token attribute-argument-clause [opt]

   attribute-token:
     identifier
     attribute-scoped-token

   attribute-scoped-token:
     attribute-namespace :: identifier

   attribute-namespace:
     identifier

   attribute-argument-clause:
     ( balanced-token-seq )

   balanced-token-seq:
     balanced-token [opt]
     balanced-token-seq balanced-token

   balanced-token:
     ( balanced-token-seq )
     [ balanced-token-seq ]
     { balanced-token-seq }.  */

static tree
cp_parser_std_attribute (cp_parser *parser)
{
  tree attribute, attr_ns = NULL_TREE, attr_id = NULL_TREE, arguments;
  cp_token *token;

  /* First, parse the name of the attribute, a.k.a. the
     attribute-token.  */

  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_NAME)
    attr_id = token->u.value;
  else if (token->type == CPP_KEYWORD)
    attr_id = ridpointers[(int) token->keyword];
  else if (token->flags & NAMED_OP)
    attr_id = get_identifier (cpp_type2name (token->type, token->flags));

  if (attr_id == NULL_TREE)
    return NULL_TREE;

  cp_lexer_consume_token (parser->lexer);

  token = cp_lexer_peek_token (parser->lexer);
  if (token->type == CPP_SCOPE)
    {
      /* We are seeing a scoped attribute token.  */
      cp_lexer_consume_token (parser->lexer);
      attr_ns = attr_id;

      token = cp_lexer_consume_token (parser->lexer);
      if (token->type == CPP_NAME)
	attr_id = token->u.value;
      else if (token->type == CPP_KEYWORD)
	attr_id = ridpointers[(int) token->keyword];
      else
	{
	  error_at (token->location,
		    "expected an identifier for the attribute name");
	  return error_mark_node;
	}
      attribute = build_tree_list (build_tree_list (attr_ns, attr_id),
				   NULL_TREE);
      token = cp_lexer_peek_token (parser->lexer);
    }
  else
    {
      attribute = build_tree_list (build_tree_list (NULL_TREE, attr_id),
				   NULL_TREE);
      /* C++11 noreturn attribute is equivalent to GNU's.  */
      if (is_attribute_p ("noreturn", attr_id))
	TREE_PURPOSE (TREE_PURPOSE (attribute)) = get_identifier ("gnu");
      /* C++14 deprecated attribute is equivalent to GNU's.  */
      else if (cxx_dialect >= cxx11 && is_attribute_p ("deprecated", attr_id))
	{
	  if (cxx_dialect == cxx11)
	    pedwarn (token->location, OPT_Wpedantic,
		     "%<deprecated%> is a C++14 feature;"
		     " use %<gnu::deprecated%>");
	  TREE_PURPOSE (TREE_PURPOSE (attribute)) = get_identifier ("gnu");
	}
    }

  /* Now parse the optional argument clause of the attribute.  */

  if (token->type != CPP_OPEN_PAREN)
    return attribute;

  {
    vec<tree, va_gc> *vec;
    int attr_flag = normal_attr;

    if (attr_ns == get_identifier ("gnu")
	&& attribute_takes_identifier_p (attr_id))
      /* A GNU attribute that takes an identifier in parameter.
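	 For instance (illustrative only), the `printf' in

	   [[gnu::format (printf, 1, 2)]]

	 names a format archetype rather than an expression, so it
	 must not be looked up as an ordinary name.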
      */
      attr_flag = id_attr;

    vec = cp_parser_parenthesized_expression_list
      (parser, attr_flag, /*cast_p=*/false,
       /*allow_expansion_p=*/true,
       /*non_constant_p=*/NULL);
    if (vec == NULL)
      arguments = error_mark_node;
    else
      {
	arguments = build_tree_list_vec (vec);
	release_tree_vector (vec);
      }

    if (arguments == error_mark_node)
      attribute = error_mark_node;
    else
      TREE_VALUE (attribute) = arguments;
  }

  return attribute;
}

/* Parse a list of standard C++-11 attributes.

   attribute-list:
     attribute [opt]
     attribute-list , attribute [opt]
     attribute ...
     attribute-list , attribute ...
*/

static tree
cp_parser_std_attribute_list (cp_parser *parser)
{
  tree attributes = NULL_TREE, attribute = NULL_TREE;
  cp_token *token = NULL;

  while (true)
    {
      attribute = cp_parser_std_attribute (parser);
      if (attribute == error_mark_node)
	break;
      if (attribute != NULL_TREE)
	{
	  TREE_CHAIN (attribute) = attributes;
	  attributes = attribute;
	}
      token = cp_lexer_peek_token (parser->lexer);
      if (token->type != CPP_COMMA)
	break;
      cp_lexer_consume_token (parser->lexer);
    }
  attributes = nreverse (attributes);
  return attributes;
}

/* Parse a standard C++-11 attribute specifier.

   attribute-specifier:
     [ [ attribute-list ] ]
     alignment-specifier

   alignment-specifier:
     alignas ( type-id ... [opt] )
     alignas ( alignment-expression ... [opt] ).  */

static tree
cp_parser_std_attribute_spec (cp_parser *parser)
{
  tree attributes = NULL_TREE;
  cp_token *token = cp_lexer_peek_token (parser->lexer);

  if (token->type == CPP_OPEN_SQUARE
      && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_OPEN_SQUARE)
    {
      cp_lexer_consume_token (parser->lexer);
      cp_lexer_consume_token (parser->lexer);

      attributes = cp_parser_std_attribute_list (parser);

      if (!cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE)
	  || !cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE))
	cp_parser_skip_to_end_of_statement (parser);
      else
	/* Warn about parsing c++11 attribute in non-c++11 mode, only
	   when we are sure that we have actually parsed them.  */
	maybe_warn_cpp0x (CPP0X_ATTRIBUTES);
    }
  else
    {
      tree alignas_expr;

      /* Look for an alignment-specifier.  */

      token = cp_lexer_peek_token (parser->lexer);

      if (token->type != CPP_KEYWORD
	  || token->keyword != RID_ALIGNAS)
	return NULL_TREE;

      cp_lexer_consume_token (parser->lexer);
      maybe_warn_cpp0x (CPP0X_ATTRIBUTES);

      if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN) == NULL)
	{
	  cp_parser_error (parser, "expected %<(%>");
	  return error_mark_node;
	}

      cp_parser_parse_tentatively (parser);
      alignas_expr = cp_parser_type_id (parser);

      if (!cp_parser_parse_definitely (parser))
	{
	  gcc_assert (alignas_expr == error_mark_node
		      || alignas_expr == NULL_TREE);

	  alignas_expr =
	    cp_parser_assignment_expression (parser);
	  if (alignas_expr == error_mark_node)
	    cp_parser_skip_to_end_of_statement (parser);
	  if (alignas_expr == NULL_TREE
	      || alignas_expr == error_mark_node)
	    return alignas_expr;
	}

      if (cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN) == NULL)
	{
	  cp_parser_error (parser, "expected %<)%>");
	  return error_mark_node;
	}

      alignas_expr = cxx_alignas_expr (alignas_expr);

      /* Build the C++-11 representation of an 'aligned'
	 attribute.  */
      attributes =
	build_tree_list (build_tree_list (get_identifier ("gnu"),
					  get_identifier ("aligned")),
			 build_tree_list (NULL_TREE, alignas_expr));
    }

  return attributes;
}

/* Parse a standard C++-11 attribute-specifier-seq.
attribute-specifier-seq: attribute-specifier-seq [opt] attribute-specifier */ static tree cp_parser_std_attribute_spec_seq (cp_parser *parser) { tree attr_specs = NULL; while (true) { tree attr_spec = cp_parser_std_attribute_spec (parser); if (attr_spec == NULL_TREE) break; if (attr_spec == error_mark_node) return error_mark_node; TREE_CHAIN (attr_spec) = attr_specs; attr_specs = attr_spec; } attr_specs = nreverse (attr_specs); return attr_specs; } /* Parse an optional `__extension__' keyword. Returns TRUE if it is present, and FALSE otherwise. *SAVED_PEDANTIC is set to the current value of the PEDANTIC flag, regardless of whether or not the `__extension__' keyword is present. The caller is responsible for restoring the value of the PEDANTIC flag. */ static bool cp_parser_extension_opt (cp_parser* parser, int* saved_pedantic) { /* Save the old value of the PEDANTIC flag. */ *saved_pedantic = pedantic; if (cp_lexer_next_token_is_keyword (parser->lexer, RID_EXTENSION)) { /* Consume the `__extension__' token. */ cp_lexer_consume_token (parser->lexer); /* We're not being pedantic while the `__extension__' keyword is in effect. */ pedantic = 0; return true; } return false; } /* Parse a label declaration. label-declaration: __label__ label-declarator-seq ; label-declarator-seq: identifier , label-declarator-seq identifier */ static void cp_parser_label_declaration (cp_parser* parser) { /* Look for the `__label__' keyword. */ cp_parser_require_keyword (parser, RID_LABEL, RT_LABEL); while (true) { tree identifier; /* Look for an identifier. */ identifier = cp_parser_identifier (parser); /* If we failed, stop. */ if (identifier == error_mark_node) break; /* Declare it as a label. */ finish_label_decl (identifier); /* If the next token is a `;', stop. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) break; /* Look for the `,' separating the label declarations. */ cp_parser_require (parser, CPP_COMMA, RT_COMMA); } /* Look for the final `;'. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); } /* Support Functions */ /* Looks up NAME in the current scope, as given by PARSER->SCOPE. NAME should have one of the representations used for an id-expression. If NAME is the ERROR_MARK_NODE, the ERROR_MARK_NODE is returned. If PARSER->SCOPE is a dependent type, then a SCOPE_REF is returned. If NAME is a TEMPLATE_ID_EXPR, then it will be immediately returned; the name was already resolved when the TEMPLATE_ID_EXPR was formed. Abstractly, such entities should not be passed to this function, because they do not need to be looked up, but it is simpler to check for this special case here, rather than at the call-sites. In cases not explicitly covered above, this function returns a DECL, OVERLOAD, or baselink representing the result of the lookup. If there was no entity with the indicated NAME, the ERROR_MARK_NODE is returned. If TAG_TYPE is not NONE_TYPE, it indicates an explicit type keyword (e.g., "struct") that was used. In that case bindings that do not refer to types are ignored. If IS_TEMPLATE is TRUE, bindings that do not refer to templates are ignored. If IS_NAMESPACE is TRUE, bindings that do not refer to namespaces are ignored. If CHECK_DEPENDENCY is TRUE, names are not looked up in dependent types. If AMBIGUOUS_DECLS is non-NULL, *AMBIGUOUS_DECLS is set to a TREE_LIST of candidates if name-lookup results in an ambiguity, and NULL_TREE otherwise. 
*/ static tree cp_parser_lookup_name (cp_parser *parser, tree name, enum tag_types tag_type, bool is_template, bool is_namespace, bool check_dependency, tree *ambiguous_decls, location_t name_location) { tree decl; tree object_type = parser->context->object_type; /* Assume that the lookup will be unambiguous. */ if (ambiguous_decls) *ambiguous_decls = NULL_TREE; /* Now that we have looked up the name, the OBJECT_TYPE (if any) is no longer valid. Note that if we are parsing tentatively, and the parse fails, OBJECT_TYPE will be automatically restored. */ parser->context->object_type = NULL_TREE; if (name == error_mark_node) return error_mark_node; /* A template-id has already been resolved; there is no lookup to do. */ if (TREE_CODE (name) == TEMPLATE_ID_EXPR) return name; if (BASELINK_P (name)) { gcc_assert (TREE_CODE (BASELINK_FUNCTIONS (name)) == TEMPLATE_ID_EXPR); return name; } /* A BIT_NOT_EXPR is used to represent a destructor. By this point, it should already have been checked to make sure that the name used matches the type being destroyed. */ if (TREE_CODE (name) == BIT_NOT_EXPR) { tree type; /* Figure out to which type this destructor applies. */ if (parser->scope) type = parser->scope; else if (object_type) type = object_type; else type = current_class_type; /* If that's not a class type, there is no destructor. */ if (!type || !CLASS_TYPE_P (type)) return error_mark_node; if (CLASSTYPE_LAZY_DESTRUCTOR (type)) lazily_declare_fn (sfk_destructor, type); if (!CLASSTYPE_DESTRUCTORS (type)) return error_mark_node; /* If it was a class type, return the destructor. */ return CLASSTYPE_DESTRUCTORS (type); } /* By this point, the NAME should be an ordinary identifier. If the id-expression was a qualified name, the qualifying scope is stored in PARSER->SCOPE at this point. */ gcc_assert (identifier_p (name)); /* Perform the lookup. */ if (parser->scope) { bool dependent_p; if (parser->scope == error_mark_node) return error_mark_node; /* If the SCOPE is dependent, the lookup must be deferred until the template is instantiated -- unless we are explicitly looking up names in uninstantiated templates. Even then, we cannot look up the name if the scope is not a class type; it might, for example, be a template type parameter. */ dependent_p = (TYPE_P (parser->scope) && dependent_scope_p (parser->scope)); if ((check_dependency || !CLASS_TYPE_P (parser->scope)) && dependent_p) /* Defer lookup. */ decl = error_mark_node; else { tree pushed_scope = NULL_TREE; /* If PARSER->SCOPE is a dependent type, then it must be a class type, and we must not be checking dependencies; otherwise, we would have processed this lookup above. So that PARSER->SCOPE is not considered a dependent base by lookup_member, we must enter the scope here. */ if (dependent_p) pushed_scope = push_scope (parser->scope); /* If the PARSER->SCOPE is a template specialization, it may be instantiated during name lookup. In that case, errors may be issued. Even if we rollback the current tentative parse, those errors are valid. 
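	     For instance (an illustrative sketch), looking up

	       S<int>::member

	     may instantiate the specialization `S<int>' as a side
	     effect, and any diagnostics that instantiation produces
	     are permanent.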
	  */
	  decl = lookup_qualified_name (parser->scope, name,
					tag_type != none_type,
					/*complain=*/true);

	  /* 3.4.3.1: In a lookup in which the constructor is an
	     acceptable lookup result and the nested-name-specifier
	     nominates a class C:

	     * if the name specified after the
	     nested-name-specifier, when looked up in C, is the
	     injected-class-name of C (Clause 9), or

	     * if the name specified after the nested-name-specifier
	     is the same as the identifier or the
	     simple-template-id's template-name in the last
	     component of the nested-name-specifier,

	     the name is instead considered to name the constructor
	     of class C.

	     [ Note: for example, the constructor is not an
	     acceptable lookup result in an elaborated-type-specifier
	     so the constructor would not be used in place of the
	     injected-class-name.  --end note ]

	     Such a constructor name shall be used only in the
	     declarator-id of a declaration that names a constructor
	     or in a using-declaration.  */
	  if (tag_type == none_type
	      && DECL_SELF_REFERENCE_P (decl)
	      && same_type_p (DECL_CONTEXT (decl), parser->scope))
	    decl = lookup_qualified_name (parser->scope, ctor_identifier,
					  tag_type != none_type,
					  /*complain=*/true);

	  /* If we have a single function from a using decl, pull it out.  */
	  if (TREE_CODE (decl) == OVERLOAD
	      && !really_overloaded_fn (decl))
	    decl = OVL_FUNCTION (decl);

	  if (pushed_scope)
	    pop_scope (pushed_scope);
	}

      /* If the scope is a dependent type and either we deferred lookup or
	 we did lookup but didn't find the name, remember the name.  */
      if (decl == error_mark_node && TYPE_P (parser->scope)
	  && dependent_type_p (parser->scope))
	{
	  if (tag_type)
	    {
	      tree type;

	      /* The resolution to Core Issue 180 says that `struct
		 A::B' should be considered a type-name, even if `A'
		 is dependent.  */
	      type = make_typename_type (parser->scope, name, tag_type,
					 /*complain=*/tf_error);
	      if (type != error_mark_node)
		decl = TYPE_NAME (type);
	    }
	  else if (is_template
		   && (cp_parser_next_token_ends_template_argument_p (parser)
		       || cp_lexer_next_token_is (parser->lexer,
						  CPP_CLOSE_PAREN)))
	    decl = make_unbound_class_template (parser->scope,
						name, NULL_TREE,
						/*complain=*/tf_error);
	  else
	    decl = build_qualified_name (/*type=*/NULL_TREE,
					 parser->scope, name,
					 is_template);
	}
      parser->qualifying_scope = parser->scope;
      parser->object_scope = NULL_TREE;
    }
  else if (object_type)
    {
      /* Look up the name in the scope of the OBJECT_TYPE, unless the
	 OBJECT_TYPE is not a class.  */
      if (CLASS_TYPE_P (object_type))
	/* If the OBJECT_TYPE is a template specialization, it may
	   be instantiated during name lookup.  In that case, errors
	   may be issued.  Even if we rollback the current tentative
	   parse, those errors are valid.  */
	decl = lookup_member (object_type,
			      name,
			      /*protect=*/0,
			      tag_type != none_type,
			      tf_warning_or_error);
      else
	decl = NULL_TREE;

      if (!decl)
	/* Look it up in the enclosing context.  */
	decl = lookup_name_real (name, tag_type != none_type,
				 /*nonclass=*/0,
				 /*block_p=*/true, is_namespace, 0);
      parser->object_scope = object_type;
      parser->qualifying_scope = NULL_TREE;
    }
  else
    {
      decl = lookup_name_real (name, tag_type != none_type,
			       /*nonclass=*/0,
			       /*block_p=*/true, is_namespace, 0);
      parser->qualifying_scope = NULL_TREE;
      parser->object_scope = NULL_TREE;
    }

  /* If the lookup failed, let our caller know.  */
  if (!decl || decl == error_mark_node)
    return error_mark_node;

  /* Pull out the template from an injected-class-name (or multiple).  */
  if (is_template)
    decl = maybe_get_template_decl_from_type_decl (decl);

  /* If it's a TREE_LIST, the result of the lookup was ambiguous.
  */
  if (TREE_CODE (decl) == TREE_LIST)
    {
      if (ambiguous_decls)
	*ambiguous_decls = decl;
      /* The error message we have to print is too complicated for
	 cp_parser_error, so we incorporate its actions directly.  */
      if (!cp_parser_simulate_error (parser))
	{
	  error_at (name_location, "reference to %qD is ambiguous",
		    name);
	  print_candidates (decl);
	}
      return error_mark_node;
    }

  gcc_assert (DECL_P (decl)
	      || TREE_CODE (decl) == OVERLOAD
	      || TREE_CODE (decl) == SCOPE_REF
	      || TREE_CODE (decl) == UNBOUND_CLASS_TEMPLATE
	      || BASELINK_P (decl));

  /* If we have resolved the name of a member declaration, check to
     see if the declaration is accessible.  When the name resolves to
     a set of overloaded functions, accessibility is checked when
     overload resolution is done.

     During an explicit instantiation, access is not checked at all,
     as per [temp.explicit].  */
  if (DECL_P (decl))
    check_accessibility_of_qualified_id (decl, object_type, parser->scope);

  maybe_record_typedef_use (decl);

  return decl;
}

/* Like cp_parser_lookup_name, but for use in the typical case where
   CHECK_ACCESS is TRUE, IS_TYPE is FALSE, IS_TEMPLATE is FALSE,
   IS_NAMESPACE is FALSE, and CHECK_DEPENDENCY is TRUE.  */

static tree
cp_parser_lookup_name_simple (cp_parser* parser, tree name, location_t location)
{
  return cp_parser_lookup_name (parser, name,
				none_type,
				/*is_template=*/false,
				/*is_namespace=*/false,
				/*check_dependency=*/true,
				/*ambiguous_decls=*/NULL,
				location);
}

/* If DECL is a TEMPLATE_DECL that can be treated like a TYPE_DECL in
   the current context, return the TYPE_DECL.

   If TAG_NAME_P is true, the DECL indicates the class being defined
   in a class-head, or declared in an elaborated-type-specifier.

   Otherwise, return DECL.  */

static tree
cp_parser_maybe_treat_template_as_class (tree decl, bool tag_name_p)
{
  /* If the TEMPLATE_DECL is being declared as part of a class-head,
     the translation from TEMPLATE_DECL to TYPE_DECL occurs:

       struct A {
	 template <typename T> struct B;
       };

       template <typename T> struct A::B {};

     Similarly, in an elaborated-type-specifier:

       namespace N { struct X{}; }

       struct A {
	 template <typename T> friend struct N::X;
       };

     However, if the DECL refers to a class type, and we are in
     the scope of the class, then the name lookup automatically
     finds the TYPE_DECL created by build_self_reference rather
     than a TEMPLATE_DECL.  For example, in:

       template <class T> struct S {
	 S s;
       };

     there is no need to handle such a case.  */

  if (DECL_CLASS_TEMPLATE_P (decl) && tag_name_p)
    return DECL_TEMPLATE_RESULT (decl);

  return decl;
}

/* If too many, or too few, template-parameter lists apply to the
   declarator, issue an error message.  Returns TRUE if all went well,
   and FALSE otherwise.  */

static bool
cp_parser_check_declarator_template_parameters (cp_parser* parser,
						cp_declarator *declarator,
						location_t declarator_location)
{
  switch (declarator->kind)
    {
    case cdk_id:
      {
	unsigned num_templates = 0;
	tree scope = declarator->u.id.qualifying_scope;

	if (scope)
	  num_templates = num_template_headers_for_class (scope);
	else if (TREE_CODE (declarator->u.id.unqualified_name)
		 == TEMPLATE_ID_EXPR)
	  /* If the DECLARATOR has the form `X<y>' then it uses one
	     additional level of template parameters.
*/ ++num_templates; return cp_parser_check_template_parameters (parser, num_templates, declarator_location, declarator); } case cdk_function: case cdk_array: case cdk_pointer: case cdk_reference: case cdk_ptrmem: return (cp_parser_check_declarator_template_parameters (parser, declarator->declarator, declarator_location)); case cdk_error: return true; default: gcc_unreachable (); } return false; } /* NUM_TEMPLATES were used in the current declaration. If that is invalid, return FALSE and issue an error message. Otherwise, return TRUE. If DECLARATOR is non-NULL, then we are checking a declarator and we can print more accurate diagnostics. */ static bool cp_parser_check_template_parameters (cp_parser* parser, unsigned num_templates, location_t location, cp_declarator *declarator) { /* If there are the same number of template classes and parameter lists, that's OK. */ if (parser->num_template_parameter_lists == num_templates) return true; /* If there are more, but only one more, then we are referring to a member template. That's OK too. */ if (parser->num_template_parameter_lists == num_templates + 1) return true; /* If there are more template classes than parameter lists, we have something like: template <class T> void S<T>::R<T>::f (); */ if (parser->num_template_parameter_lists < num_templates) { if (declarator && !current_function_decl) error_at (location, "specializing member %<%T::%E%> " "requires %<template<>%> syntax", declarator->u.id.qualifying_scope, declarator->u.id.unqualified_name); else if (declarator) error_at (location, "invalid declaration of %<%T::%E%>", declarator->u.id.qualifying_scope, declarator->u.id.unqualified_name); else error_at (location, "too few template-parameter-lists"); return false; } /* Otherwise, there are too many template parameter lists. We have something like: template <class T> template <class U> void S::f(); */ error_at (location, "too many template-parameter-lists"); return false; } /* Parse an optional `::' token indicating that the following name is from the global namespace. If so, PARSER->SCOPE is set to the GLOBAL_NAMESPACE. Otherwise, PARSER->SCOPE is set to NULL_TREE, unless CURRENT_SCOPE_VALID_P is TRUE, in which case it is left alone. Returns the new value of PARSER->SCOPE, if the `::' token is present, and NULL_TREE otherwise. */ static tree cp_parser_global_scope_opt (cp_parser* parser, bool current_scope_valid_p) { cp_token *token; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* If we're looking at a `::' token then we're starting from the global namespace, not our current location. */ if (token->type == CPP_SCOPE) { /* Consume the `::' token. */ cp_lexer_consume_token (parser->lexer); /* Set the SCOPE so that we know where to start the lookup. */ parser->scope = global_namespace; parser->qualifying_scope = global_namespace; parser->object_scope = NULL_TREE; return parser->scope; } else if (!current_scope_valid_p) { parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; } return NULL_TREE; } /* Returns TRUE if the upcoming token sequence is the start of a constructor declarator. If FRIEND_P is true, the declarator is preceded by the `friend' specifier. */ static bool cp_parser_constructor_declarator_p (cp_parser *parser, bool friend_p) { bool constructor_p; bool outside_class_specifier_p; tree nested_name_specifier; cp_token *next_token; /* The common case is that this is not a constructor declarator, so try to avoid doing lots of work if at all possible.
It's not valid to declare a constructor at function scope. */ if (parser->in_function_body) return false; /* And only certain tokens can begin a constructor declarator. */ next_token = cp_lexer_peek_token (parser->lexer); if (next_token->type != CPP_NAME && next_token->type != CPP_SCOPE && next_token->type != CPP_NESTED_NAME_SPECIFIER && next_token->type != CPP_TEMPLATE_ID) return false; /* Parse tentatively; we are going to roll back all of the tokens consumed here. */ cp_parser_parse_tentatively (parser); /* Assume that we are looking at a constructor declarator. */ constructor_p = true; /* Look for the optional `::' operator. */ cp_parser_global_scope_opt (parser, /*current_scope_valid_p=*/false); /* Look for the nested-name-specifier. */ nested_name_specifier = (cp_parser_nested_name_specifier_opt (parser, /*typename_keyword_p=*/false, /*check_dependency_p=*/false, /*type_p=*/false, /*is_declaration=*/false)); outside_class_specifier_p = (!at_class_scope_p () || !TYPE_BEING_DEFINED (current_class_type) || friend_p); /* Outside of a class-specifier, there must be a nested-name-specifier. */ if (!nested_name_specifier && outside_class_specifier_p) constructor_p = false; else if (nested_name_specifier == error_mark_node) constructor_p = false; /* If we have a class scope, this is easy; DR 147 says that S::S always names the constructor, and no other qualified name could. */ if (constructor_p && nested_name_specifier && CLASS_TYPE_P (nested_name_specifier)) { tree id = cp_parser_unqualified_id (parser, /*template_keyword_p=*/false, /*check_dependency_p=*/false, /*declarator_p=*/true, /*optional_p=*/false); if (is_overloaded_fn (id)) id = DECL_NAME (get_first_fn (id)); if (!constructor_name_p (id, nested_name_specifier)) constructor_p = false; } /* If we still think that this might be a constructor-declarator, look for a class-name. */ else if (constructor_p) { /* If we have: template <typename T> struct S { S(); }; we must recognize that the nested `S' names a class. */ tree type_decl; type_decl = cp_parser_class_name (parser, /*typename_keyword_p=*/false, /*template_keyword_p=*/false, none_type, /*check_dependency_p=*/false, /*class_head_p=*/false, /*is_declaration=*/false); /* If there was no class-name, then this is not a constructor. Otherwise, if we are in a class-specifier and we aren't handling a friend declaration, check that its type matches current_class_type (c++/38313). Note: error_mark_node is left alone for error recovery purposes. */ constructor_p = (!cp_parser_error_occurred (parser) && (outside_class_specifier_p || type_decl == error_mark_node || same_type_p (current_class_type, TREE_TYPE (type_decl)))); /* If we're still considering a constructor, we have to see a `(', to begin the parameter-declaration-clause, followed by either a `)', an `...', or a decl-specifier. We need to check for a type-specifier to avoid being fooled into thinking that: S (f) (int); is a constructor. (It is actually a function named `f' that takes one parameter (of type `int') and returns a value of type `S'.) */ if (constructor_p && !cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) constructor_p = false; if (constructor_p && cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN) && cp_lexer_next_token_is_not (parser->lexer, CPP_ELLIPSIS) /* A parameter declaration begins with a decl-specifier, which is either the "attribute" keyword, a storage class specifier, or (usually) a type-specifier.
*/ && !cp_lexer_next_token_is_decl_specifier_keyword (parser->lexer)) { tree type; tree pushed_scope = NULL_TREE; unsigned saved_num_template_parameter_lists; /* Names appearing in the type-specifier should be looked up in the scope of the class. */ if (current_class_type) type = NULL_TREE; else { type = TREE_TYPE (type_decl); if (TREE_CODE (type) == TYPENAME_TYPE) { type = resolve_typename_type (type, /*only_current_p=*/false); if (TREE_CODE (type) == TYPENAME_TYPE) { cp_parser_abort_tentative_parse (parser); return false; } } pushed_scope = push_scope (type); } /* Inside the constructor parameter list, surrounding template-parameter-lists do not apply. */ saved_num_template_parameter_lists = parser->num_template_parameter_lists; parser->num_template_parameter_lists = 0; /* Look for the type-specifier. */ cp_parser_type_specifier (parser, CP_PARSER_FLAGS_NONE, /*decl_specs=*/NULL, /*is_declarator=*/true, /*declares_class_or_enum=*/NULL, /*is_cv_qualifier=*/NULL); parser->num_template_parameter_lists = saved_num_template_parameter_lists; /* Leave the scope of the class. */ if (pushed_scope) pop_scope (pushed_scope); constructor_p = !cp_parser_error_occurred (parser); } } /* We did not really want to consume any tokens. */ cp_parser_abort_tentative_parse (parser); return constructor_p; } /* Parse the definition of the function given by the DECL_SPECIFIERS, ATTRIBUTES, and DECLARATOR. The access checks have been deferred; they must be performed once we are in the scope of the function. Returns the function defined. */ static tree cp_parser_function_definition_from_specifiers_and_declarator (cp_parser* parser, cp_decl_specifier_seq *decl_specifiers, tree attributes, const cp_declarator *declarator) { tree fn; bool success_p; /* Begin the function-definition. */ success_p = start_function (decl_specifiers, declarator, attributes); /* The things we're about to see are not directly qualified by any template headers we've seen thus far. */ reset_specialization (); /* If there were names looked up in the decl-specifier-seq that we did not check, check them now. We must wait until we are in the scope of the function to perform the checks, since the function might be a friend. */ perform_deferred_access_checks (tf_warning_or_error); if (success_p) { cp_finalize_omp_declare_simd (parser, current_function_decl); parser->omp_declare_simd = NULL; } if (!success_p) { /* Skip the entire function. */ cp_parser_skip_to_end_of_block_or_statement (parser); fn = error_mark_node; } else if (DECL_INITIAL (current_function_decl) != error_mark_node) { /* Seen already, skip it. An error message has already been output. */ cp_parser_skip_to_end_of_block_or_statement (parser); fn = current_function_decl; current_function_decl = NULL_TREE; /* If this is a function from a class, pop the nested class. */ if (current_class_name) pop_nested_class (); } else { timevar_id_t tv; if (DECL_DECLARED_INLINE_P (current_function_decl)) tv = TV_PARSE_INLINE; else tv = TV_PARSE_FUNC; timevar_push (tv); fn = cp_parser_function_definition_after_declarator (parser, /*inline_p=*/false); timevar_pop (tv); } return fn; } /* Parse the part of a function-definition that follows the declarator. INLINE_P is TRUE iff this function is an inline function defined within a class-specifier. Returns the function defined. 
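The part parsed here is the optional ctor-initializer (or a function-try-block) and the function-body, e.g. everything after the declarator `S (int i)' in: struct S { int j; S (int i) : j (i) { } };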
*/ static tree cp_parser_function_definition_after_declarator (cp_parser* parser, bool inline_p) { tree fn; bool ctor_initializer_p = false; bool saved_in_unbraced_linkage_specification_p; bool saved_in_function_body; unsigned saved_num_template_parameter_lists; cp_token *token; bool fully_implicit_function_template_p = parser->fully_implicit_function_template_p; parser->fully_implicit_function_template_p = false; tree implicit_template_parms = parser->implicit_template_parms; parser->implicit_template_parms = 0; cp_binding_level* implicit_template_scope = parser->implicit_template_scope; parser->implicit_template_scope = 0; saved_in_function_body = parser->in_function_body; parser->in_function_body = true; /* If the next token is `return', then the code may be trying to make use of the "named return value" extension that G++ used to support. */ token = cp_lexer_peek_token (parser->lexer); if (cp_lexer_next_token_is_keyword (parser->lexer, RID_RETURN)) { /* Consume the `return' keyword. */ cp_lexer_consume_token (parser->lexer); /* Look for the identifier that indicates what value is to be returned. */ cp_parser_identifier (parser); /* Issue an error message. */ error_at (token->location, "named return values are no longer supported"); /* Skip tokens until we reach the start of the function body. */ while (true) { cp_token *token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_OPEN_BRACE || token->type == CPP_EOF || token->type == CPP_PRAGMA_EOL) break; cp_lexer_consume_token (parser->lexer); } } /* The `extern' in `extern "C" void f () { ... }' does not apply to anything declared inside `f'. */ saved_in_unbraced_linkage_specification_p = parser->in_unbraced_linkage_specification_p; parser->in_unbraced_linkage_specification_p = false; /* Inside the function, surrounding template-parameter-lists do not apply. */ saved_num_template_parameter_lists = parser->num_template_parameter_lists; parser->num_template_parameter_lists = 0; start_lambda_scope (current_function_decl); /* If the next token is `try', `__transaction_atomic', or `__transaction_relaxed`, then we are looking at either function-try-block or function-transaction-block. Note that all of these include the function-body. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRANSACTION_ATOMIC)) ctor_initializer_p = cp_parser_function_transaction (parser, RID_TRANSACTION_ATOMIC); else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRANSACTION_RELAXED)) ctor_initializer_p = cp_parser_function_transaction (parser, RID_TRANSACTION_RELAXED); else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY)) ctor_initializer_p = cp_parser_function_try_block (parser); else ctor_initializer_p = cp_parser_ctor_initializer_opt_and_function_body (parser, /*in_function_try_block=*/false); finish_lambda_scope (); /* Finish the function. */ fn = finish_function ((ctor_initializer_p ? 1 : 0) | (inline_p ? 2 : 0)); /* Generate code for it, if necessary. */ expand_or_defer_fn (fn); /* Restore the saved values. 
*/ parser->in_unbraced_linkage_specification_p = saved_in_unbraced_linkage_specification_p; parser->num_template_parameter_lists = saved_num_template_parameter_lists; parser->in_function_body = saved_in_function_body; parser->fully_implicit_function_template_p = fully_implicit_function_template_p; parser->implicit_template_parms = implicit_template_parms; parser->implicit_template_scope = implicit_template_scope; if (parser->fully_implicit_function_template_p) finish_fully_implicit_template (parser, /*member_decl_opt=*/0); return fn; } /* Parse a template-declaration, assuming that the `export' (and `extern') keywords, if present, have already been scanned. MEMBER_P is as for cp_parser_template_declaration. */ static void cp_parser_template_declaration_after_export (cp_parser* parser, bool member_p) { tree decl = NULL_TREE; vec<deferred_access_check, va_gc> *checks; tree parameter_list; bool friend_p = false; bool need_lang_pop; cp_token *token; /* Look for the `template' keyword. */ token = cp_lexer_peek_token (parser->lexer); if (!cp_parser_require_keyword (parser, RID_TEMPLATE, RT_TEMPLATE)) return; /* And the `<'. */ if (!cp_parser_require (parser, CPP_LESS, RT_LESS)) return; if (at_class_scope_p () && current_function_decl) { /* 14.5.2.2 [temp.mem] A local class shall not have member templates. */ error_at (token->location, "invalid declaration of member template in local class"); cp_parser_skip_to_end_of_block_or_statement (parser); return; } /* [temp] A template ... shall not have C linkage. */ if (current_lang_name == lang_name_c) { error_at (token->location, "template with C linkage"); /* Give it C++ linkage to avoid confusing other parts of the front end. */ push_lang_context (lang_name_cplusplus); need_lang_pop = true; } else need_lang_pop = false; /* We cannot perform access checks on the template parameter declarations until we know what is being declared, just as we cannot check the decl-specifier list. */ push_deferring_access_checks (dk_deferred); /* If the next token is `>', then we have an invalid specialization. Rather than complain about an invalid template parameter, issue an error message here. */ if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER)) { cp_parser_error (parser, "invalid explicit specialization"); begin_specialization (); parameter_list = NULL_TREE; } else { /* Parse the template parameters. */ parameter_list = cp_parser_template_parameter_list (parser); } /* Get the deferred access checks from the parameter list. These will be checked once we know what is being declared, as for a member template the checks must be performed in the scope of the class containing the member. */ checks = get_deferred_access_checks (); /* Look for the `>'. */ cp_parser_skip_to_end_of_template_parameter_list (parser); /* We just processed one more parameter list. */ ++parser->num_template_parameter_lists; /* If the next token is `template', there are more template parameters. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE)) cp_parser_template_declaration_after_export (parser, member_p); else if (cxx_dialect >= cxx11 && cp_lexer_next_token_is_keyword (parser->lexer, RID_USING)) decl = cp_parser_alias_declaration (parser); else { /* There are no access checks when parsing a template, as we do not know if a specialization will be a friend.
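For instance, with: template <typename T> struct G; class C { static int i; friend struct G<int>; }; template <typename T> struct G { int f () { return C::i; } }; the access to C::i is valid in G<int> but not in other specializations, so it cannot be checked while the template itself is being parsed.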
*/ push_deferring_access_checks (dk_no_check); token = cp_lexer_peek_token (parser->lexer); decl = cp_parser_single_declaration (parser, checks, member_p, /*explicit_specialization_p=*/false, &friend_p); pop_deferring_access_checks (); /* If this is a member template declaration, let the front end know. */ if (member_p && !friend_p && decl) { if (TREE_CODE (decl) == TYPE_DECL) cp_parser_check_access_in_redeclaration (decl, token->location); decl = finish_member_template_decl (decl); } else if (friend_p && decl && DECL_DECLARES_TYPE_P (decl)) make_friend_class (current_class_type, TREE_TYPE (decl), /*complain=*/true); } /* We are done with the current parameter list. */ --parser->num_template_parameter_lists; pop_deferring_access_checks (); /* Finish up. */ finish_template_decl (parameter_list); /* Check the template arguments for a literal operator template. */ if (decl && DECL_DECLARES_FUNCTION_P (decl) && UDLIT_OPER_P (DECL_NAME (decl))) { bool ok = true; if (parameter_list == NULL_TREE) ok = false; else { int num_parms = TREE_VEC_LENGTH (parameter_list); if (num_parms == 1) { tree parm_list = TREE_VEC_ELT (parameter_list, 0); tree parm = INNERMOST_TEMPLATE_PARMS (parm_list); if (TREE_TYPE (parm) != char_type_node || !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))) ok = false; } else if (num_parms == 2 && cxx_dialect >= cxx14) { tree parm_type = TREE_VEC_ELT (parameter_list, 0); tree type = INNERMOST_TEMPLATE_PARMS (parm_type); tree parm_list = TREE_VEC_ELT (parameter_list, 1); tree parm = INNERMOST_TEMPLATE_PARMS (parm_list); if (TREE_TYPE (parm) != TREE_TYPE (type) || !TEMPLATE_PARM_PARAMETER_PACK (DECL_INITIAL (parm))) ok = false; } else ok = false; } if (!ok) { if (cxx_dialect >= cxx14) error ("literal operator template %qD has invalid parameter list." " Expected non-type template argument pack <char...>" " or <typename CharT, CharT...>", decl); else error ("literal operator template %qD has invalid parameter list." " Expected non-type template argument pack <char...>", decl); } } /* Register member declarations. */ if (member_p && !friend_p && decl && !DECL_CLASS_TEMPLATE_P (decl)) finish_member_declaration (decl); /* For the erroneous case of a template with C linkage, we pushed an implicit C++ linkage scope; exit that scope now. */ if (need_lang_pop) pop_lang_context (); /* If DECL is a function template, we must return to parse it later. (Even though there is no definition, there might be default arguments that need handling.) */ if (member_p && decl && DECL_DECLARES_FUNCTION_P (decl)) vec_safe_push (unparsed_funs_with_definitions, decl); } /* Perform the deferred access checks from a template-parameter-list. CHECKS is a TREE_LIST of access checks, as returned by get_deferred_access_checks. */ static void cp_parser_perform_template_parameter_access_checks (vec<deferred_access_check, va_gc> *checks) { ++processing_template_parmlist; perform_access_checks (checks, tf_warning_or_error); --processing_template_parmlist; } /* Parse a `decl-specifier-seq [opt] init-declarator [opt] ;' or `function-definition' sequence that follows a template header. If MEMBER_P is true, this declaration appears in a class scope. Returns the DECL for the declared entity. If FRIEND_P is non-NULL, *FRIEND_P is set to TRUE iff the declaration is a friend. 
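For example, after the `template <typename T>' header in: template <typename T> T min (T a, T b) { return b < a ? b : a; } this function parses the remainder, `T min (T a, T b) { ... }'.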
*/ static tree cp_parser_single_declaration (cp_parser* parser, vec<deferred_access_check, va_gc> *checks, bool member_p, bool explicit_specialization_p, bool* friend_p) { int declares_class_or_enum; tree decl = NULL_TREE; cp_decl_specifier_seq decl_specifiers; bool function_definition_p = false; cp_token *decl_spec_token_start; /* This function is only used when processing a template declaration. */ gcc_assert (innermost_scope_kind () == sk_template_parms || innermost_scope_kind () == sk_template_spec); /* Defer access checks until we know what is being declared. */ push_deferring_access_checks (dk_deferred); /* Try the `decl-specifier-seq [opt] init-declarator [opt]' alternative. */ decl_spec_token_start = cp_lexer_peek_token (parser->lexer); cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_OPTIONAL, &decl_specifiers, &declares_class_or_enum); if (friend_p) *friend_p = cp_parser_friend_p (&decl_specifiers); /* There are no template typedefs. */ if (decl_spec_seq_has_spec_p (&decl_specifiers, ds_typedef)) { error_at (decl_spec_token_start->location, "template declaration of %<typedef%>"); decl = error_mark_node; } /* Gather up the access checks that occurred in the decl-specifier-seq. */ stop_deferring_access_checks (); /* Check for the declaration of a template class. */ if (declares_class_or_enum) { if (cp_parser_declares_only_class_p (parser)) { decl = shadow_tag (&decl_specifiers); /* In this case: struct C { friend template <typename T> struct A<T>::B; }; A<T>::B will be represented by a TYPENAME_TYPE, and therefore not recognized by shadow_tag. */ if (friend_p && *friend_p && !decl && decl_specifiers.type && TYPE_P (decl_specifiers.type)) decl = decl_specifiers.type; if (decl && decl != error_mark_node) decl = TYPE_NAME (decl); else decl = error_mark_node; /* Perform access checks for template parameters. */ cp_parser_perform_template_parameter_access_checks (checks); } } /* Complain about missing 'typename' or other invalid type names. */ if (!decl_specifiers.any_type_specifiers_p && cp_parser_parse_and_diagnose_invalid_type_name (parser)) { /* cp_parser_parse_and_diagnose_invalid_type_name calls cp_parser_skip_to_end_of_block_or_statement, so don't try to parse the rest of this declaration. */ decl = error_mark_node; goto out; } /* If it's not a template class, try for a template function. If the next token is a `;', then this declaration does not declare anything. But, if there were errors in the decl-specifiers, then the error might well have come from an attempted class-specifier. In that case, there's no need to warn about a missing declarator. */ if (!decl && (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON) || decl_specifiers.type != error_mark_node)) { decl = cp_parser_init_declarator (parser, &decl_specifiers, checks, /*function_definition_allowed_p=*/true, member_p, declares_class_or_enum, &function_definition_p, NULL, NULL); /* 7.1.1-1 [dcl.stc] A storage-class-specifier shall not be specified in an explicit specialization... */ if (decl && explicit_specialization_p && decl_specifiers.storage_class != sc_none) { error_at (decl_spec_token_start->location, "explicit template specialization cannot have a storage class"); decl = error_mark_node; } if (decl && VAR_P (decl)) check_template_variable (decl); } /* Look for a trailing `;' after the declaration.
*/ if (!function_definition_p && (decl == error_mark_node || !cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))) cp_parser_skip_to_end_of_block_or_statement (parser); out: pop_deferring_access_checks (); /* Clear any current qualification; whatever comes next is the start of something new. */ parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; return decl; } /* Parse a cast-expression that is not the operand of a unary "&". */ static tree cp_parser_simple_cast_expression (cp_parser *parser) { return cp_parser_cast_expression (parser, /*address_p=*/false, /*cast_p=*/false, /*decltype*/false, NULL); } /* Parse a functional cast to TYPE. Returns an expression representing the cast. */ static tree cp_parser_functional_cast (cp_parser* parser, tree type) { vec<tree, va_gc> *vec; tree expression_list; tree cast; bool nonconst_p; if (!type) type = error_mark_node; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { cp_lexer_set_source_position (parser->lexer); maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); expression_list = cp_parser_braced_list (parser, &nonconst_p); CONSTRUCTOR_IS_DIRECT_INIT (expression_list) = 1; if (TREE_CODE (type) == TYPE_DECL) type = TREE_TYPE (type); return finish_compound_literal (type, expression_list, tf_warning_or_error); } vec = cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/true, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL); if (vec == NULL) expression_list = error_mark_node; else { expression_list = build_tree_list_vec (vec); release_tree_vector (vec); } cast = build_functional_cast (type, expression_list, tf_warning_or_error); /* [expr.const]/1: In an integral constant expression "only type conversions to integral or enumeration type can be used". */ if (TREE_CODE (type) == TYPE_DECL) type = TREE_TYPE (type); if (cast != error_mark_node && !cast_valid_in_integral_constant_expression_p (type) && cp_parser_non_integral_constant_expression (parser, NIC_CONSTRUCTOR)) return error_mark_node; return cast; } /* Save the tokens that make up the body of a member function defined in a class-specifier. The DECL_SPECIFIERS and DECLARATOR have already been parsed. The ATTRIBUTES are any GNU "__attribute__" specifiers applied to the declaration. Returns the FUNCTION_DECL for the member function. */ static tree cp_parser_save_member_function_body (cp_parser* parser, cp_decl_specifier_seq *decl_specifiers, cp_declarator *declarator, tree attributes) { cp_token *first; cp_token *last; tree fn; /* Create the FUNCTION_DECL. */ fn = grokmethod (decl_specifiers, declarator, attributes); cp_finalize_omp_declare_simd (parser, fn); /* If something went badly wrong, bail out now. */ if (fn == error_mark_node) { /* If there's a function-body, skip it. */ if (cp_parser_token_starts_function_definition_p (cp_lexer_peek_token (parser->lexer))) cp_parser_skip_to_end_of_block_or_statement (parser); return error_mark_node; } /* Remember it, if there are default args to post process. */ cp_parser_save_default_args (parser, fn); /* Save away the tokens that make up the body of the function. */ first = parser->lexer->next_token; /* Handle function try blocks. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY)) cp_lexer_consume_token (parser->lexer); /* We can have braced-init-list mem-initializers before the fn body.
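For example: struct S { int i; S () : i{0} { } }; where the `i{0}' mem-initializer must be cached along with the body.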
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_COLON)) { cp_lexer_consume_token (parser->lexer); while (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_BRACE)) { /* cache_group will stop after an un-nested { } pair, too. */ if (cp_parser_cache_group (parser, CPP_CLOSE_PAREN, /*depth=*/0)) break; /* variadic mem-inits have ... after the ')'. */ if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) cp_lexer_consume_token (parser->lexer); } } cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0); /* Handle function try blocks. */ while (cp_lexer_next_token_is_keyword (parser->lexer, RID_CATCH)) cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0); last = parser->lexer->next_token; /* Save away the inline definition; we will process it when the class is complete. */ DECL_PENDING_INLINE_INFO (fn) = cp_token_cache_new (first, last); DECL_PENDING_INLINE_P (fn) = 1; /* We need to know that this was defined in the class, so that friend templates are handled correctly. */ DECL_INITIALIZED_IN_CLASS_P (fn) = 1; /* Add FN to the queue of functions to be parsed later. */ vec_safe_push (unparsed_funs_with_definitions, fn); return fn; } /* Save the tokens that make up the in-class initializer for a non-static data member. Returns a DEFAULT_ARG. */ static tree cp_parser_save_nsdmi (cp_parser* parser) { return cp_parser_cache_defarg (parser, /*nsdmi=*/true); } /* Parse a template-argument-list, as well as the trailing ">" (but not the opening "<"). See cp_parser_template_argument_list for the return value. */ static tree cp_parser_enclosed_template_argument_list (cp_parser* parser) { tree arguments; tree saved_scope; tree saved_qualifying_scope; tree saved_object_scope; bool saved_greater_than_is_operator_p; int saved_unevaluated_operand; int saved_inhibit_evaluation_warnings; /* [temp.names] When parsing a template-id, the first non-nested `>' is taken as the end of the template-argument-list rather than a greater-than operator. */ saved_greater_than_is_operator_p = parser->greater_than_is_operator_p; parser->greater_than_is_operator_p = false; /* Parsing the argument list may modify SCOPE, so we save it here. */ saved_scope = parser->scope; saved_qualifying_scope = parser->qualifying_scope; saved_object_scope = parser->object_scope; /* We need to evaluate the template arguments, even though this template-id may be nested within a "sizeof". */ saved_unevaluated_operand = cp_unevaluated_operand; cp_unevaluated_operand = 0; saved_inhibit_evaluation_warnings = c_inhibit_evaluation_warnings; c_inhibit_evaluation_warnings = 0; /* Parse the template-argument-list itself. */ if (cp_lexer_next_token_is (parser->lexer, CPP_GREATER) || cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT)) arguments = NULL_TREE; else arguments = cp_parser_template_argument_list (parser); /* Look for the `>' that ends the template-argument-list. If we find a '>>' instead, it's probably just a typo. */ if (cp_lexer_next_token_is (parser->lexer, CPP_RSHIFT)) { if (cxx_dialect != cxx98) { /* In C++0x, a `>>' in a template argument list or cast expression is considered to be two separate `>' tokens. So, change the current token to a `>', but don't consume it: it will be consumed later when the outer template argument list (or cast expression) is parsed. 
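For example, in `vector<vector<int>> v;' the first `>' of the `>>' token closes the inner template-argument-list and the second closes the outer one.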
Note that this replacement of `>' for `>>' is necessary even if we are parsing tentatively: in the tentative case, after calling cp_parser_enclosed_template_argument_list we will always throw away all of the template arguments and the first closing `>', either because the template argument list was erroneous or because we are replacing those tokens with a CPP_TEMPLATE_ID token. The second `>' (which will not have been thrown away) is needed either to close an outer template argument list or to complete a new-style cast. */ cp_token *token = cp_lexer_peek_token (parser->lexer); token->type = CPP_GREATER; } else if (!saved_greater_than_is_operator_p) { /* If we're in a nested template argument list, the '>>' has to be a typo for '> >'. We emit the error message, but we continue parsing and we push a '>' as next token, so that the argument list will be parsed correctly. Note that the global source location is still on the token before the '>>', so we need to say explicitly where we want it. */ cp_token *token = cp_lexer_peek_token (parser->lexer); error_at (token->location, "%<>>%> should be %<> >%> " "within a nested template argument list"); token->type = CPP_GREATER; } else { /* If this is not a nested template argument list, the '>>' is a typo for '>'. Emit an error message and continue. Same deal about the token location, but here we can get it right by consuming the '>>' before issuing the diagnostic. */ cp_token *token = cp_lexer_consume_token (parser->lexer); error_at (token->location, "spurious %<>>%>, use %<>%> to terminate " "a template argument list"); } } else cp_parser_skip_to_end_of_template_parameter_list (parser); /* The `>' token might be a greater-than operator again now. */ parser->greater_than_is_operator_p = saved_greater_than_is_operator_p; /* Restore the SAVED_SCOPE. */ parser->scope = saved_scope; parser->qualifying_scope = saved_qualifying_scope; parser->object_scope = saved_object_scope; cp_unevaluated_operand = saved_unevaluated_operand; c_inhibit_evaluation_warnings = saved_inhibit_evaluation_warnings; return arguments; } /* MEMBER_FUNCTION is a member function, or a friend. If default arguments, or the body of the function have not yet been parsed, parse them now. */ static void cp_parser_late_parsing_for_member (cp_parser* parser, tree member_function) { timevar_push (TV_PARSE_INMETH); /* If this member is a template, get the underlying FUNCTION_DECL. */ if (DECL_FUNCTION_TEMPLATE_P (member_function)) member_function = DECL_TEMPLATE_RESULT (member_function); /* There should not be any class definitions in progress at this point; the bodies of members are only parsed outside of all class definitions. */ gcc_assert (parser->num_classes_being_defined == 0); /* While we're parsing the member functions we might encounter more classes. We want to handle them right away, but we don't want them getting mixed up with functions that are currently in the queue. */ push_unparsed_function_queues (parser); /* Make sure that any template parameters are in scope. */ maybe_begin_member_template_processing (member_function); /* If the body of the function has not yet been parsed, parse it now. */ if (DECL_PENDING_INLINE_P (member_function)) { tree function_scope; cp_token_cache *tokens; /* The function is no longer pending; we are processing it. */ tokens = DECL_PENDING_INLINE_INFO (member_function); DECL_PENDING_INLINE_INFO (member_function) = NULL; DECL_PENDING_INLINE_P (member_function) = 0; /* If this is a local class, enter the scope of the containing function. 
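For example, for: void f () { struct L { int g () { return 0; } }; } the body of L::g is parsed with f's function context pushed.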
*/ function_scope = current_function_decl; if (function_scope) push_function_context (); /* Push the body of the function onto the lexer stack. */ cp_parser_push_lexer_for_tokens (parser, tokens); /* Let the front end know that we are going to be defining this function. */ start_preparsed_function (member_function, NULL_TREE, SF_PRE_PARSED | SF_INCLASS_INLINE); /* Don't do access checking if it is a templated function. */ if (processing_template_decl) push_deferring_access_checks (dk_no_check); /* #pragma omp declare reduction needs special parsing. */ if (DECL_OMP_DECLARE_REDUCTION_P (member_function)) { parser->lexer->in_pragma = true; cp_parser_omp_declare_reduction_exprs (member_function, parser); finish_function (/*inline*/2); cp_check_omp_declare_reduction (member_function); } else /* Now, parse the body of the function. */ cp_parser_function_definition_after_declarator (parser, /*inline_p=*/true); if (processing_template_decl) pop_deferring_access_checks (); /* Leave the scope of the containing function. */ if (function_scope) pop_function_context (); cp_parser_pop_lexer (parser); } /* Remove any template parameters from the symbol table. */ maybe_end_member_template_processing (); /* Restore the queue. */ pop_unparsed_function_queues (parser); timevar_pop (TV_PARSE_INMETH); } /* If DECL contains any default args, remember it on the unparsed functions queue. */ static void cp_parser_save_default_args (cp_parser* parser, tree decl) { tree probe; for (probe = TYPE_ARG_TYPES (TREE_TYPE (decl)); probe; probe = TREE_CHAIN (probe)) if (TREE_PURPOSE (probe)) { cp_default_arg_entry entry = {current_class_type, decl}; vec_safe_push (unparsed_funs_with_default_args, entry); break; } } /* DEFAULT_ARG contains the saved tokens for the initializer of DECL, which is either a FIELD_DECL or PARM_DECL. Parse it and return the result. For a PARM_DECL, PARMTYPE is the corresponding type from the parameter-type-list. */ static tree cp_parser_late_parse_one_default_arg (cp_parser *parser, tree decl, tree default_arg, tree parmtype) { cp_token_cache *tokens; tree parsed_arg; bool dummy; if (default_arg == error_mark_node) return error_mark_node; /* Push the saved tokens for the default argument onto the parser's lexer stack. */ tokens = DEFARG_TOKENS (default_arg); cp_parser_push_lexer_for_tokens (parser, tokens); start_lambda_scope (decl); /* Parse the default argument. */ parsed_arg = cp_parser_initializer (parser, &dummy, &dummy); if (BRACE_ENCLOSED_INITIALIZER_P (parsed_arg)) maybe_warn_cpp0x (CPP0X_INITIALIZER_LISTS); finish_lambda_scope (); if (parsed_arg == error_mark_node) cp_parser_skip_to_end_of_statement (parser); if (!processing_template_decl) { /* In a non-template class, check conversions now. In a template, we'll wait and instantiate these as needed. */ if (TREE_CODE (decl) == PARM_DECL) parsed_arg = check_default_argument (parmtype, parsed_arg, tf_warning_or_error); else parsed_arg = digest_nsdmi_init (decl, parsed_arg); } /* If the token stream has not been completely used up, then there was extra junk after the end of the default argument. */ if (!cp_lexer_next_token_is (parser->lexer, CPP_EOF)) { if (TREE_CODE (decl) == PARM_DECL) cp_parser_error (parser, "expected %<,%>"); else cp_parser_error (parser, "expected %<;%>"); } /* Revert to the main lexer. */ cp_parser_pop_lexer (parser); return parsed_arg; } /* FIELD is a non-static data member with an initializer which we saved for later; parse it now.
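For example, the `= 42' in: struct S { int i = 42; }; is saved as tokens and only parsed here, once S is complete.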
*/ static void cp_parser_late_parsing_nsdmi (cp_parser *parser, tree field) { tree def; maybe_begin_member_template_processing (field); push_unparsed_function_queues (parser); def = cp_parser_late_parse_one_default_arg (parser, field, DECL_INITIAL (field), NULL_TREE); pop_unparsed_function_queues (parser); maybe_end_member_template_processing (); DECL_INITIAL (field) = def; } /* FN is a FUNCTION_DECL which may contain a parameter with an unparsed DEFAULT_ARG. Parse the default args now. This function assumes that the current scope is the scope in which the default argument should be processed. */ static void cp_parser_late_parsing_default_args (cp_parser *parser, tree fn) { bool saved_local_variables_forbidden_p; tree parm, parmdecl; /* While we're parsing the default args, we might (due to the statement expression extension) encounter more classes. We want to handle them right away, but we don't want them getting mixed up with default args that are currently in the queue. */ push_unparsed_function_queues (parser); /* Local variable names (and the `this' keyword) may not appear in a default argument. */ saved_local_variables_forbidden_p = parser->local_variables_forbidden_p; parser->local_variables_forbidden_p = true; push_defarg_context (fn); for (parm = TYPE_ARG_TYPES (TREE_TYPE (fn)), parmdecl = DECL_ARGUMENTS (fn); parm && parm != void_list_node; parm = TREE_CHAIN (parm), parmdecl = DECL_CHAIN (parmdecl)) { tree default_arg = TREE_PURPOSE (parm); tree parsed_arg; vec<tree, va_gc> *insts; tree copy; unsigned ix; if (!default_arg) continue; if (TREE_CODE (default_arg) != DEFAULT_ARG) /* This can happen for a friend declaration for a function already declared with default arguments. */ continue; parsed_arg = cp_parser_late_parse_one_default_arg (parser, parmdecl, default_arg, TREE_VALUE (parm)); if (parsed_arg == error_mark_node) { continue; } TREE_PURPOSE (parm) = parsed_arg; /* Update any instantiations we've already created. */ for (insts = DEFARG_INSTANTIATIONS (default_arg), ix = 0; vec_safe_iterate (insts, ix, &copy); ix++) TREE_PURPOSE (copy) = parsed_arg; } pop_defarg_context (); /* Make sure no default arg is missing. */ check_default_args (fn); /* Restore the state of local_variables_forbidden_p. */ parser->local_variables_forbidden_p = saved_local_variables_forbidden_p; /* Restore the queue. */ pop_unparsed_function_queues (parser); } /* Subroutine of cp_parser_sizeof_operand, for handling C++11 sizeof ... ( identifier ) where the 'sizeof' token has already been consumed. */ static tree cp_parser_sizeof_pack (cp_parser *parser) { /* Consume the `...'. */ cp_lexer_consume_token (parser->lexer); maybe_warn_variadic_templates (); bool paren = cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN); if (paren) cp_lexer_consume_token (parser->lexer); else permerror (cp_lexer_peek_token (parser->lexer)->location, "%<sizeof...%> argument must be surrounded by parentheses"); cp_token *token = cp_lexer_peek_token (parser->lexer); tree name = cp_parser_identifier (parser); if (name == error_mark_node) return error_mark_node; /* The name is not qualified.
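In `sizeof... (args)' the argument is always a simple identifier naming a parameter pack, as in: template <typename... T> unsigned f (T... args) { return sizeof... (args); } so any qualification left over from earlier parsing is cleared before the lookup below.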
*/ parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; tree expr = cp_parser_lookup_name_simple (parser, name, token->location); if (expr == error_mark_node) cp_parser_name_lookup_error (parser, name, expr, NLE_NULL, token->location); if (TREE_CODE (expr) == TYPE_DECL) expr = TREE_TYPE (expr); else if (TREE_CODE (expr) == CONST_DECL) expr = DECL_INITIAL (expr); expr = make_pack_expansion (expr); if (paren) cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); return expr; } /* Parse the operand of `sizeof' (or a similar operator). Returns either a TYPE or an expression, depending on the form of the input. The KEYWORD indicates which kind of expression we have encountered. */ static tree cp_parser_sizeof_operand (cp_parser* parser, enum rid keyword) { tree expr = NULL_TREE; const char *saved_message; char *tmp; bool saved_integral_constant_expression_p; bool saved_non_integral_constant_expression_p; /* If it's a `...', then we are computing the length of a parameter pack. */ if (keyword == RID_SIZEOF && cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS)) return cp_parser_sizeof_pack (parser); /* Types cannot be defined in a `sizeof' expression. Save away the old message. */ saved_message = parser->type_definition_forbidden_message; /* And create the new one. */ tmp = concat ("types may not be defined in %<", IDENTIFIER_POINTER (ridpointers[keyword]), "%> expressions", NULL); parser->type_definition_forbidden_message = tmp; /* The restrictions on constant-expressions do not apply inside sizeof expressions. */ saved_integral_constant_expression_p = parser->integral_constant_expression_p; saved_non_integral_constant_expression_p = parser->non_integral_constant_expression_p; parser->integral_constant_expression_p = false; /* Do not actually evaluate the expression. */ ++cp_unevaluated_operand; ++c_inhibit_evaluation_warnings; /* If it's a `(', then we might be looking at the type-id construction. */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { tree type = NULL_TREE; /* We can't be sure yet whether we're looking at a type-id or an expression. */ cp_parser_parse_tentatively (parser); /* Note: as a GNU Extension, compound literals are considered postfix-expressions as they are in C99, so they are valid arguments to sizeof. See comment in cp_parser_cast_expression for details. */ if (cp_parser_compound_literal_p (parser)) cp_parser_simulate_error (parser); else { bool saved_in_type_id_in_expr_p = parser->in_type_id_in_expr_p; parser->in_type_id_in_expr_p = true; /* Look for the type-id. */ type = cp_parser_type_id (parser); /* Look for the closing `)'. */ cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); parser->in_type_id_in_expr_p = saved_in_type_id_in_expr_p; } /* If all went well, then we're done. */ if (cp_parser_parse_definitely (parser)) { cp_decl_specifier_seq decl_specs; /* Build a trivial decl-specifier-seq. */ clear_decl_specs (&decl_specs); decl_specs.type = type; /* Call grokdeclarator to figure out what type this is. */ expr = grokdeclarator (NULL, &decl_specs, TYPENAME, /*initialized=*/0, /*attrlist=*/NULL); } } /* If the type-id production did not work out, then we must be looking at the unary-expression production. */ if (!expr) expr = cp_parser_unary_expression (parser); /* Go back to evaluating expressions. */ --cp_unevaluated_operand; --c_inhibit_evaluation_warnings; /* Free the message we created. */ free (tmp); /* And restore the old one. 
*/ parser->type_definition_forbidden_message = saved_message; parser->integral_constant_expression_p = saved_integral_constant_expression_p; parser->non_integral_constant_expression_p = saved_non_integral_constant_expression_p; return expr; } /* If the current declaration has no declarator, return true. */ static bool cp_parser_declares_only_class_p (cp_parser *parser) { /* If the next token is a `;' or a `,' then there is no declarator. */ return (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON) || cp_lexer_next_token_is (parser->lexer, CPP_COMMA)); } /* Update the DECL_SPECS to reflect the storage class indicated by KEYWORD. */ static void cp_parser_set_storage_class (cp_parser *parser, cp_decl_specifier_seq *decl_specs, enum rid keyword, cp_token *token) { cp_storage_class storage_class; if (parser->in_unbraced_linkage_specification_p) { error_at (token->location, "invalid use of %qD in linkage specification", ridpointers[keyword]); return; } else if (decl_specs->storage_class != sc_none) { decl_specs->conflicting_specifiers_p = true; return; } if ((keyword == RID_EXTERN || keyword == RID_STATIC) && decl_spec_seq_has_spec_p (decl_specs, ds_thread) && decl_specs->gnu_thread_keyword_p) { pedwarn (decl_specs->locations[ds_thread], 0, "%<__thread%> before %qD", ridpointers[keyword]); } switch (keyword) { case RID_AUTO: storage_class = sc_auto; break; case RID_REGISTER: storage_class = sc_register; break; case RID_STATIC: storage_class = sc_static; break; case RID_EXTERN: storage_class = sc_extern; break; case RID_MUTABLE: storage_class = sc_mutable; break; default: gcc_unreachable (); } decl_specs->storage_class = storage_class; set_and_check_decl_spec_loc (decl_specs, ds_storage_class, token); /* A storage class specifier cannot be applied alongside a typedef specifier. If there is a typedef specifier present then set conflicting_specifiers_p which will trigger an error later on in grokdeclarator. */ if (decl_spec_seq_has_spec_p (decl_specs, ds_typedef)) decl_specs->conflicting_specifiers_p = true; } /* Update the DECL_SPECS to reflect the TYPE_SPEC. If TYPE_DEFINITION_P is true, the type is a class or enum definition. */ static void cp_parser_set_decl_spec_type (cp_decl_specifier_seq *decl_specs, tree type_spec, cp_token *token, bool type_definition_p) { decl_specs->any_specifiers_p = true; /* If the user tries to redeclare bool, char16_t, char32_t, or wchar_t (as in, for example, "typedef int wchar_t;") we remember that this is what happened. In system headers, we ignore these declarations so that G++ can work with system headers that are not C++-safe.
*/ if (decl_spec_seq_has_spec_p (decl_specs, ds_typedef) && !type_definition_p && (type_spec == boolean_type_node || type_spec == char16_type_node || type_spec == char32_type_node || type_spec == wchar_type_node) && (decl_specs->type || decl_spec_seq_has_spec_p (decl_specs, ds_long) || decl_spec_seq_has_spec_p (decl_specs, ds_short) || decl_spec_seq_has_spec_p (decl_specs, ds_unsigned) || decl_spec_seq_has_spec_p (decl_specs, ds_signed))) { decl_specs->redefined_builtin_type = type_spec; set_and_check_decl_spec_loc (decl_specs, ds_redefined_builtin_type_spec, token); if (!decl_specs->type) { decl_specs->type = type_spec; decl_specs->type_definition_p = false; set_and_check_decl_spec_loc (decl_specs, ds_type_spec, token); } } else if (decl_specs->type) decl_specs->multiple_types_p = true; else { decl_specs->type = type_spec; decl_specs->type_definition_p = type_definition_p; decl_specs->redefined_builtin_type = NULL_TREE; set_and_check_decl_spec_loc (decl_specs, ds_type_spec, token); } } /* True iff TOKEN is the GNU keyword __thread. */ static bool token_is__thread (cp_token *token) { gcc_assert (token->keyword == RID_THREAD); return !strcmp (IDENTIFIER_POINTER (token->u.value), "__thread"); } /* Set the location for a declarator specifier and check if it is duplicated. DECL_SPECS is the sequence of declarator specifiers onto which to set the location. DS is the single declarator specifier whose location is to be set onto the existing sequence of declarator specifiers. TOKEN provides the location for the declarator specifier to consider. */ static void set_and_check_decl_spec_loc (cp_decl_specifier_seq *decl_specs, cp_decl_spec ds, cp_token *token) { gcc_assert (ds < ds_last); if (decl_specs == NULL) return; source_location location = token->location; if (decl_specs->locations[ds] == 0) { decl_specs->locations[ds] = location; if (ds == ds_thread) decl_specs->gnu_thread_keyword_p = token_is__thread (token); } else { if (ds == ds_long) { if (decl_specs->locations[ds_long_long] != 0) error_at (location, "%<long long long%> is too long for GCC"); else { decl_specs->locations[ds_long_long] = location; pedwarn_cxx98 (location, OPT_Wlong_long, "ISO C++ 1998 does not support %<long long%>"); } } else if (ds == ds_thread) { bool gnu = token_is__thread (token); if (gnu != decl_specs->gnu_thread_keyword_p) error_at (location, "both %<__thread%> and %<thread_local%> specified"); else error_at (location, "duplicate %qD", token->u.value); } else { static const char *const decl_spec_names[] = { "signed", "unsigned", "short", "long", "const", "volatile", "restrict", "inline", "virtual", "explicit", "friend", "typedef", "using", "constexpr", "__complex" }; error_at (location, "duplicate %qs", decl_spec_names[ds]); } } } /* Return true iff the declarator specifier DS is present in the sequence of declarator specifiers DECL_SPECS. */ bool decl_spec_seq_has_spec_p (const cp_decl_specifier_seq * decl_specs, cp_decl_spec ds) { gcc_assert (ds < ds_last); if (decl_specs == NULL) return false; return decl_specs->locations[ds] != 0; } /* DECL_SPECIFIERS is the representation of a decl-specifier-seq. Returns TRUE iff `friend' appears among the DECL_SPECIFIERS. */ static bool cp_parser_friend_p (const cp_decl_specifier_seq *decl_specifiers) { return decl_spec_seq_has_spec_p (decl_specifiers, ds_friend); } /* Issue an error message indicating that TOKEN_DESC was expected. If KEYWORD is true, it indicates this function is called by cp_parser_require_keyword and the required token can only be an indicated keyword.
*/ static void cp_parser_required_error (cp_parser *parser, required_token token_desc, bool keyword) { switch (token_desc) { case RT_NEW: cp_parser_error (parser, "expected %<new%>"); return; case RT_DELETE: cp_parser_error (parser, "expected %<delete%>"); return; case RT_RETURN: cp_parser_error (parser, "expected %<return%>"); return; case RT_WHILE: cp_parser_error (parser, "expected %<while%>"); return; case RT_EXTERN: cp_parser_error (parser, "expected %<extern%>"); return; case RT_STATIC_ASSERT: cp_parser_error (parser, "expected %<static_assert%>"); return; case RT_DECLTYPE: cp_parser_error (parser, "expected %<decltype%>"); return; case RT_OPERATOR: cp_parser_error (parser, "expected %<operator%>"); return; case RT_CLASS: cp_parser_error (parser, "expected %<class%>"); return; case RT_TEMPLATE: cp_parser_error (parser, "expected %<template%>"); return; case RT_NAMESPACE: cp_parser_error (parser, "expected %<namespace%>"); return; case RT_USING: cp_parser_error (parser, "expected %<using%>"); return; case RT_ASM: cp_parser_error (parser, "expected %<asm%>"); return; case RT_TRY: cp_parser_error (parser, "expected %<try%>"); return; case RT_CATCH: cp_parser_error (parser, "expected %<catch%>"); return; case RT_THROW: cp_parser_error (parser, "expected %<throw%>"); return; case RT_LABEL: cp_parser_error (parser, "expected %<__label__%>"); return; case RT_AT_TRY: cp_parser_error (parser, "expected %<@try%>"); return; case RT_AT_SYNCHRONIZED: cp_parser_error (parser, "expected %<@synchronized%>"); return; case RT_AT_THROW: cp_parser_error (parser, "expected %<@throw%>"); return; case RT_TRANSACTION_ATOMIC: cp_parser_error (parser, "expected %<__transaction_atomic%>"); return; case RT_TRANSACTION_RELAXED: cp_parser_error (parser, "expected %<__transaction_relaxed%>"); return; default: break; } if (!keyword) { switch (token_desc) { case RT_SEMICOLON: cp_parser_error (parser, "expected %<;%>"); return; case RT_OPEN_PAREN: cp_parser_error (parser, "expected %<(%>"); return; case RT_CLOSE_BRACE: cp_parser_error (parser, "expected %<}%>"); return; case RT_OPEN_BRACE: cp_parser_error (parser, "expected %<{%>"); return; case RT_CLOSE_SQUARE: cp_parser_error (parser, "expected %<]%>"); return; case RT_OPEN_SQUARE: cp_parser_error (parser, "expected %<[%>"); return; case RT_COMMA: cp_parser_error (parser, "expected %<,%>"); return; case RT_SCOPE: cp_parser_error (parser, "expected %<::%>"); return; case RT_LESS: cp_parser_error (parser, "expected %<<%>"); return; case RT_GREATER: cp_parser_error (parser, "expected %<>%>"); return; case RT_EQ: cp_parser_error (parser, "expected %<=%>"); return; case RT_ELLIPSIS: cp_parser_error (parser, "expected %<...%>"); return; case RT_MULT: cp_parser_error (parser, "expected %<*%>"); return; case RT_COMPL: cp_parser_error (parser, "expected %<~%>"); return; case RT_COLON: cp_parser_error (parser, "expected %<:%>"); return; case RT_COLON_SCOPE: cp_parser_error (parser, "expected %<:%> or %<::%>"); return; case RT_CLOSE_PAREN: cp_parser_error (parser, "expected %<)%>"); return; case RT_COMMA_CLOSE_PAREN: cp_parser_error (parser, "expected %<,%> or %<)%>"); return; case RT_PRAGMA_EOL: cp_parser_error (parser, "expected end of line"); return; case RT_NAME: cp_parser_error (parser, "expected identifier"); return; case RT_SELECT: cp_parser_error (parser, "expected selection-statement"); return; case RT_INTERATION: cp_parser_error (parser, "expected iteration-statement"); return; case RT_JUMP: cp_parser_error (parser, "expected jump-statement"); return; case 
RT_CLASS_KEY: cp_parser_error (parser, "expected class-key"); return; case RT_CLASS_TYPENAME_TEMPLATE: cp_parser_error (parser, "expected %<class%>, %<typename%>, or %<template%>"); return; default: gcc_unreachable (); } } else gcc_unreachable (); } /* If the next token is of the indicated TYPE, consume it. Otherwise, issue an error message indicating that TOKEN_DESC was expected. Returns the token consumed, if the token had the appropriate type. Otherwise, returns NULL. */ static cp_token * cp_parser_require (cp_parser* parser, enum cpp_ttype type, required_token token_desc) { if (cp_lexer_next_token_is (parser->lexer, type)) return cp_lexer_consume_token (parser->lexer); else { /* Output the MESSAGE -- unless we're parsing tentatively. */ if (!cp_parser_simulate_error (parser)) cp_parser_required_error (parser, token_desc, /*keyword=*/false); return NULL; } } /* An error message is produced if the next token is not '>'. All further tokens are skipped until the desired token is found or '{', '}', ';' or an unbalanced ')' or ']'. */ static void cp_parser_skip_to_end_of_template_parameter_list (cp_parser* parser) { /* Current level of '< ... >'. */ unsigned level = 0; /* Ignore '<' and '>' nested inside '( ... )' or '[ ... ]'. */ unsigned nesting_depth = 0; /* Are we ready, yet? If not, issue error message. */ if (cp_parser_require (parser, CPP_GREATER, RT_GREATER)) return; /* Skip tokens until the desired token is found. */ while (true) { /* Peek at the next token. */ switch (cp_lexer_peek_token (parser->lexer)->type) { case CPP_LESS: if (!nesting_depth) ++level; break; case CPP_RSHIFT: if (cxx_dialect == cxx98) /* C++0x views the `>>' operator as two `>' tokens, but C++98 does not. */ break; else if (!nesting_depth && level-- == 0) { /* We've hit a `>>' where the first `>' closes the template argument list, and the second `>' is spurious. Just consume the `>>' and stop; we've already produced at least one error. */ cp_lexer_consume_token (parser->lexer); return; } /* Fall through for C++0x, so we handle the second `>' in the `>>'. */ case CPP_GREATER: if (!nesting_depth && level-- == 0) { /* We've reached the token we want, consume it and stop. */ cp_lexer_consume_token (parser->lexer); return; } break; case CPP_OPEN_PAREN: case CPP_OPEN_SQUARE: ++nesting_depth; break; case CPP_CLOSE_PAREN: case CPP_CLOSE_SQUARE: if (nesting_depth-- == 0) return; break; case CPP_EOF: case CPP_PRAGMA_EOL: case CPP_SEMICOLON: case CPP_OPEN_BRACE: case CPP_CLOSE_BRACE: /* The '>' was probably forgotten, don't look further. */ return; default: break; } /* Consume this token. */ cp_lexer_consume_token (parser->lexer); } } /* If the next token is the indicated keyword, consume it. Otherwise, issue an error message indicating that TOKEN_DESC was expected. Returns the token consumed, if the token had the appropriate type. Otherwise, returns NULL. */ static cp_token * cp_parser_require_keyword (cp_parser* parser, enum rid keyword, required_token token_desc) { cp_token *token = cp_parser_require (parser, CPP_KEYWORD, token_desc); if (token && token->keyword != keyword) { cp_parser_required_error (parser, token_desc, /*keyword=*/true); return NULL; } return token; } /* Returns TRUE iff TOKEN is a token that can begin the body of a function-definition. */ static bool cp_parser_token_starts_function_definition_p (cp_token* token) { return (/* An ordinary function-body begins with an `{'. */ token->type == CPP_OPEN_BRACE /* A ctor-initializer begins with a `:'. 
*/ || token->type == CPP_COLON /* A function-try-block begins with `try'. */ || token->keyword == RID_TRY /* A function-transaction-block begins with `__transaction_atomic' or `__transaction_relaxed'. */ || token->keyword == RID_TRANSACTION_ATOMIC || token->keyword == RID_TRANSACTION_RELAXED /* The named return value extension begins with `return'. */ || token->keyword == RID_RETURN); } /* Returns TRUE iff the next token is the ":" or "{" beginning a class definition. */ static bool cp_parser_next_token_starts_class_definition_p (cp_parser *parser) { cp_token *token; token = cp_lexer_peek_token (parser->lexer); return (token->type == CPP_OPEN_BRACE || (token->type == CPP_COLON && !parser->colon_doesnt_start_class_def_p)); } /* Returns TRUE iff the next token is the "," or ">" (or `>>', in C++0x) ending a template-argument. */ static bool cp_parser_next_token_ends_template_argument_p (cp_parser *parser) { cp_token *token; token = cp_lexer_peek_token (parser->lexer); return (token->type == CPP_COMMA || token->type == CPP_GREATER || token->type == CPP_ELLIPSIS || ((cxx_dialect != cxx98) && token->type == CPP_RSHIFT)); } /* Returns TRUE iff the n-th token is a "<", or the n-th is a "[" and the (n+1)-th is a ":" (which is a possible digraph typo for "< ::"). */ static bool cp_parser_nth_token_starts_template_argument_list_p (cp_parser * parser, size_t n) { cp_token *token; token = cp_lexer_peek_nth_token (parser->lexer, n); if (token->type == CPP_LESS) return true; /* Check for the sequence `<::' in the original code. It would be lexed as `[:', where `[' is a digraph, and there is no whitespace before `:'. */ if (token->type == CPP_OPEN_SQUARE && token->flags & DIGRAPH) { cp_token *token2; token2 = cp_lexer_peek_nth_token (parser->lexer, n+1); if (token2->type == CPP_COLON && !(token2->flags & PREV_WHITE)) return true; } return false; } /* Returns the kind of tag indicated by TOKEN, if it is a class-key, or none_type otherwise. */ static enum tag_types cp_parser_token_is_class_key (cp_token* token) { switch (token->keyword) { case RID_CLASS: return class_type; case RID_STRUCT: return record_type; case RID_UNION: return union_type; default: return none_type; } } /* Returns the kind of tag indicated by TOKEN, if it is a type-parameter-key, or none_type otherwise or if the token is null. */ static enum tag_types cp_parser_token_is_type_parameter_key (cp_token* token) { if (!token) return none_type; switch (token->keyword) { case RID_CLASS: return class_type; case RID_TYPENAME: return typename_type; default: return none_type; } } /* Issue an error message if the CLASS_KEY does not match the TYPE. */ static void cp_parser_check_class_key (enum tag_types class_key, tree type) { if (type == error_mark_node) return; if ((TREE_CODE (type) == UNION_TYPE) != (class_key == union_type)) { if (permerror (input_location, "%qs tag used in naming %q#T", class_key == union_type ? "union" : class_key == record_type ? "struct" : "class", type)) inform (DECL_SOURCE_LOCATION (TYPE_NAME (type)), "%q#T was previously declared here", type); } } /* Issue an error message if DECL is redeclared with different access than its original declaration [class.access.spec/3]. This applies to nested classes and nested class templates. [class.mem/1]. 
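For example: struct S { private: struct N; public: struct N { }; }; redeclares N with different access and is diagnosed here.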
*/ static void cp_parser_check_access_in_redeclaration (tree decl, location_t location) { if (!decl || !CLASS_TYPE_P (TREE_TYPE (decl))) return; if ((TREE_PRIVATE (decl) != (current_access_specifier == access_private_node)) || (TREE_PROTECTED (decl) != (current_access_specifier == access_protected_node))) error_at (location, "%qD redeclared with different access", decl); } /* Look for the `template' keyword, as a syntactic disambiguator. Return TRUE iff it is present, in which case it will be consumed. */ static bool cp_parser_optional_template_keyword (cp_parser *parser) { if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TEMPLATE)) { /* In C++98 the `template' keyword can only be used within templates; outside templates the parser can always figure out what is a template and what is not. In C++11, per the resolution of DR 468, `template' is allowed in cases where it is not strictly necessary. */ if (!processing_template_decl && pedantic && cxx_dialect == cxx98) { cp_token *token = cp_lexer_peek_token (parser->lexer); pedwarn (token->location, OPT_Wpedantic, "in C++98 %<template%> (as a disambiguator) is only " "allowed within templates"); /* If this part of the token stream is rescanned, the same error message would be generated. So, we purge the token from the stream. */ cp_lexer_purge_token (parser->lexer); return false; } else { /* Consume the `template' keyword. */ cp_lexer_consume_token (parser->lexer); return true; } } return false; } /* The next token is a CPP_NESTED_NAME_SPECIFIER. Consume the token, set PARSER->SCOPE, and perform other related actions. */ static void cp_parser_pre_parsed_nested_name_specifier (cp_parser *parser) { int i; struct tree_check *check_value; deferred_access_check *chk; vec<deferred_access_check, va_gc> *checks; /* Get the stored value. */ check_value = cp_lexer_consume_token (parser->lexer)->u.tree_check_value; /* Perform any access checks that were deferred. */ checks = check_value->checks; if (checks) { FOR_EACH_VEC_SAFE_ELT (checks, i, chk) perform_or_defer_access_check (chk->binfo, chk->decl, chk->diag_decl, tf_warning_or_error); } /* Set the scope from the stored value. */ parser->scope = check_value->value; parser->qualifying_scope = check_value->qualifying_scope; parser->object_scope = NULL_TREE; } /* Consume tokens up through a non-nested END token. Returns TRUE if we encounter the end of a block before what we were looking for. */ static bool cp_parser_cache_group (cp_parser *parser, enum cpp_ttype end, unsigned depth) { while (true) { cp_token *token = cp_lexer_peek_token (parser->lexer); /* Abort a parenthesized expression if we encounter a semicolon. */ if ((end == CPP_CLOSE_PAREN || depth == 0) && token->type == CPP_SEMICOLON) return true; /* If we've reached the end of the file, stop. */ if (token->type == CPP_EOF || (end != CPP_PRAGMA_EOL && token->type == CPP_PRAGMA_EOL)) return true; if (token->type == CPP_CLOSE_BRACE && depth == 0) /* We've hit the end of an enclosing block, so there's been some kind of syntax error. */ return true; /* Consume the token. */ cp_lexer_consume_token (parser->lexer); /* See if it starts a new group. */ if (token->type == CPP_OPEN_BRACE) { cp_parser_cache_group (parser, CPP_CLOSE_BRACE, depth + 1); /* In theory this should probably check end == '}', but cp_parser_save_member_function_body needs it to exit after either '}' or ')' when called with ')'. 
*/ if (depth == 0) return false; } else if (token->type == CPP_OPEN_PAREN) { cp_parser_cache_group (parser, CPP_CLOSE_PAREN, depth + 1); if (depth == 0 && end == CPP_CLOSE_PAREN) return false; } else if (token->type == CPP_PRAGMA) cp_parser_cache_group (parser, CPP_PRAGMA_EOL, depth + 1); else if (token->type == end) return false; } } /* Like above, for caching a default argument or NSDMI. Both of these are terminated by a non-nested comma, but it can be unclear whether or not a comma is nested in a template argument list unless we do more parsing. In order to handle this ambiguity, when we encounter a ',' after a '<' we try to parse what follows as a parameter-declaration-list (in the case of a default argument) or a member-declarator (in the case of an NSDMI). If that succeeds, then we stop caching. */ static tree cp_parser_cache_defarg (cp_parser *parser, bool nsdmi) { unsigned depth = 0; int maybe_template_id = 0; cp_token *first_token; cp_token *token; tree default_argument; /* Add tokens until we have processed the entire default argument. We add the range [first_token, token). */ first_token = cp_lexer_peek_token (parser->lexer); if (first_token->type == CPP_OPEN_BRACE) { /* For list-initialization, this is straightforward. */ cp_parser_cache_group (parser, CPP_CLOSE_BRACE, /*depth=*/0); token = cp_lexer_peek_token (parser->lexer); } else while (true) { bool done = false; /* Peek at the next token. */ token = cp_lexer_peek_token (parser->lexer); /* What we do depends on what token we have. */ switch (token->type) { /* In valid code, a default argument must be immediately followed by a `,' `)', or `...'. */ case CPP_COMMA: if (depth == 0 && maybe_template_id) { /* If we've seen a '<', we might be in a template-argument-list. Until Core issue 325 is resolved, we don't know how this situation ought to be handled, so try to DTRT. We check whether what comes after the comma is a valid parameter declaration list. If it is, then the comma ends the default argument; otherwise the default argument continues. */ bool error = false; /* Set ITALP so cp_parser_parameter_declaration_list doesn't decide to commit to this parse. */ bool saved_italp = parser->in_template_argument_list_p; parser->in_template_argument_list_p = true; cp_parser_parse_tentatively (parser); cp_lexer_consume_token (parser->lexer); if (nsdmi) { int ctor_dtor_or_conv_p; cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, &ctor_dtor_or_conv_p, /*parenthesized_p=*/NULL, /*member_p=*/true, /*friend_p=*/false); } else { begin_scope (sk_function_parms, NULL_TREE); cp_parser_parameter_declaration_list (parser, &error); pop_bindings_and_leave_scope (); } if (!cp_parser_error_occurred (parser) && !error) done = true; cp_parser_abort_tentative_parse (parser); parser->in_template_argument_list_p = saved_italp; break; } case CPP_CLOSE_PAREN: case CPP_ELLIPSIS: /* If we run into a non-nested `;', `}', or `]', then the code is invalid -- but the default argument is certainly over. */ case CPP_SEMICOLON: case CPP_CLOSE_BRACE: case CPP_CLOSE_SQUARE: if (depth == 0 /* Handle correctly int n = sizeof ... ( p ); */ && token->type != CPP_ELLIPSIS) done = true; /* Update DEPTH, if necessary. */ else if (token->type == CPP_CLOSE_PAREN || token->type == CPP_CLOSE_BRACE || token->type == CPP_CLOSE_SQUARE) --depth; break; case CPP_OPEN_PAREN: case CPP_OPEN_SQUARE: case CPP_OPEN_BRACE: ++depth; break; case CPP_LESS: if (depth == 0) /* This might be the comparison operator, or it might start a template argument list. 
*/ ++maybe_template_id; break; case CPP_RSHIFT: if (cxx_dialect == cxx98) break; /* Fall through for C++0x, which treats the `>>' operator like two `>' tokens in certain cases. */ case CPP_GREATER: if (depth == 0) { /* This might be an operator, or it might close a template argument list. But if a previous '<' started a template argument list, this will have closed it, so we can't be in one anymore. */ maybe_template_id -= 1 + (token->type == CPP_RSHIFT); if (maybe_template_id < 0) maybe_template_id = 0; } break; /* If we run out of tokens, issue an error message. */ case CPP_EOF: case CPP_PRAGMA_EOL: error_at (token->location, "file ends in default argument"); done = true; break; case CPP_NAME: case CPP_SCOPE: /* In these cases, we should look for template-ids. For example, if the default argument is `X<int, double>()', we need to do name lookup to figure out whether or not `X' is a template; if so, the `,' does not end the default argument. That is not yet done. */ break; default: break; } /* If we've reached the end, stop. */ if (done) break; /* Add the token to the token block. */ token = cp_lexer_consume_token (parser->lexer); } /* Create a DEFAULT_ARG to represent the unparsed default argument. */ default_argument = make_node (DEFAULT_ARG); DEFARG_TOKENS (default_argument) = cp_token_cache_new (first_token, token); DEFARG_INSTANTIATIONS (default_argument) = NULL; return default_argument; } /* Begin parsing tentatively. We always save tokens while parsing tentatively so that if the tentative parsing fails we can restore the tokens. */ static void cp_parser_parse_tentatively (cp_parser* parser) { /* Enter a new parsing context. */ parser->context = cp_parser_context_new (parser->context); /* Begin saving tokens. */ cp_lexer_save_tokens (parser->lexer); /* In order to avoid repetitive access control error messages, access checks are queued up until we are no longer parsing tentatively. */ push_deferring_access_checks (dk_deferred); } /* Commit to the currently active tentative parse. */ static void cp_parser_commit_to_tentative_parse (cp_parser* parser) { cp_parser_context *context; cp_lexer *lexer; /* Mark all of the levels as committed. */ lexer = parser->lexer; for (context = parser->context; context->next; context = context->next) { if (context->status == CP_PARSER_STATUS_KIND_COMMITTED) break; context->status = CP_PARSER_STATUS_KIND_COMMITTED; while (!cp_lexer_saving_tokens (lexer)) lexer = lexer->next; cp_lexer_commit_tokens (lexer); } } /* Commit to the topmost currently active tentative parse. Note that this function shouldn't be called when there are irreversible side-effects while in a tentative state. For example, we shouldn't create a permanent entry in the symbol table, or issue an error message that might not apply if the tentative parse is aborted. */ static void cp_parser_commit_to_topmost_tentative_parse (cp_parser* parser) { cp_parser_context *context = parser->context; cp_lexer *lexer = parser->lexer; if (context) { if (context->status == CP_PARSER_STATUS_KIND_COMMITTED) return; context->status = CP_PARSER_STATUS_KIND_COMMITTED; while (!cp_lexer_saving_tokens (lexer)) lexer = lexer->next; cp_lexer_commit_tokens (lexer); } } /* Abort the currently active tentative parse. All consumed tokens will be rolled back, and no diagnostics will be issued. 
*/ static void cp_parser_abort_tentative_parse (cp_parser* parser) { gcc_assert (parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED || errorcount > 0); cp_parser_simulate_error (parser); /* Now, pretend that we want to see if the construct was successfully parsed. */ cp_parser_parse_definitely (parser); } /* Stop parsing tentatively. If a parse error has occurred, restore the token stream. Otherwise, commit to the tokens we have consumed. Returns true if no error occurred; false otherwise. */ static bool cp_parser_parse_definitely (cp_parser* parser) { bool error_occurred; cp_parser_context *context; /* Remember whether or not an error occurred, since we are about to destroy that information. */ error_occurred = cp_parser_error_occurred (parser); /* Remove the topmost context from the stack. */ context = parser->context; parser->context = context->next; /* If no parse errors occurred, commit to the tentative parse. */ if (!error_occurred) { /* Commit to the tokens read tentatively, unless that was already done. */ if (context->status != CP_PARSER_STATUS_KIND_COMMITTED) cp_lexer_commit_tokens (parser->lexer); pop_to_parent_deferring_access_checks (); } /* Otherwise, if errors occurred, roll back our state so that things are just as they were before we began the tentative parse. */ else { cp_lexer_rollback_tokens (parser->lexer); pop_deferring_access_checks (); } /* Add the context to the front of the free list. */ context->next = cp_parser_context_free_list; cp_parser_context_free_list = context; return !error_occurred; } /* Returns true if we are parsing tentatively and are not committed to this tentative parse. */ static bool cp_parser_uncommitted_to_tentative_parse_p (cp_parser* parser) { return (cp_parser_parsing_tentatively (parser) && parser->context->status != CP_PARSER_STATUS_KIND_COMMITTED); } /* Returns nonzero iff an error has occurred during the most recent tentative parse. */ static bool cp_parser_error_occurred (cp_parser* parser) { return (cp_parser_parsing_tentatively (parser) && parser->context->status == CP_PARSER_STATUS_KIND_ERROR); } /* Returns nonzero if GNU extensions are allowed. */ static bool cp_parser_allow_gnu_extensions_p (cp_parser* parser) { return parser->allow_gnu_extensions_p; } /* Objective-C++ Productions */ /* Parse an Objective-C expression, which feeds into a primary-expression above. objc-expression: objc-message-expression objc-string-literal objc-encode-expression objc-protocol-expression objc-selector-expression Returns a tree representation of the expression. */ static tree cp_parser_objc_expression (cp_parser* parser) { /* Try to figure out what kind of declaration is present. */ cp_token *kwd = cp_lexer_peek_token (parser->lexer); switch (kwd->type) { case CPP_OPEN_SQUARE: return cp_parser_objc_message_expression (parser); case CPP_OBJC_STRING: kwd = cp_lexer_consume_token (parser->lexer); return objc_build_string_object (kwd->u.value); case CPP_KEYWORD: switch (kwd->keyword) { case RID_AT_ENCODE: return cp_parser_objc_encode_expression (parser); case RID_AT_PROTOCOL: return cp_parser_objc_protocol_expression (parser); case RID_AT_SELECTOR: return cp_parser_objc_selector_expression (parser); default: break; } default: error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct", kwd->u.value); cp_parser_skip_to_end_of_block_or_statement (parser); } return error_mark_node; } /* Parse an Objective-C message expression. 
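   A message expression looks like, say, `[receiver doSomething]' or
   `[obj setValue: v forKey: k]'.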
objc-message-expression: [ objc-message-receiver objc-message-args ] Returns a representation of an Objective-C message. */ static tree cp_parser_objc_message_expression (cp_parser* parser) { tree receiver, messageargs; cp_lexer_consume_token (parser->lexer); /* Eat '['. */ receiver = cp_parser_objc_message_receiver (parser); messageargs = cp_parser_objc_message_args (parser); cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); return objc_build_message_expr (receiver, messageargs); } /* Parse an objc-message-receiver. objc-message-receiver: expression simple-type-specifier Returns a representation of the type or expression. */ static tree cp_parser_objc_message_receiver (cp_parser* parser) { tree rcv; /* An Objective-C message receiver may be either (1) a type or (2) an expression. */ cp_parser_parse_tentatively (parser); rcv = cp_parser_expression (parser); /* If that worked out, fine. */ if (cp_parser_parse_definitely (parser)) return rcv; cp_parser_parse_tentatively (parser); rcv = cp_parser_simple_type_specifier (parser, /*decl_specs=*/NULL, CP_PARSER_FLAGS_NONE); if (cp_parser_parse_definitely (parser)) return objc_get_class_reference (rcv); cp_parser_error (parser, "objective-c++ message receiver expected"); return error_mark_node; } /* Parse the arguments and selectors comprising an Objective-C message. objc-message-args: objc-selector objc-selector-args objc-selector-args , objc-comma-args objc-selector-args: objc-selector [opt] : assignment-expression objc-selector-args objc-selector [opt] : assignment-expression objc-comma-args: assignment-expression objc-comma-args , assignment-expression Returns a TREE_LIST, with TREE_PURPOSE containing a list of selector arguments and TREE_VALUE containing a list of comma arguments. */ static tree cp_parser_objc_message_args (cp_parser* parser) { tree sel_args = NULL_TREE, addl_args = NULL_TREE; bool maybe_unary_selector_p = true; cp_token *token = cp_lexer_peek_token (parser->lexer); while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON) { tree selector = NULL_TREE, arg; if (token->type != CPP_COLON) selector = cp_parser_objc_selector (parser); /* Detect if we have a unary selector. */ if (maybe_unary_selector_p && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)) return build_tree_list (selector, NULL_TREE); maybe_unary_selector_p = false; cp_parser_require (parser, CPP_COLON, RT_COLON); arg = cp_parser_assignment_expression (parser); sel_args = chainon (sel_args, build_tree_list (selector, arg)); token = cp_lexer_peek_token (parser->lexer); } /* Handle non-selector arguments, if any. */ while (token->type == CPP_COMMA) { tree arg; cp_lexer_consume_token (parser->lexer); arg = cp_parser_assignment_expression (parser); addl_args = chainon (addl_args, build_tree_list (NULL_TREE, arg)); token = cp_lexer_peek_token (parser->lexer); } if (sel_args == NULL_TREE && addl_args == NULL_TREE) { cp_parser_error (parser, "objective-c++ message argument(s) are expected"); return build_tree_list (error_mark_node, error_mark_node); } return build_tree_list (sel_args, addl_args); } /* Parse an Objective-C encode expression. objc-encode-expression: @encode objc-typename Returns an encoded representation of the type argument. */ static tree cp_parser_objc_encode_expression (cp_parser* parser) { tree type; cp_token *token; cp_lexer_consume_token (parser->lexer); /* Eat '@encode'. 
*/ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); token = cp_lexer_peek_token (parser->lexer); type = complete_type (cp_parser_type_id (parser)); cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); if (!type) { error_at (token->location, "%<@encode%> must specify a type as an argument"); return error_mark_node; } /* This happens if we find @encode(T) (where T is a template typename or something dependent on a template typename) when parsing a template. In that case, we can't compile it immediately, but we rather create an AT_ENCODE_EXPR which will need to be instantiated when the template is used. */ if (dependent_type_p (type)) { tree value = build_min (AT_ENCODE_EXPR, size_type_node, type); TREE_READONLY (value) = 1; return value; } return objc_build_encode_expr (type); } /* Parse an Objective-C @defs expression. */ static tree cp_parser_objc_defs_expression (cp_parser *parser) { tree name; cp_lexer_consume_token (parser->lexer); /* Eat '@defs'. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); name = cp_parser_identifier (parser); cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); return objc_get_class_ivars (name); } /* Parse an Objective-C protocol expression. objc-protocol-expression: @protocol ( identifier ) Returns a representation of the protocol expression. */ static tree cp_parser_objc_protocol_expression (cp_parser* parser) { tree proto; cp_lexer_consume_token (parser->lexer); /* Eat '@protocol'. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); proto = cp_parser_identifier (parser); cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); return objc_build_protocol_expr (proto); } /* Parse an Objective-C selector expression. objc-selector-expression: @selector ( objc-method-signature ) objc-method-signature: objc-selector objc-selector-seq objc-selector-seq: objc-selector : objc-selector-seq objc-selector : Returns a representation of the method selector. */ static tree cp_parser_objc_selector_expression (cp_parser* parser) { tree sel_seq = NULL_TREE; bool maybe_unary_selector_p = true; cp_token *token; location_t loc = cp_lexer_peek_token (parser->lexer)->location; cp_lexer_consume_token (parser->lexer); /* Eat '@selector'. */ cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); token = cp_lexer_peek_token (parser->lexer); while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON || token->type == CPP_SCOPE) { tree selector = NULL_TREE; if (token->type != CPP_COLON || token->type == CPP_SCOPE) selector = cp_parser_objc_selector (parser); if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON) && cp_lexer_next_token_is_not (parser->lexer, CPP_SCOPE)) { /* Detect if we have a unary selector. */ if (maybe_unary_selector_p) { sel_seq = selector; goto finish_selector; } else { cp_parser_error (parser, "expected %<:%>"); } } maybe_unary_selector_p = false; token = cp_lexer_consume_token (parser->lexer); if (token->type == CPP_SCOPE) { sel_seq = chainon (sel_seq, build_tree_list (selector, NULL_TREE)); sel_seq = chainon (sel_seq, build_tree_list (NULL_TREE, NULL_TREE)); } else sel_seq = chainon (sel_seq, build_tree_list (selector, NULL_TREE)); token = cp_lexer_peek_token (parser->lexer); } finish_selector: cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); return objc_build_selector_expr (loc, sel_seq); } /* Parse a list of identifiers. objc-identifier-list: identifier objc-identifier-list , identifier Returns a TREE_LIST of identifier nodes. 
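   For example, parsing `NSObject, NSCopying' yields a list of those
   two identifiers.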
*/ static tree cp_parser_objc_identifier_list (cp_parser* parser) { tree identifier; tree list; cp_token *sep; identifier = cp_parser_identifier (parser); if (identifier == error_mark_node) return error_mark_node; list = build_tree_list (NULL_TREE, identifier); sep = cp_lexer_peek_token (parser->lexer); while (sep->type == CPP_COMMA) { cp_lexer_consume_token (parser->lexer); /* Eat ','. */ identifier = cp_parser_identifier (parser); if (identifier == error_mark_node) return list; list = chainon (list, build_tree_list (NULL_TREE, identifier)); sep = cp_lexer_peek_token (parser->lexer); } return list; } /* Parse an Objective-C alias declaration. objc-alias-declaration: @compatibility_alias identifier identifier ; This function registers the alias mapping with the Objective-C front end. It returns nothing. */ static void cp_parser_objc_alias_declaration (cp_parser* parser) { tree alias, orig; cp_lexer_consume_token (parser->lexer); /* Eat '@compatibility_alias'. */ alias = cp_parser_identifier (parser); orig = cp_parser_identifier (parser); objc_declare_alias (alias, orig); cp_parser_consume_semicolon_at_end_of_statement (parser); } /* Parse an Objective-C class forward-declaration. objc-class-declaration: @class objc-identifier-list ; The function registers the forward declarations with the Objective-C front end. It returns nothing. */ static void cp_parser_objc_class_declaration (cp_parser* parser) { cp_lexer_consume_token (parser->lexer); /* Eat '@class'. */ while (true) { tree id; id = cp_parser_identifier (parser); if (id == error_mark_node) break; objc_declare_class (id); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); else break; } cp_parser_consume_semicolon_at_end_of_statement (parser); } /* Parse a list of Objective-C protocol references. objc-protocol-refs-opt: objc-protocol-refs [opt] objc-protocol-refs: < objc-identifier-list > Returns a TREE_LIST of identifiers, if any. */ static tree cp_parser_objc_protocol_refs_opt (cp_parser* parser) { tree protorefs = NULL_TREE; if(cp_lexer_next_token_is (parser->lexer, CPP_LESS)) { cp_lexer_consume_token (parser->lexer); /* Eat '<'. */ protorefs = cp_parser_objc_identifier_list (parser); cp_parser_require (parser, CPP_GREATER, RT_GREATER); } return protorefs; } /* Parse a Objective-C visibility specification. */ static void cp_parser_objc_visibility_spec (cp_parser* parser) { cp_token *vis = cp_lexer_peek_token (parser->lexer); switch (vis->keyword) { case RID_AT_PRIVATE: objc_set_visibility (OBJC_IVAR_VIS_PRIVATE); break; case RID_AT_PROTECTED: objc_set_visibility (OBJC_IVAR_VIS_PROTECTED); break; case RID_AT_PUBLIC: objc_set_visibility (OBJC_IVAR_VIS_PUBLIC); break; case RID_AT_PACKAGE: objc_set_visibility (OBJC_IVAR_VIS_PACKAGE); break; default: return; } /* Eat '@private'/'@protected'/'@public'. */ cp_lexer_consume_token (parser->lexer); } /* Parse an Objective-C method type. Return 'true' if it is a class (+) method, and 'false' if it is an instance (-) method. */ static inline bool cp_parser_objc_method_type (cp_parser* parser) { if (cp_lexer_consume_token (parser->lexer)->type == CPP_PLUS) return true; else return false; } /* Parse an Objective-C protocol qualifier. 
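   These are the keywords `in', `out', `inout', `bycopy', `byref' and
   `oneway', which may qualify a type in a method declaration, e.g.
   `- (void) send: (bycopy id) obj;'.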
*/ static tree cp_parser_objc_protocol_qualifiers (cp_parser* parser) { tree quals = NULL_TREE, node; cp_token *token = cp_lexer_peek_token (parser->lexer); node = token->u.value; while (node && identifier_p (node) && (node == ridpointers [(int) RID_IN] || node == ridpointers [(int) RID_OUT] || node == ridpointers [(int) RID_INOUT] || node == ridpointers [(int) RID_BYCOPY] || node == ridpointers [(int) RID_BYREF] || node == ridpointers [(int) RID_ONEWAY])) { quals = tree_cons (NULL_TREE, node, quals); cp_lexer_consume_token (parser->lexer); token = cp_lexer_peek_token (parser->lexer); node = token->u.value; } return quals; } /* Parse an Objective-C typename. */ static tree cp_parser_objc_typename (cp_parser* parser) { tree type_name = NULL_TREE; if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { tree proto_quals, cp_type = NULL_TREE; cp_lexer_consume_token (parser->lexer); /* Eat '('. */ proto_quals = cp_parser_objc_protocol_qualifiers (parser); /* An ObjC type name may consist of just protocol qualifiers, in which case the type shall default to 'id'. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)) { cp_type = cp_parser_type_id (parser); /* If the type could not be parsed, an error has already been produced. For error recovery, behave as if it had not been specified, which will use the default type 'id'. */ if (cp_type == error_mark_node) { cp_type = NULL_TREE; /* We need to skip to the closing parenthesis as cp_parser_type_id() does not seem to do it for us. */ cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/false); } } cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); type_name = build_tree_list (proto_quals, cp_type); } return type_name; } /* Check to see if TYPE refers to an Objective-C selector name. */ static bool cp_parser_objc_selector_p (enum cpp_ttype type) { return (type == CPP_NAME || type == CPP_KEYWORD || type == CPP_AND_AND || type == CPP_AND_EQ || type == CPP_AND || type == CPP_OR || type == CPP_COMPL || type == CPP_NOT || type == CPP_NOT_EQ || type == CPP_OR_OR || type == CPP_OR_EQ || type == CPP_XOR || type == CPP_XOR_EQ); } /* Parse an Objective-C selector. */ static tree cp_parser_objc_selector (cp_parser* parser) { cp_token *token = cp_lexer_consume_token (parser->lexer); if (!cp_parser_objc_selector_p (token->type)) { error_at (token->location, "invalid Objective-C++ selector name"); return error_mark_node; } /* C++ operator names are allowed to appear in ObjC selectors. */ switch (token->type) { case CPP_AND_AND: return get_identifier ("and"); case CPP_AND_EQ: return get_identifier ("and_eq"); case CPP_AND: return get_identifier ("bitand"); case CPP_OR: return get_identifier ("bitor"); case CPP_COMPL: return get_identifier ("compl"); case CPP_NOT: return get_identifier ("not"); case CPP_NOT_EQ: return get_identifier ("not_eq"); case CPP_OR_OR: return get_identifier ("or"); case CPP_OR_EQ: return get_identifier ("or_eq"); case CPP_XOR: return get_identifier ("xor"); case CPP_XOR_EQ: return get_identifier ("xor_eq"); default: return token->u.value; } } /* Parse an Objective-C params list. 
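   These are the keyword (selector) parameters of a method
   declaration; for example, in `- (void) setValue: (int) v
   forKey: (id) key;' the keyword params are `setValue: (int) v' and
   `forKey: (id) key'.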
*/ static tree cp_parser_objc_method_keyword_params (cp_parser* parser, tree* attributes) { tree params = NULL_TREE; bool maybe_unary_selector_p = true; cp_token *token = cp_lexer_peek_token (parser->lexer); while (cp_parser_objc_selector_p (token->type) || token->type == CPP_COLON) { tree selector = NULL_TREE, type_name, identifier; tree parm_attr = NULL_TREE; if (token->keyword == RID_ATTRIBUTE) break; if (token->type != CPP_COLON) selector = cp_parser_objc_selector (parser); /* Detect if we have a unary selector. */ if (maybe_unary_selector_p && cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)) { params = selector; /* Might be followed by attributes. */ break; } maybe_unary_selector_p = false; if (!cp_parser_require (parser, CPP_COLON, RT_COLON)) { /* Something went quite wrong. There should be a colon here, but there is not. Stop parsing parameters. */ break; } type_name = cp_parser_objc_typename (parser); /* New ObjC allows attributes on parameters too. */ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE)) parm_attr = cp_parser_attributes_opt (parser); identifier = cp_parser_identifier (parser); params = chainon (params, objc_build_keyword_decl (selector, type_name, identifier, parm_attr)); token = cp_lexer_peek_token (parser->lexer); } if (params == NULL_TREE) { cp_parser_error (parser, "objective-c++ method declaration is expected"); return error_mark_node; } /* We allow tail attributes for the method. */ if (token->keyword == RID_ATTRIBUTE) { *attributes = cp_parser_attributes_opt (parser); if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON) || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) return params; cp_parser_error (parser, "method attributes must be specified at the end"); return error_mark_node; } if (params == NULL_TREE) { cp_parser_error (parser, "objective-c++ method declaration is expected"); return error_mark_node; } return params; } /* Parse the non-keyword Objective-C params. */ static tree cp_parser_objc_method_tail_params_opt (cp_parser* parser, bool *ellipsisp, tree* attributes) { tree params = make_node (TREE_LIST); cp_token *token = cp_lexer_peek_token (parser->lexer); *ellipsisp = false; /* Initially, assume no ellipsis. */ while (token->type == CPP_COMMA) { cp_parameter_declarator *parmdecl; tree parm; cp_lexer_consume_token (parser->lexer); /* Eat ','. */ token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_ELLIPSIS) { cp_lexer_consume_token (parser->lexer); /* Eat '...'. */ *ellipsisp = true; token = cp_lexer_peek_token (parser->lexer); break; } /* TODO: parse attributes for tail parameters. */ parmdecl = cp_parser_parameter_declaration (parser, false, NULL); parm = grokdeclarator (parmdecl->declarator, &parmdecl->decl_specifiers, PARM, /*initialized=*/0, /*attrlist=*/NULL); chainon (params, build_tree_list (NULL_TREE, parm)); token = cp_lexer_peek_token (parser->lexer); } /* We allow tail attributes for the method. */ if (token->keyword == RID_ATTRIBUTE) { if (*attributes == NULL_TREE) { *attributes = cp_parser_attributes_opt (parser); if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON) || cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) return params; } else /* We have an error, but parse the attributes, so that we can carry on. */ *attributes = cp_parser_attributes_opt (parser); cp_parser_error (parser, "method attributes must be specified at the end"); return error_mark_node; } return params; } /* Parse a linkage specification, a pragma, an extra semicolon or a block. 
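   Such interstitial code may appear between method declarations; for
   example, a linkage specification like `extern "C" { ... }', a
   `#pragma', or a stray `;'.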
*/ static void cp_parser_objc_interstitial_code (cp_parser* parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); /* If the next token is `extern' and the following token is a string literal, then we have a linkage specification. */ if (token->keyword == RID_EXTERN && cp_parser_is_pure_string_literal (cp_lexer_peek_nth_token (parser->lexer, 2))) cp_parser_linkage_specification (parser); /* Handle #pragma, if any. */ else if (token->type == CPP_PRAGMA) cp_parser_pragma (parser, pragma_objc_icode); /* Allow stray semicolons. */ else if (token->type == CPP_SEMICOLON) cp_lexer_consume_token (parser->lexer); /* Mark methods as optional or required, when building protocols. */ else if (token->keyword == RID_AT_OPTIONAL) { cp_lexer_consume_token (parser->lexer); objc_set_method_opt (true); } else if (token->keyword == RID_AT_REQUIRED) { cp_lexer_consume_token (parser->lexer); objc_set_method_opt (false); } else if (token->keyword == RID_NAMESPACE) cp_parser_namespace_definition (parser); /* Other stray characters must generate errors. */ else if (token->type == CPP_OPEN_BRACE || token->type == CPP_CLOSE_BRACE) { cp_lexer_consume_token (parser->lexer); error ("stray %qs between Objective-C++ methods", token->type == CPP_OPEN_BRACE ? "{" : "}"); } /* Finally, try to parse a block-declaration, or a function-definition. */ else cp_parser_block_declaration (parser, /*statement_p=*/false); } /* Parse a method signature. */ static tree cp_parser_objc_method_signature (cp_parser* parser, tree* attributes) { tree rettype, kwdparms, optparms; bool ellipsis = false; bool is_class_method; is_class_method = cp_parser_objc_method_type (parser); rettype = cp_parser_objc_typename (parser); *attributes = NULL_TREE; kwdparms = cp_parser_objc_method_keyword_params (parser, attributes); if (kwdparms == error_mark_node) return error_mark_node; optparms = cp_parser_objc_method_tail_params_opt (parser, &ellipsis, attributes); if (optparms == error_mark_node) return error_mark_node; return objc_build_method_signature (is_class_method, rettype, kwdparms, optparms, ellipsis); } static bool cp_parser_objc_method_maybe_bad_prefix_attributes (cp_parser* parser) { tree tattr; cp_lexer_save_tokens (parser->lexer); tattr = cp_parser_attributes_opt (parser); gcc_assert (tattr) ; /* If the attributes are followed by a method introducer, this is not allowed. Dump the attributes and flag the situation. */ if (cp_lexer_next_token_is (parser->lexer, CPP_PLUS) || cp_lexer_next_token_is (parser->lexer, CPP_MINUS)) return true; /* Otherwise, the attributes introduce some interstitial code, possibly so rewind to allow that check. */ cp_lexer_rollback_tokens (parser->lexer); return false; } /* Parse an Objective-C method prototype list. 
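   That is, the `+' and `-' method declarations of an @interface or
   @protocol, e.g. `- (id) initWithName: (const char *) name;', up to
   the terminating @end.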
*/ static void cp_parser_objc_method_prototype_list (cp_parser* parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); while (token->keyword != RID_AT_END && token->type != CPP_EOF) { if (token->type == CPP_PLUS || token->type == CPP_MINUS) { tree attributes, sig; bool is_class_method; if (token->type == CPP_PLUS) is_class_method = true; else is_class_method = false; sig = cp_parser_objc_method_signature (parser, &attributes); if (sig == error_mark_node) { cp_parser_skip_to_end_of_block_or_statement (parser); token = cp_lexer_peek_token (parser->lexer); continue; } objc_add_method_declaration (is_class_method, sig, attributes); cp_parser_consume_semicolon_at_end_of_statement (parser); } else if (token->keyword == RID_AT_PROPERTY) cp_parser_objc_at_property_declaration (parser); else if (token->keyword == RID_ATTRIBUTE && cp_parser_objc_method_maybe_bad_prefix_attributes(parser)) warning_at (cp_lexer_peek_token (parser->lexer)->location, OPT_Wattributes, "prefix attributes are ignored for methods"); else /* Allow for interspersed non-ObjC++ code. */ cp_parser_objc_interstitial_code (parser); token = cp_lexer_peek_token (parser->lexer); } if (token->type != CPP_EOF) cp_lexer_consume_token (parser->lexer); /* Eat '@end'. */ else cp_parser_error (parser, "expected %<@end%>"); objc_finish_interface (); } /* Parse an Objective-C method definition list. */ static void cp_parser_objc_method_definition_list (cp_parser* parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); while (token->keyword != RID_AT_END && token->type != CPP_EOF) { tree meth; if (token->type == CPP_PLUS || token->type == CPP_MINUS) { cp_token *ptk; tree sig, attribute; bool is_class_method; if (token->type == CPP_PLUS) is_class_method = true; else is_class_method = false; push_deferring_access_checks (dk_deferred); sig = cp_parser_objc_method_signature (parser, &attribute); if (sig == error_mark_node) { cp_parser_skip_to_end_of_block_or_statement (parser); token = cp_lexer_peek_token (parser->lexer); continue; } objc_start_method_definition (is_class_method, sig, attribute, NULL_TREE); /* For historical reasons, we accept an optional semicolon. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); ptk = cp_lexer_peek_token (parser->lexer); if (!(ptk->type == CPP_PLUS || ptk->type == CPP_MINUS || ptk->type == CPP_EOF || ptk->keyword == RID_AT_END)) { perform_deferred_access_checks (tf_warning_or_error); stop_deferring_access_checks (); meth = cp_parser_function_definition_after_declarator (parser, false); pop_deferring_access_checks (); objc_finish_method_definition (meth); } } /* The following case will be removed once @synthesize is completely implemented. */ else if (token->keyword == RID_AT_PROPERTY) cp_parser_objc_at_property_declaration (parser); else if (token->keyword == RID_AT_SYNTHESIZE) cp_parser_objc_at_synthesize_declaration (parser); else if (token->keyword == RID_AT_DYNAMIC) cp_parser_objc_at_dynamic_declaration (parser); else if (token->keyword == RID_ATTRIBUTE && cp_parser_objc_method_maybe_bad_prefix_attributes(parser)) warning_at (token->location, OPT_Wattributes, "prefix attributes are ignored for methods"); else /* Allow for interspersed non-ObjC++ code. */ cp_parser_objc_interstitial_code (parser); token = cp_lexer_peek_token (parser->lexer); } if (token->type != CPP_EOF) cp_lexer_consume_token (parser->lexer); /* Eat '@end'. */ else cp_parser_error (parser, "expected %<@end%>"); objc_finish_implementation (); } /* Parse Objective-C ivars. 
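   That is, the optional brace-enclosed instance-variable block of an
   @interface or @implementation, for example:
   `{ @private int count; id delegate; }'.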
*/ static void cp_parser_objc_class_ivars (cp_parser* parser) { cp_token *token = cp_lexer_peek_token (parser->lexer); if (token->type != CPP_OPEN_BRACE) return; /* No ivars specified. */ cp_lexer_consume_token (parser->lexer); /* Eat '{'. */ token = cp_lexer_peek_token (parser->lexer); while (token->type != CPP_CLOSE_BRACE && token->keyword != RID_AT_END && token->type != CPP_EOF) { cp_decl_specifier_seq declspecs; int decl_class_or_enum_p; tree prefix_attributes; cp_parser_objc_visibility_spec (parser); if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE)) break; cp_parser_decl_specifier_seq (parser, CP_PARSER_FLAGS_OPTIONAL, &declspecs, &decl_class_or_enum_p); /* auto, register, static, extern, mutable. */ if (declspecs.storage_class != sc_none) { cp_parser_error (parser, "invalid type for instance variable"); declspecs.storage_class = sc_none; } /* thread_local. */ if (decl_spec_seq_has_spec_p (&declspecs, ds_thread)) { cp_parser_error (parser, "invalid type for instance variable"); declspecs.locations[ds_thread] = 0; } /* typedef. */ if (decl_spec_seq_has_spec_p (&declspecs, ds_typedef)) { cp_parser_error (parser, "invalid type for instance variable"); declspecs.locations[ds_typedef] = 0; } prefix_attributes = declspecs.attributes; declspecs.attributes = NULL_TREE; /* Keep going until we hit the `;' at the end of the declaration. */ while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) { tree width = NULL_TREE, attributes, first_attribute, decl; cp_declarator *declarator = NULL; int ctor_dtor_or_conv_p; /* Check for a (possibly unnamed) bitfield declaration. */ token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_COLON) goto eat_colon; if (token->type == CPP_NAME && (cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON)) { /* Get the name of the bitfield. */ declarator = make_id_declarator (NULL_TREE, cp_parser_identifier (parser), sfk_none); eat_colon: cp_lexer_consume_token (parser->lexer); /* Eat ':'. */ /* Get the width of the bitfield. */ width = cp_parser_constant_expression (parser); } else { /* Parse the declarator. */ declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, &ctor_dtor_or_conv_p, /*parenthesized_p=*/NULL, /*member_p=*/false, /*friend_p=*/false); } /* Look for attributes that apply to the ivar. */ attributes = cp_parser_attributes_opt (parser); /* Remember which attributes are prefix attributes and which are not. */ first_attribute = attributes; /* Combine the attributes. */ attributes = chainon (prefix_attributes, attributes); if (width) /* Create the bitfield declaration. */ decl = grokbitfield (declarator, &declspecs, width, attributes); else decl = grokfield (declarator, &declspecs, NULL_TREE, /*init_const_expr_p=*/false, NULL_TREE, attributes); /* Add the instance variable. */ if (decl != error_mark_node && decl != NULL_TREE) objc_add_instance_variable (decl); /* Reset PREFIX_ATTRIBUTES. */ while (attributes && TREE_CHAIN (attributes) != first_attribute) attributes = TREE_CHAIN (attributes); if (attributes) TREE_CHAIN (attributes) = NULL_TREE; token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_COMMA) { cp_lexer_consume_token (parser->lexer); /* Eat ','. */ continue; } break; } cp_parser_consume_semicolon_at_end_of_statement (parser); token = cp_lexer_peek_token (parser->lexer); } if (token->keyword == RID_AT_END) cp_parser_error (parser, "expected %<}%>"); /* Do not consume the RID_AT_END, so it will be read again as terminating the @interface of @implementation. 
*/ if (token->keyword != RID_AT_END && token->type != CPP_EOF) cp_lexer_consume_token (parser->lexer); /* Eat '}'. */ /* For historical reasons, we accept an optional semicolon. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); } /* Parse an Objective-C protocol declaration. */ static void cp_parser_objc_protocol_declaration (cp_parser* parser, tree attributes) { tree proto, protorefs; cp_token *tok; cp_lexer_consume_token (parser->lexer); /* Eat '@protocol'. */ if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME)) { tok = cp_lexer_peek_token (parser->lexer); error_at (tok->location, "identifier expected after %<@protocol%>"); cp_parser_consume_semicolon_at_end_of_statement (parser); return; } /* See if we have a forward declaration or a definition. */ tok = cp_lexer_peek_nth_token (parser->lexer, 2); /* Try a forward declaration first. */ if (tok->type == CPP_COMMA || tok->type == CPP_SEMICOLON) { while (true) { tree id; id = cp_parser_identifier (parser); if (id == error_mark_node) break; objc_declare_protocol (id, attributes); if(cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); else break; } cp_parser_consume_semicolon_at_end_of_statement (parser); } /* Ok, we got a full-fledged definition (or at least should). */ else { proto = cp_parser_identifier (parser); protorefs = cp_parser_objc_protocol_refs_opt (parser); objc_start_protocol (proto, protorefs, attributes); cp_parser_objc_method_prototype_list (parser); } } /* Parse an Objective-C superclass or category. */ static void cp_parser_objc_superclass_or_category (cp_parser *parser, bool iface_p, tree *super, tree *categ, bool *is_class_extension) { cp_token *next = cp_lexer_peek_token (parser->lexer); *super = *categ = NULL_TREE; *is_class_extension = false; if (next->type == CPP_COLON) { cp_lexer_consume_token (parser->lexer); /* Eat ':'. */ *super = cp_parser_identifier (parser); } else if (next->type == CPP_OPEN_PAREN) { cp_lexer_consume_token (parser->lexer); /* Eat '('. */ /* If there is no category name, and this is an @interface, we have a class extension. */ if (iface_p && cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)) { *categ = NULL_TREE; *is_class_extension = true; } else *categ = cp_parser_identifier (parser); cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); } } /* Parse an Objective-C class interface. */ static void cp_parser_objc_class_interface (cp_parser* parser, tree attributes) { tree name, super, categ, protos; bool is_class_extension; cp_lexer_consume_token (parser->lexer); /* Eat '@interface'. */ name = cp_parser_identifier (parser); if (name == error_mark_node) { /* It's hard to recover because even if valid @interface stuff is to follow, we can't compile it (or validate it) if we don't even know which class it refers to. Let's assume this was a stray '@interface' token in the stream and skip it. */ return; } cp_parser_objc_superclass_or_category (parser, true, &super, &categ, &is_class_extension); protos = cp_parser_objc_protocol_refs_opt (parser); /* We have either a class or a category on our hands. */ if (categ || is_class_extension) objc_start_category_interface (name, categ, protos, attributes); else { objc_start_class_interface (name, super, protos, attributes); /* Handle instance variable declarations, if any. */ cp_parser_objc_class_ivars (parser); objc_continue_interface (); } cp_parser_objc_method_prototype_list (parser); } /* Parse an Objective-C class implementation. 
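   For example: `@implementation Foo ... @end', or, for a category,
   `@implementation Foo (Printing) ... @end'.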
*/ static void cp_parser_objc_class_implementation (cp_parser* parser) { tree name, super, categ; bool is_class_extension; cp_lexer_consume_token (parser->lexer); /* Eat '@implementation'. */ name = cp_parser_identifier (parser); if (name == error_mark_node) { /* It's hard to recover because even if valid @implementation stuff is to follow, we can't compile it (or validate it) if we don't even know which class it refers to. Let's assume this was a stray '@implementation' token in the stream and skip it. */ return; } cp_parser_objc_superclass_or_category (parser, false, &super, &categ, &is_class_extension); /* We have either a class or a category on our hands. */ if (categ) objc_start_category_implementation (name, categ); else { objc_start_class_implementation (name, super); /* Handle instance variable declarations, if any. */ cp_parser_objc_class_ivars (parser); objc_continue_implementation (); } cp_parser_objc_method_definition_list (parser); } /* Consume the @end token and finish off the implementation. */ static void cp_parser_objc_end_implementation (cp_parser* parser) { cp_lexer_consume_token (parser->lexer); /* Eat '@end'. */ objc_finish_implementation (); } /* Parse an Objective-C declaration. */ static void cp_parser_objc_declaration (cp_parser* parser, tree attributes) { /* Try to figure out what kind of declaration is present. */ cp_token *kwd = cp_lexer_peek_token (parser->lexer); if (attributes) switch (kwd->keyword) { case RID_AT_ALIAS: case RID_AT_CLASS: case RID_AT_END: error_at (kwd->location, "attributes may not be specified before" " the %<@%D%> Objective-C++ keyword", kwd->u.value); attributes = NULL; break; case RID_AT_IMPLEMENTATION: warning_at (kwd->location, OPT_Wattributes, "prefix attributes are ignored before %<@%D%>", kwd->u.value); attributes = NULL; default: break; } switch (kwd->keyword) { case RID_AT_ALIAS: cp_parser_objc_alias_declaration (parser); break; case RID_AT_CLASS: cp_parser_objc_class_declaration (parser); break; case RID_AT_PROTOCOL: cp_parser_objc_protocol_declaration (parser, attributes); break; case RID_AT_INTERFACE: cp_parser_objc_class_interface (parser, attributes); break; case RID_AT_IMPLEMENTATION: cp_parser_objc_class_implementation (parser); break; case RID_AT_END: cp_parser_objc_end_implementation (parser); break; default: error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct", kwd->u.value); cp_parser_skip_to_end_of_block_or_statement (parser); } } /* Parse an Objective-C try-catch-finally statement. objc-try-catch-finally-stmt: @try compound-statement objc-catch-clause-seq [opt] objc-finally-clause [opt] objc-catch-clause-seq: objc-catch-clause objc-catch-clause-seq [opt] objc-catch-clause: @catch ( objc-exception-declaration ) compound-statement objc-finally-clause: @finally compound-statement objc-exception-declaration: parameter-declaration '...' where '...' is to be interpreted literally, that is, it means CPP_ELLIPSIS. Returns NULL_TREE. PS: This function is identical to c_parser_objc_try_catch_finally_statement for C. Keep them in sync. */ static tree cp_parser_objc_try_catch_finally_statement (cp_parser *parser) { location_t location; tree stmt; cp_parser_require_keyword (parser, RID_AT_TRY, RT_AT_TRY); location = cp_lexer_peek_token (parser->lexer)->location; objc_maybe_warn_exceptions (location); /* NB: The @try block needs to be wrapped in its own STATEMENT_LIST node, lest it get absorbed into the surrounding block. 
*/
  stmt = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);
  objc_begin_try_stmt (location, pop_stmt_list (stmt));

  while (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_CATCH))
    {
      cp_parameter_declarator *parm;
      tree parameter_declaration = error_mark_node;
      bool seen_open_paren = false;

      cp_lexer_consume_token (parser->lexer);
      if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN))
        seen_open_paren = true;
      if (cp_lexer_next_token_is (parser->lexer, CPP_ELLIPSIS))
        {
          /* We have "@catch (...)" (where the '...' are literally what is
             in the code).  Skip the '...'.  parameter_declaration is set
             to NULL_TREE, and objc_begin_catch_clause() knows that that
             means '...'.  */
          cp_lexer_consume_token (parser->lexer);
          parameter_declaration = NULL_TREE;
        }
      else
        {
          /* We have "@catch (NSException *exception)" or something like
             that.  Parse the parameter declaration.  */
          parm = cp_parser_parameter_declaration (parser, false, NULL);
          if (parm == NULL)
            parameter_declaration = error_mark_node;
          else
            parameter_declaration
              = grokdeclarator (parm->declarator, &parm->decl_specifiers,
                                PARM, /*initialized=*/0, /*attrlist=*/NULL);
        }
      if (seen_open_paren)
        cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);
      else
        {
          /* If there was no open parenthesis, we are recovering from an
             error, and we are trying to figure out what mistake the user
             has made.  */

          /* If there is an immediate closing parenthesis, the user
             probably forgot the opening one (ie, they typed
             "@catch NSException *e)".  Parse the closing parenthesis and
             keep going.  */
          if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN))
            cp_lexer_consume_token (parser->lexer);

          /* If there is no immediate closing parenthesis, the user
             probably doesn't know that parentheses are required at all
             (ie, they typed "@catch NSException *e").  So, just forget
             about the closing parenthesis and keep going.  */
        }
      objc_begin_catch_clause (parameter_declaration);
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_finish_catch_clause ();
    }
  if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AT_FINALLY))
    {
      cp_lexer_consume_token (parser->lexer);
      location = cp_lexer_peek_token (parser->lexer)->location;
      /* NB: The @finally block needs to be wrapped in its own
         STATEMENT_LIST node, lest it get absorbed into the surrounding
         block.  */
      stmt = push_stmt_list ();
      cp_parser_compound_statement (parser, NULL, false, false);
      objc_build_finally_clause (location, pop_stmt_list (stmt));
    }

  return objc_finish_try_stmt ();
}

/* Parse an Objective-C synchronized statement.

   objc-synchronized-stmt:
     @synchronized ( expression ) compound-statement

   Returns NULL_TREE.  */

static tree
cp_parser_objc_synchronized_statement (cp_parser *parser)
{
  location_t location;
  tree lock, stmt;

  cp_parser_require_keyword (parser, RID_AT_SYNCHRONIZED, RT_AT_SYNCHRONIZED);

  location = cp_lexer_peek_token (parser->lexer)->location;
  objc_maybe_warn_exceptions (location);
  cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN);
  lock = cp_parser_expression (parser);
  cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN);

  /* NB: The @synchronized block needs to be wrapped in its own
     STATEMENT_LIST node, lest it get absorbed into the surrounding
     block.  */
  stmt = push_stmt_list ();
  cp_parser_compound_statement (parser, NULL, false, false);

  return objc_build_synchronized (location, lock, pop_stmt_list (stmt));
}

/* Parse an Objective-C throw statement.

   objc-throw-stmt:
     @throw assignment-expression [opt] ;

   Returns a constructed '@throw' statement.
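   For example: `@throw exc;', or simply `@throw;' inside an @catch
   block to rethrow the exception currently being handled.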
*/

static tree
cp_parser_objc_throw_statement (cp_parser *parser)
{
  tree expr = NULL_TREE;
  location_t loc = cp_lexer_peek_token (parser->lexer)->location;

  cp_parser_require_keyword (parser, RID_AT_THROW, RT_AT_THROW);

  if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    expr = cp_parser_expression (parser);

  cp_parser_consume_semicolon_at_end_of_statement (parser);

  return objc_build_throw_stmt (loc, expr);
}

/* Parse an Objective-C statement.  */

static tree
cp_parser_objc_statement (cp_parser * parser)
{
  /* Try to figure out what kind of declaration is present.  */
  cp_token *kwd = cp_lexer_peek_token (parser->lexer);

  switch (kwd->keyword)
    {
    case RID_AT_TRY:
      return cp_parser_objc_try_catch_finally_statement (parser);
    case RID_AT_SYNCHRONIZED:
      return cp_parser_objc_synchronized_statement (parser);
    case RID_AT_THROW:
      return cp_parser_objc_throw_statement (parser);
    default:
      error_at (kwd->location, "misplaced %<@%D%> Objective-C++ construct",
                kwd->u.value);
      cp_parser_skip_to_end_of_block_or_statement (parser);
    }

  return error_mark_node;
}

/* If we are compiling ObjC++ and we see an __attribute__ we need to
   look ahead to see if an objc keyword follows the attributes.  This
   is to detect the use of prefix attributes on ObjC @interface and
   @protocol.  */

static bool
cp_parser_objc_valid_prefix_attributes (cp_parser* parser, tree *attrib)
{
  cp_lexer_save_tokens (parser->lexer);
  *attrib = cp_parser_attributes_opt (parser);
  gcc_assert (*attrib);
  if (OBJC_IS_AT_KEYWORD (cp_lexer_peek_token (parser->lexer)->keyword))
    {
      cp_lexer_commit_tokens (parser->lexer);
      return true;
    }
  cp_lexer_rollback_tokens (parser->lexer);
  return false;
}

/* This routine is a minimal replacement for
   c_parser_struct_declaration () used when parsing the list of
   types/names or ObjC++ properties.  For example, when parsing the
   code

   @property (readonly) int a, b, c;

   this function is responsible for parsing "int a, int b, int c" and
   returning the declarations as CHAIN of DECLs.

   TODO: Share this code with cp_parser_objc_class_ivars.  It's very
   similar parsing.  */

static tree
cp_parser_objc_struct_declaration (cp_parser *parser)
{
  tree decls = NULL_TREE;
  cp_decl_specifier_seq declspecs;
  int decl_class_or_enum_p;
  tree prefix_attributes;

  cp_parser_decl_specifier_seq (parser,
                                CP_PARSER_FLAGS_NONE,
                                &declspecs,
                                &decl_class_or_enum_p);

  if (declspecs.type == error_mark_node)
    return error_mark_node;

  /* auto, register, static, extern, mutable.  */
  if (declspecs.storage_class != sc_none)
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.storage_class = sc_none;
    }

  /* thread_local.  */
  if (decl_spec_seq_has_spec_p (&declspecs, ds_thread))
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.locations[ds_thread] = 0;
    }

  /* typedef.  */
  if (decl_spec_seq_has_spec_p (&declspecs, ds_typedef))
    {
      cp_parser_error (parser, "invalid type for property");
      declspecs.locations[ds_typedef] = 0;
    }

  prefix_attributes = declspecs.attributes;
  declspecs.attributes = NULL_TREE;

  /* Keep going until we hit the `;' at the end of the declaration.  */
  while (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      tree attributes, first_attribute, decl;
      cp_declarator *declarator;
      cp_token *token;

      /* Parse the declarator.  */
      declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED,
                                         NULL, NULL, false, false);

      /* Look for attributes that apply to the ivar.  */
      attributes = cp_parser_attributes_opt (parser);

      /* Remember which attributes are prefix attributes and which are
         not.
*/ first_attribute = attributes; /* Combine the attributes. */ attributes = chainon (prefix_attributes, attributes); decl = grokfield (declarator, &declspecs, NULL_TREE, /*init_const_expr_p=*/false, NULL_TREE, attributes); if (decl == error_mark_node || decl == NULL_TREE) return error_mark_node; /* Reset PREFIX_ATTRIBUTES. */ while (attributes && TREE_CHAIN (attributes) != first_attribute) attributes = TREE_CHAIN (attributes); if (attributes) TREE_CHAIN (attributes) = NULL_TREE; DECL_CHAIN (decl) = decls; decls = decl; token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_COMMA) { cp_lexer_consume_token (parser->lexer); /* Eat ','. */ continue; } else break; } return decls; } /* Parse an Objective-C @property declaration. The syntax is: objc-property-declaration: '@property' objc-property-attributes[opt] struct-declaration ; objc-property-attributes: '(' objc-property-attribute-list ')' objc-property-attribute-list: objc-property-attribute objc-property-attribute-list, objc-property-attribute objc-property-attribute 'getter' = identifier 'setter' = identifier 'readonly' 'readwrite' 'assign' 'retain' 'copy' 'nonatomic' For example: @property NSString *name; @property (readonly) id object; @property (retain, nonatomic, getter=getTheName) id name; @property int a, b, c; PS: This function is identical to c_parser_objc_at_property_declaration for C. Keep them in sync. */ static void cp_parser_objc_at_property_declaration (cp_parser *parser) { /* The following variables hold the attributes of the properties as parsed. They are 'false' or 'NULL_TREE' if the attribute was not seen. When we see an attribute, we set them to 'true' (if they are boolean properties) or to the identifier (if they have an argument, ie, for getter and setter). Note that here we only parse the list of attributes, check the syntax and accumulate the attributes that we find. objc_add_property_declaration() will then process the information. */ bool property_assign = false; bool property_copy = false; tree property_getter_ident = NULL_TREE; bool property_nonatomic = false; bool property_readonly = false; bool property_readwrite = false; bool property_retain = false; tree property_setter_ident = NULL_TREE; /* 'properties' is the list of properties that we read. Usually a single one, but maybe more (eg, in "@property int a, b, c;" there are three). */ tree properties; location_t loc; loc = cp_lexer_peek_token (parser->lexer)->location; cp_lexer_consume_token (parser->lexer); /* Eat '@property'. */ /* Parse the optional attribute list... */ if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { /* Eat the '('. 
*/ cp_lexer_consume_token (parser->lexer); while (true) { bool syntax_error = false; cp_token *token = cp_lexer_peek_token (parser->lexer); enum rid keyword; if (token->type != CPP_NAME) { cp_parser_error (parser, "expected identifier"); break; } keyword = C_RID_CODE (token->u.value); cp_lexer_consume_token (parser->lexer); switch (keyword) { case RID_ASSIGN: property_assign = true; break; case RID_COPY: property_copy = true; break; case RID_NONATOMIC: property_nonatomic = true; break; case RID_READONLY: property_readonly = true; break; case RID_READWRITE: property_readwrite = true; break; case RID_RETAIN: property_retain = true; break; case RID_GETTER: case RID_SETTER: if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)) { if (keyword == RID_GETTER) cp_parser_error (parser, "missing %<=%> (after %<getter%> attribute)"); else cp_parser_error (parser, "missing %<=%> (after %<setter%> attribute)"); syntax_error = true; break; } cp_lexer_consume_token (parser->lexer); /* eat the = */ if (!cp_parser_objc_selector_p (cp_lexer_peek_token (parser->lexer)->type)) { cp_parser_error (parser, "expected identifier"); syntax_error = true; break; } if (keyword == RID_SETTER) { if (property_setter_ident != NULL_TREE) { cp_parser_error (parser, "the %<setter%> attribute may only be specified once"); cp_lexer_consume_token (parser->lexer); } else property_setter_ident = cp_parser_objc_selector (parser); if (cp_lexer_next_token_is_not (parser->lexer, CPP_COLON)) cp_parser_error (parser, "setter name must terminate with %<:%>"); else cp_lexer_consume_token (parser->lexer); } else { if (property_getter_ident != NULL_TREE) { cp_parser_error (parser, "the %<getter%> attribute may only be specified once"); cp_lexer_consume_token (parser->lexer); } else property_getter_ident = cp_parser_objc_selector (parser); } break; default: cp_parser_error (parser, "unknown property attribute"); syntax_error = true; break; } if (syntax_error) break; if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); else break; } /* FIXME: "@property (setter, assign);" will generate a spurious "error: expected ‘)’ before ‘,’ token". This is because cp_parser_require, unlike the C counterpart, will produce an error even if we are in error recovery. */ if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) { cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); } } /* ... and the property declaration(s). */ properties = cp_parser_objc_struct_declaration (parser); if (properties == error_mark_node) { cp_parser_skip_to_end_of_statement (parser); /* If the next token is now a `;', consume it. */ if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); return; } if (properties == NULL_TREE) cp_parser_error (parser, "expected identifier"); else { /* Comma-separated properties are chained together in reverse order; add them one by one. */ properties = nreverse (properties); for (; properties; properties = TREE_CHAIN (properties)) objc_add_property_declaration (loc, copy_node (properties), property_readonly, property_readwrite, property_assign, property_retain, property_copy, property_nonatomic, property_getter_ident, property_setter_ident); } cp_parser_consume_semicolon_at_end_of_statement (parser); } /* Parse an Objective-C++ @synthesize declaration. 
The syntax is: objc-synthesize-declaration: @synthesize objc-synthesize-identifier-list ; objc-synthesize-identifier-list: objc-synthesize-identifier objc-synthesize-identifier-list, objc-synthesize-identifier objc-synthesize-identifier identifier identifier = identifier For example: @synthesize MyProperty; @synthesize OneProperty, AnotherProperty=MyIvar, YetAnotherProperty; PS: This function is identical to c_parser_objc_at_synthesize_declaration for C. Keep them in sync. */ static void cp_parser_objc_at_synthesize_declaration (cp_parser *parser) { tree list = NULL_TREE; location_t loc; loc = cp_lexer_peek_token (parser->lexer)->location; cp_lexer_consume_token (parser->lexer); /* Eat '@synthesize'. */ while (true) { tree property, ivar; property = cp_parser_identifier (parser); if (property == error_mark_node) { cp_parser_consume_semicolon_at_end_of_statement (parser); return; } if (cp_lexer_next_token_is (parser->lexer, CPP_EQ)) { cp_lexer_consume_token (parser->lexer); ivar = cp_parser_identifier (parser); if (ivar == error_mark_node) { cp_parser_consume_semicolon_at_end_of_statement (parser); return; } } else ivar = NULL_TREE; list = chainon (list, build_tree_list (ivar, property)); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); else break; } cp_parser_consume_semicolon_at_end_of_statement (parser); objc_add_synthesize_declaration (loc, list); } /* Parse an Objective-C++ @dynamic declaration. The syntax is: objc-dynamic-declaration: @dynamic identifier-list ; For example: @dynamic MyProperty; @dynamic MyProperty, AnotherProperty; PS: This function is identical to c_parser_objc_at_dynamic_declaration for C. Keep them in sync. */ static void cp_parser_objc_at_dynamic_declaration (cp_parser *parser) { tree list = NULL_TREE; location_t loc; loc = cp_lexer_peek_token (parser->lexer)->location; cp_lexer_consume_token (parser->lexer); /* Eat '@dynamic'. */ while (true) { tree property; property = cp_parser_identifier (parser); if (property == error_mark_node) { cp_parser_consume_semicolon_at_end_of_statement (parser); return; } list = chainon (list, build_tree_list (NULL, property)); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); else break; } cp_parser_consume_semicolon_at_end_of_statement (parser); objc_add_dynamic_declaration (loc, list); } /* OpenMP 2.5 / 3.0 / 3.1 / 4.0 parsing routines. */ /* Returns name of the next clause. If the clause is not recognized PRAGMA_OMP_CLAUSE_NONE is returned and the token is not consumed. Otherwise appropriate pragma_omp_clause is returned and the token is consumed. 
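For example, when the upcoming tokens form the illustrative clause num_threads (4), only the num_threads name itself is consumed here: PRAGMA_OMP_CLAUSE_NUM_THREADS is returned and the parenthesized argument is left to the clause-specific parser.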
*/ static pragma_omp_clause cp_parser_omp_clause_name (cp_parser *parser) { pragma_omp_clause result = PRAGMA_OMP_CLAUSE_NONE; if (cp_lexer_next_token_is_keyword (parser->lexer, RID_IF)) result = PRAGMA_OMP_CLAUSE_IF; else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_DEFAULT)) result = PRAGMA_OMP_CLAUSE_DEFAULT; else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_DELETE)) result = PRAGMA_OACC_CLAUSE_DELETE; else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_PRIVATE)) result = PRAGMA_OMP_CLAUSE_PRIVATE; else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR)) result = PRAGMA_OMP_CLAUSE_FOR; else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); switch (p[0]) { case 'a': if (!strcmp ("aligned", p)) result = PRAGMA_OMP_CLAUSE_ALIGNED; else if (!strcmp ("async", p)) result = PRAGMA_OACC_CLAUSE_ASYNC; break; case 'c': if (!strcmp ("collapse", p)) result = PRAGMA_OMP_CLAUSE_COLLAPSE; else if (!strcmp ("copy", p)) result = PRAGMA_OACC_CLAUSE_COPY; else if (!strcmp ("copyin", p)) result = PRAGMA_OMP_CLAUSE_COPYIN; else if (!strcmp ("copyout", p)) result = PRAGMA_OACC_CLAUSE_COPYOUT; else if (!strcmp ("copyprivate", p)) result = PRAGMA_OMP_CLAUSE_COPYPRIVATE; else if (!strcmp ("create", p)) result = PRAGMA_OACC_CLAUSE_CREATE; break; case 'd': if (!strcmp ("depend", p)) result = PRAGMA_OMP_CLAUSE_DEPEND; else if (!strcmp ("device", p)) result = PRAGMA_OMP_CLAUSE_DEVICE; else if (!strcmp ("deviceptr", p)) result = PRAGMA_OACC_CLAUSE_DEVICEPTR; else if (!strcmp ("dist_schedule", p)) result = PRAGMA_OMP_CLAUSE_DIST_SCHEDULE; break; case 'f': if (!strcmp ("final", p)) result = PRAGMA_OMP_CLAUSE_FINAL; else if (!strcmp ("firstprivate", p)) result = PRAGMA_OMP_CLAUSE_FIRSTPRIVATE; else if (!strcmp ("from", p)) result = PRAGMA_OMP_CLAUSE_FROM; break; case 'h': if (!strcmp ("host", p)) result = PRAGMA_OACC_CLAUSE_HOST; break; case 'i': if (!strcmp ("inbranch", p)) result = PRAGMA_OMP_CLAUSE_INBRANCH; break; case 'l': if (!strcmp ("lastprivate", p)) result = PRAGMA_OMP_CLAUSE_LASTPRIVATE; else if (!strcmp ("linear", p)) result = PRAGMA_OMP_CLAUSE_LINEAR; break; case 'm': if (!strcmp ("map", p)) result = PRAGMA_OMP_CLAUSE_MAP; else if (!strcmp ("mergeable", p)) result = PRAGMA_OMP_CLAUSE_MERGEABLE; else if (flag_cilkplus && !strcmp ("mask", p)) result = PRAGMA_CILK_CLAUSE_MASK; break; case 'n': if (!strcmp ("notinbranch", p)) result = PRAGMA_OMP_CLAUSE_NOTINBRANCH; else if (!strcmp ("nowait", p)) result = PRAGMA_OMP_CLAUSE_NOWAIT; else if (flag_cilkplus && !strcmp ("nomask", p)) result = PRAGMA_CILK_CLAUSE_NOMASK; else if (!strcmp ("num_gangs", p)) result = PRAGMA_OACC_CLAUSE_NUM_GANGS; else if (!strcmp ("num_teams", p)) result = PRAGMA_OMP_CLAUSE_NUM_TEAMS; else if (!strcmp ("num_threads", p)) result = PRAGMA_OMP_CLAUSE_NUM_THREADS; else if (!strcmp ("num_workers", p)) result = PRAGMA_OACC_CLAUSE_NUM_WORKERS; break; case 'o': if (!strcmp ("ordered", p)) result = PRAGMA_OMP_CLAUSE_ORDERED; break; case 'p': if (!strcmp ("parallel", p)) result = PRAGMA_OMP_CLAUSE_PARALLEL; else if (!strcmp ("present", p)) result = PRAGMA_OACC_CLAUSE_PRESENT; else if (!strcmp ("present_or_copy", p) || !strcmp ("pcopy", p)) result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY; else if (!strcmp ("present_or_copyin", p) || !strcmp ("pcopyin", p)) result = PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN; else if (!strcmp ("present_or_copyout", p) || !strcmp ("pcopyout", p)) result = 
PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT; else if (!strcmp ("present_or_create", p) || !strcmp ("pcreate", p)) result = PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE; else if (!strcmp ("proc_bind", p)) result = PRAGMA_OMP_CLAUSE_PROC_BIND; break; case 'r': if (!strcmp ("reduction", p)) result = PRAGMA_OMP_CLAUSE_REDUCTION; break; case 's': if (!strcmp ("safelen", p)) result = PRAGMA_OMP_CLAUSE_SAFELEN; else if (!strcmp ("schedule", p)) result = PRAGMA_OMP_CLAUSE_SCHEDULE; else if (!strcmp ("sections", p)) result = PRAGMA_OMP_CLAUSE_SECTIONS; else if (!strcmp ("self", p)) result = PRAGMA_OACC_CLAUSE_SELF; else if (!strcmp ("shared", p)) result = PRAGMA_OMP_CLAUSE_SHARED; else if (!strcmp ("simdlen", p)) result = PRAGMA_OMP_CLAUSE_SIMDLEN; break; case 't': if (!strcmp ("taskgroup", p)) result = PRAGMA_OMP_CLAUSE_TASKGROUP; else if (!strcmp ("thread_limit", p)) result = PRAGMA_OMP_CLAUSE_THREAD_LIMIT; else if (!strcmp ("to", p)) result = PRAGMA_OMP_CLAUSE_TO; break; case 'u': if (!strcmp ("uniform", p)) result = PRAGMA_OMP_CLAUSE_UNIFORM; else if (!strcmp ("untied", p)) result = PRAGMA_OMP_CLAUSE_UNTIED; break; case 'v': if (!strcmp ("vector_length", p)) result = PRAGMA_OACC_CLAUSE_VECTOR_LENGTH; else if (flag_cilkplus && !strcmp ("vectorlength", p)) result = PRAGMA_CILK_CLAUSE_VECTORLENGTH; break; case 'w': if (!strcmp ("wait", p)) result = PRAGMA_OACC_CLAUSE_WAIT; break; } } if (result != PRAGMA_OMP_CLAUSE_NONE) cp_lexer_consume_token (parser->lexer); return result; } /* Validate that a clause of the given type does not already exist. */ static void check_no_duplicate_clause (tree clauses, enum omp_clause_code code, const char *name, location_t location) { tree c; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) if (OMP_CLAUSE_CODE (c) == code) { error_at (location, "too many %qs clauses", name); break; } } /* OpenMP 2.5: variable-list: identifier variable-list , identifier In addition, we match a closing parenthesis (or, if COLON is non-NULL, colon). An opening parenthesis will have been consumed by the caller. If KIND is nonzero, create the appropriate node and install the decl in OMP_CLAUSE_DECL and add the node to the head of the list. If KIND is zero, create a TREE_LIST with the decl in TREE_PURPOSE; return the list created. COLON can be NULL if only closing parenthesis should end the list, or pointer to bool which will receive false if the list is terminated by closing parenthesis or true if the list is terminated by colon. */ static tree cp_parser_omp_var_list_no_open (cp_parser *parser, enum omp_clause_code kind, tree list, bool *colon) { cp_token *token; bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p; if (colon) { parser->colon_corrects_to_scope_p = false; *colon = false; } while (1) { tree name, decl; token = cp_lexer_peek_token (parser->lexer); name = cp_parser_id_expression (parser, /*template_p=*/false, /*check_dependency_p=*/true, /*template_p=*/NULL, /*declarator_p=*/false, /*optional_p=*/false); if (name == error_mark_node) goto skip_comma; decl = cp_parser_lookup_name_simple (parser, name, token->location); if (decl == error_mark_node) cp_parser_name_lookup_error (parser, name, decl, NLE_NULL, token->location); else if (kind != 0) { switch (kind) { case OMP_CLAUSE__CACHE_: if (cp_lexer_peek_token (parser->lexer)->type != CPP_OPEN_SQUARE) { error_at (token->location, "expected %<[%>"); decl = error_mark_node; break; } /* FALL THROUGH. 
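The array-section parsing below applies to these clauses as well; e.g., for the illustrative clause map (tofrom: a[0:n]) the loop wraps the decl for A in a TREE_LIST node whose TREE_PURPOSE is the low bound 0 and whose TREE_VALUE is the length N.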
*/ case OMP_CLAUSE_MAP: case OMP_CLAUSE_FROM: case OMP_CLAUSE_TO: case OMP_CLAUSE_DEPEND: while (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_SQUARE)) { tree low_bound = NULL_TREE, length = NULL_TREE; parser->colon_corrects_to_scope_p = false; cp_lexer_consume_token (parser->lexer); if (!cp_lexer_next_token_is (parser->lexer, CPP_COLON)) low_bound = cp_parser_expression (parser); if (!colon) parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_SQUARE)) length = integer_one_node; else { /* Look for `:'. */ if (!cp_parser_require (parser, CPP_COLON, RT_COLON)) goto skip_comma; if (!cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_SQUARE)) length = cp_parser_expression (parser); } /* Look for the closing `]'. */ if (!cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE)) goto skip_comma; if (kind == OMP_CLAUSE__CACHE_) { if (TREE_CODE (low_bound) != INTEGER_CST && !TREE_READONLY (low_bound)) { error_at (token->location, "%qD is not a constant", low_bound); decl = error_mark_node; } if (TREE_CODE (length) != INTEGER_CST && !TREE_READONLY (length)) { error_at (token->location, "%qD is not a constant", length); decl = error_mark_node; } } decl = tree_cons (low_bound, length, decl); } break; default: break; } tree u = build_omp_clause (token->location, kind); OMP_CLAUSE_DECL (u) = decl; OMP_CLAUSE_CHAIN (u) = list; list = u; } else list = tree_cons (decl, NULL_TREE, list); get_comma: if (cp_lexer_next_token_is_not (parser->lexer, CPP_COMMA)) break; cp_lexer_consume_token (parser->lexer); } if (colon) parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; if (colon != NULL && cp_lexer_next_token_is (parser->lexer, CPP_COLON)) { *colon = true; cp_parser_require (parser, CPP_COLON, RT_COLON); return list; } if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) { int ending; /* Try to resync to an unnested comma. Copied from cp_parser_parenthesized_expression_list. */ skip_comma: if (colon) parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; ending = cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/true, /*consume_paren=*/true); if (ending < 0) goto get_comma; } return list; } /* Similarly, but expect leading and trailing parenthesis. This is a very common case for omp clauses. 
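E.g., for an illustrative clause such as private (x, y, z) the caller has already consumed the clause name; this wrapper consumes the opening parenthesis and lets cp_parser_omp_var_list_no_open collect the comma-separated names together with the closing parenthesis.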
*/ static tree cp_parser_omp_var_list (cp_parser *parser, enum omp_clause_code kind, tree list) { if (cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return cp_parser_omp_var_list_no_open (parser, kind, list, NULL); return list; } /* OpenACC 2.0: copy ( variable-list ) copyin ( variable-list ) copyout ( variable-list ) create ( variable-list ) delete ( variable-list ) present ( variable-list ) present_or_copy ( variable-list ) pcopy ( variable-list ) present_or_copyin ( variable-list ) pcopyin ( variable-list ) present_or_copyout ( variable-list ) pcopyout ( variable-list ) present_or_create ( variable-list ) pcreate ( variable-list ) */ static tree cp_parser_oacc_data_clause (cp_parser *parser, pragma_omp_clause c_kind, tree list) { enum gomp_map_kind kind; switch (c_kind) { case PRAGMA_OACC_CLAUSE_COPY: kind = GOMP_MAP_FORCE_TOFROM; break; case PRAGMA_OACC_CLAUSE_COPYIN: kind = GOMP_MAP_FORCE_TO; break; case PRAGMA_OACC_CLAUSE_COPYOUT: kind = GOMP_MAP_FORCE_FROM; break; case PRAGMA_OACC_CLAUSE_CREATE: kind = GOMP_MAP_FORCE_ALLOC; break; case PRAGMA_OACC_CLAUSE_DELETE: kind = GOMP_MAP_FORCE_DEALLOC; break; case PRAGMA_OACC_CLAUSE_DEVICE: kind = GOMP_MAP_FORCE_TO; break; case PRAGMA_OACC_CLAUSE_HOST: case PRAGMA_OACC_CLAUSE_SELF: kind = GOMP_MAP_FORCE_FROM; break; case PRAGMA_OACC_CLAUSE_PRESENT: kind = GOMP_MAP_FORCE_PRESENT; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY: kind = GOMP_MAP_TOFROM; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN: kind = GOMP_MAP_TO; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT: kind = GOMP_MAP_FROM; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE: kind = GOMP_MAP_ALLOC; break; default: gcc_unreachable (); } tree nl, c; nl = cp_parser_omp_var_list (parser, OMP_CLAUSE_MAP, list); for (c = nl; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_SET_MAP_KIND (c, kind); return nl; } /* OpenACC 2.0: deviceptr ( variable-list ) */ static tree cp_parser_oacc_data_clause_deviceptr (cp_parser *parser, tree list) { location_t loc = cp_lexer_peek_token (parser->lexer)->location; tree vars, t; /* Can't use OMP_CLAUSE_MAP here (that is, can't use the generic cp_parser_oacc_data_clause), as for PRAGMA_OACC_CLAUSE_DEVICEPTR, variable-list must only allow for pointer variables. */ vars = cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL); for (t = vars; t; t = TREE_CHAIN (t)) { tree v = TREE_PURPOSE (t); /* FIXME diagnostics: Ideally we should keep individual locations for all the variables in the var list to make the following errors more precise. Perhaps c_parser_omp_var_list_parens should construct a list of locations to go along with the var list. 
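As an illustrative case, for the clause deviceptr (p, q) the checks below require each of P and Q to be a pointer variable, and any diagnostic is emitted at the single clause location LOC.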
*/ if (TREE_CODE (v) != VAR_DECL) error_at (loc, "%qD is not a variable", v); else if (TREE_TYPE (v) == error_mark_node) ; else if (!POINTER_TYPE_P (TREE_TYPE (v))) error_at (loc, "%qD is not a pointer variable", v); tree u = build_omp_clause (loc, OMP_CLAUSE_MAP); OMP_CLAUSE_SET_MAP_KIND (u, GOMP_MAP_FORCE_DEVICEPTR); OMP_CLAUSE_DECL (u) = v; OMP_CLAUSE_CHAIN (u) = list; list = u; } return list; } /* OpenACC: vector_length ( expression ) */ static tree cp_parser_oacc_clause_vector_length (cp_parser *parser, tree list) { tree t, c; location_t location = cp_lexer_peek_token (parser->lexer)->location; bool error = false; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_condition (parser); if (t == error_mark_node || !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (location, "expected positive integer expression"); error = true; } if (error || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) { cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } check_no_duplicate_clause (list, OMP_CLAUSE_VECTOR_LENGTH, "vector_length", location); c = build_omp_clause (location, OMP_CLAUSE_VECTOR_LENGTH); OMP_CLAUSE_VECTOR_LENGTH_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; return list; } /* OpenACC 2.0 Parse wait clause or directive parameters. */ static tree cp_parser_oacc_wait_list (cp_parser *parser, location_t clause_loc, tree list) { vec<tree, va_gc> *args; tree t, args_tree; args = cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL); if (args == NULL || args->length () == 0) { cp_parser_error (parser, "expected integer expression before ')'"); if (args != NULL) release_tree_vector (args); return list; } args_tree = build_tree_list_vec (args); release_tree_vector (args); for (t = args_tree; t; t = TREE_CHAIN (t)) { tree targ = TREE_VALUE (t); if (targ != error_mark_node) { if (!INTEGRAL_TYPE_P (TREE_TYPE (targ))) error ("%<wait%> expression must be integral"); else { tree c = build_omp_clause (clause_loc, OMP_CLAUSE_WAIT); mark_rvalue_use (targ); OMP_CLAUSE_DECL (c) = targ; OMP_CLAUSE_CHAIN (c) = list; list = c; } } } return list; } /* OpenACC: wait ( int-expr-list ) */ static tree cp_parser_oacc_clause_wait (cp_parser *parser, tree list) { location_t location = cp_lexer_peek_token (parser->lexer)->location; if (cp_lexer_peek_token (parser->lexer)->type != CPP_OPEN_PAREN) return list; list = cp_parser_oacc_wait_list (parser, location, list); return list; } /* OpenMP 3.0: collapse ( constant-expression ) */ static tree cp_parser_omp_clause_collapse (cp_parser *parser, tree list, location_t location) { tree c, num; location_t loc; HOST_WIDE_INT n; loc = cp_lexer_peek_token (parser->lexer)->location; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; num = cp_parser_constant_expression (parser); if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); if (num == error_mark_node) return list; num = fold_non_dependent_expr (num); if (!INTEGRAL_TYPE_P (TREE_TYPE (num)) || !tree_fits_shwi_p (num) || (n = tree_to_shwi (num)) <= 0 || (int) n != n) { error_at (loc, "collapse argument needs positive constant integer expression"); return list; } check_no_duplicate_clause (list, OMP_CLAUSE_COLLAPSE, "collapse", location); c = build_omp_clause (loc, 
OMP_CLAUSE_COLLAPSE); OMP_CLAUSE_CHAIN (c) = list; OMP_CLAUSE_COLLAPSE_EXPR (c) = num; return c; } /* OpenMP 2.5: default ( shared | none ) */ static tree cp_parser_omp_clause_default (cp_parser *parser, tree list, location_t location) { enum omp_clause_default_kind kind = OMP_CLAUSE_DEFAULT_UNSPECIFIED; tree c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); switch (p[0]) { case 'n': if (strcmp ("none", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_NONE; break; case 's': if (strcmp ("shared", p) != 0) goto invalid_kind; kind = OMP_CLAUSE_DEFAULT_SHARED; break; default: goto invalid_kind; } cp_lexer_consume_token (parser->lexer); } else { invalid_kind: cp_parser_error (parser, "expected %<none%> or %<shared%>"); } if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); if (kind == OMP_CLAUSE_DEFAULT_UNSPECIFIED) return list; check_no_duplicate_clause (list, OMP_CLAUSE_DEFAULT, "default", location); c = build_omp_clause (location, OMP_CLAUSE_DEFAULT); OMP_CLAUSE_CHAIN (c) = list; OMP_CLAUSE_DEFAULT_KIND (c) = kind; return c; } /* OpenMP 3.1: final ( expression ) */ static tree cp_parser_omp_clause_final (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_condition (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_FINAL, "final", location); c = build_omp_clause (location, OMP_CLAUSE_FINAL); OMP_CLAUSE_FINAL_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: if ( expression ) */ static tree cp_parser_omp_clause_if (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_condition (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_IF, "if", location); c = build_omp_clause (location, OMP_CLAUSE_IF); OMP_CLAUSE_IF_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 3.1: mergeable */ static tree cp_parser_omp_clause_mergeable (cp_parser * /*parser*/, tree list, location_t location) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_MERGEABLE, "mergeable", location); c = build_omp_clause (location, OMP_CLAUSE_MERGEABLE); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: nowait */ static tree cp_parser_omp_clause_nowait (cp_parser * /*parser*/, tree list, location_t location) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_NOWAIT, "nowait", location); c = build_omp_clause (location, OMP_CLAUSE_NOWAIT); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenACC: num_gangs ( expression ) */ static tree cp_parser_omp_clause_num_gangs (cp_parser *parser, tree list) { tree t, c; location_t location = cp_lexer_peek_token (parser->lexer)->location; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_condition 
(parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (location, "expected positive integer expression"); return list; } check_no_duplicate_clause (list, OMP_CLAUSE_NUM_GANGS, "num_gangs", location); c = build_omp_clause (location, OMP_CLAUSE_NUM_GANGS); OMP_CLAUSE_NUM_GANGS_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; return list; } /* OpenMP 2.5: num_threads ( expression ) */ static tree cp_parser_omp_clause_num_threads (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_expression (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_NUM_THREADS, "num_threads", location); c = build_omp_clause (location, OMP_CLAUSE_NUM_THREADS); OMP_CLAUSE_NUM_THREADS_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenACC: num_workers ( expression ) */ static tree cp_parser_omp_clause_num_workers (cp_parser *parser, tree list) { tree t, c; location_t location = cp_lexer_peek_token (parser->lexer)->location; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_condition (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); if (!INTEGRAL_TYPE_P (TREE_TYPE (t))) { error_at (location, "expected positive integer expression"); return list; } check_no_duplicate_clause (list, OMP_CLAUSE_NUM_WORKERS, "num_workers", location); c = build_omp_clause (location, OMP_CLAUSE_NUM_WORKERS); OMP_CLAUSE_NUM_WORKERS_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; return list; } /* OpenMP 2.5: ordered */ static tree cp_parser_omp_clause_ordered (cp_parser * /*parser*/, tree list, location_t location) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_ORDERED, "ordered", location); c = build_omp_clause (location, OMP_CLAUSE_ORDERED); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 2.5: reduction ( reduction-operator : variable-list ) reduction-operator: One of: + * - & ^ | && || OpenMP 3.1: reduction-operator: One of: + * - & ^ | && || min max OpenMP 4.0: reduction-operator: One of: + * - & ^ | && || id-expression */ static tree cp_parser_omp_clause_reduction (cp_parser *parser, tree list) { enum tree_code code = ERROR_MARK; tree nlist, c, id = NULL_TREE; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; switch (cp_lexer_peek_token (parser->lexer)->type) { case CPP_PLUS: code = PLUS_EXPR; break; case CPP_MULT: code = MULT_EXPR; break; case CPP_MINUS: code = MINUS_EXPR; break; case CPP_AND: code = BIT_AND_EXPR; break; case CPP_XOR: code = BIT_XOR_EXPR; break; case CPP_OR: code = BIT_IOR_EXPR; break; case CPP_AND_AND: code = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: code = TRUTH_ORIF_EXPR; break; default: break; } if (code != ERROR_MARK) cp_lexer_consume_token (parser->lexer); else { bool saved_colon_corrects_to_scope_p; saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p; parser->colon_corrects_to_scope_p = false; id = cp_parser_id_expression (parser,
/*template_p=*/false, /*check_dependency_p=*/true, /*template_p=*/NULL, /*declarator_p=*/false, /*optional_p=*/false); parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; if (identifier_p (id)) { const char *p = IDENTIFIER_POINTER (id); if (strcmp (p, "min") == 0) code = MIN_EXPR; else if (strcmp (p, "max") == 0) code = MAX_EXPR; else if (id == ansi_opname (PLUS_EXPR)) code = PLUS_EXPR; else if (id == ansi_opname (MULT_EXPR)) code = MULT_EXPR; else if (id == ansi_opname (MINUS_EXPR)) code = MINUS_EXPR; else if (id == ansi_opname (BIT_AND_EXPR)) code = BIT_AND_EXPR; else if (id == ansi_opname (BIT_IOR_EXPR)) code = BIT_IOR_EXPR; else if (id == ansi_opname (BIT_XOR_EXPR)) code = BIT_XOR_EXPR; else if (id == ansi_opname (TRUTH_ANDIF_EXPR)) code = TRUTH_ANDIF_EXPR; else if (id == ansi_opname (TRUTH_ORIF_EXPR)) code = TRUTH_ORIF_EXPR; id = omp_reduction_id (code, id, NULL_TREE); tree scope = parser->scope; if (scope) id = build_qualified_name (NULL_TREE, scope, id, false); parser->scope = NULL_TREE; parser->qualifying_scope = NULL_TREE; parser->object_scope = NULL_TREE; } else { error ("invalid reduction-identifier"); resync_fail: cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } } if (!cp_parser_require (parser, CPP_COLON, RT_COLON)) goto resync_fail; nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_REDUCTION, list, NULL); for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c)) { OMP_CLAUSE_REDUCTION_CODE (c) = code; OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = id; } return nlist; } /* OpenMP 2.5: schedule ( schedule-kind ) schedule ( schedule-kind , expression ) schedule-kind: static | dynamic | guided | runtime | auto */ static tree cp_parser_omp_clause_schedule (cp_parser *parser, tree list, location_t location) { tree c, t; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; c = build_omp_clause (location, OMP_CLAUSE_SCHEDULE); if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); switch (p[0]) { case 'd': if (strcmp ("dynamic", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_DYNAMIC; break; case 'g': if (strcmp ("guided", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_GUIDED; break; case 'r': if (strcmp ("runtime", p) != 0) goto invalid_kind; OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_RUNTIME; break; default: goto invalid_kind; } } else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC)) OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_STATIC; else if (cp_lexer_next_token_is_keyword (parser->lexer, RID_AUTO)) OMP_CLAUSE_SCHEDULE_KIND (c) = OMP_CLAUSE_SCHEDULE_AUTO; else goto invalid_kind; cp_lexer_consume_token (parser->lexer); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) { cp_token *token; cp_lexer_consume_token (parser->lexer); token = cp_lexer_peek_token (parser->lexer); t = cp_parser_assignment_expression (parser); if (t == error_mark_node) goto resync_fail; else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_RUNTIME) error_at (token->location, "schedule %<runtime%> does not take " "a %<chunk_size%> parameter"); else if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_AUTO) error_at (token->location, "schedule %<auto%> does not take " "a %<chunk_size%> parameter"); else OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; if (!cp_parser_require (parser, CPP_CLOSE_PAREN, 
RT_CLOSE_PAREN)) goto resync_fail; } else if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_COMMA_CLOSE_PAREN)) goto resync_fail; check_no_duplicate_clause (list, OMP_CLAUSE_SCHEDULE, "schedule", location); OMP_CLAUSE_CHAIN (c) = list; return c; invalid_kind: cp_parser_error (parser, "invalid schedule kind"); resync_fail: cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } /* OpenMP 3.0: untied */ static tree cp_parser_omp_clause_untied (cp_parser * /*parser*/, tree list, location_t location) { tree c; check_no_duplicate_clause (list, OMP_CLAUSE_UNTIED, "untied", location); c = build_omp_clause (location, OMP_CLAUSE_UNTIED); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 4.0: inbranch notinbranch */ static tree cp_parser_omp_clause_branch (cp_parser * /*parser*/, enum omp_clause_code code, tree list, location_t location) { check_no_duplicate_clause (list, code, omp_clause_code_name[code], location); tree c = build_omp_clause (location, code); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 4.0: parallel for sections taskgroup */ static tree cp_parser_omp_clause_cancelkind (cp_parser * /*parser*/, enum omp_clause_code code, tree list, location_t location) { tree c = build_omp_clause (location, code); OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 4.0: num_teams ( expression ) */ static tree cp_parser_omp_clause_num_teams (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_expression (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_NUM_TEAMS, "num_teams", location); c = build_omp_clause (location, OMP_CLAUSE_NUM_TEAMS); OMP_CLAUSE_NUM_TEAMS_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 4.0: thread_limit ( expression ) */ static tree cp_parser_omp_clause_thread_limit (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_expression (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_THREAD_LIMIT, "thread_limit", location); c = build_omp_clause (location, OMP_CLAUSE_THREAD_LIMIT); OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 4.0: aligned ( variable-list ) aligned ( variable-list : constant-expression ) */ static tree cp_parser_omp_clause_aligned (cp_parser *parser, tree list) { tree nlist, c, alignment = NULL_TREE; bool colon; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_ALIGNED, list, &colon); if (colon) { alignment = cp_parser_constant_expression (parser); if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); if (alignment == error_mark_node) alignment = NULL_TREE; } for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = alignment; return nlist; } /* OpenMP 4.0: linear ( variable-list ) 
linear ( variable-list : expression ) */ static tree cp_parser_omp_clause_linear (cp_parser *parser, tree list, bool is_cilk_simd_fn) { tree nlist, c, step = integer_one_node; bool colon; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_LINEAR, list, &colon); if (colon) { step = cp_parser_expression (parser); if (is_cilk_simd_fn && TREE_CODE (step) == PARM_DECL) { sorry ("using parameters for %<linear%> step is not supported yet"); step = integer_one_node; } if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); if (step == error_mark_node) return list; } for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_LINEAR_STEP (c) = step; return nlist; } /* OpenMP 4.0: safelen ( constant-expression ) */ static tree cp_parser_omp_clause_safelen (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_constant_expression (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_SAFELEN, "safelen", location); c = build_omp_clause (location, OMP_CLAUSE_SAFELEN); OMP_CLAUSE_SAFELEN_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 4.0: simdlen ( constant-expression ) */ static tree cp_parser_omp_clause_simdlen (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_constant_expression (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_SIMDLEN, "simdlen", location); c = build_omp_clause (location, OMP_CLAUSE_SIMDLEN); OMP_CLAUSE_SIMDLEN_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 4.0: depend ( depend-kind : variable-list ) depend-kind: in | out | inout */ static tree cp_parser_omp_clause_depend (cp_parser *parser, tree list) { tree nlist, c; enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_INOUT; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (strcmp ("in", p) == 0) kind = OMP_CLAUSE_DEPEND_IN; else if (strcmp ("inout", p) == 0) kind = OMP_CLAUSE_DEPEND_INOUT; else if (strcmp ("out", p) == 0) kind = OMP_CLAUSE_DEPEND_OUT; else goto invalid_kind; } else goto invalid_kind; cp_lexer_consume_token (parser->lexer); if (!cp_parser_require (parser, CPP_COLON, RT_COLON)) goto resync_fail; nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_DEPEND, list, NULL); for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_DEPEND_KIND (c) = kind; return nlist; invalid_kind: cp_parser_error (parser, "invalid depend kind"); resync_fail: cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } /* OpenMP 4.0: map ( map-kind : variable-list ) map ( variable-list ) map-kind: alloc | to | from | tofrom */ 
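/* For example, an illustrative directive such as #pragma omp target map (to: a[0:n]) map (b) yields one OMP_CLAUSE_MAP of kind GOMP_MAP_TO covering the array section of A, and one of the default kind GOMP_MAP_TOFROM for B.  */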
static tree cp_parser_omp_clause_map (cp_parser *parser, tree list) { tree nlist, c; enum gomp_map_kind kind = GOMP_MAP_TOFROM; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME) && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_COLON) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (strcmp ("alloc", p) == 0) kind = GOMP_MAP_ALLOC; else if (strcmp ("to", p) == 0) kind = GOMP_MAP_TO; else if (strcmp ("from", p) == 0) kind = GOMP_MAP_FROM; else if (strcmp ("tofrom", p) == 0) kind = GOMP_MAP_TOFROM; else { cp_parser_error (parser, "invalid map kind"); cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } cp_lexer_consume_token (parser->lexer); cp_lexer_consume_token (parser->lexer); } nlist = cp_parser_omp_var_list_no_open (parser, OMP_CLAUSE_MAP, list, NULL); for (c = nlist; c != list; c = OMP_CLAUSE_CHAIN (c)) OMP_CLAUSE_SET_MAP_KIND (c, kind); return nlist; } /* OpenMP 4.0: device ( expression ) */ static tree cp_parser_omp_clause_device (cp_parser *parser, tree list, location_t location) { tree t, c; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; t = cp_parser_expression (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); check_no_duplicate_clause (list, OMP_CLAUSE_DEVICE, "device", location); c = build_omp_clause (location, OMP_CLAUSE_DEVICE); OMP_CLAUSE_DEVICE_ID (c) = t; OMP_CLAUSE_CHAIN (c) = list; return c; } /* OpenMP 4.0: dist_schedule ( static ) dist_schedule ( static , expression ) */ static tree cp_parser_omp_clause_dist_schedule (cp_parser *parser, tree list, location_t location) { tree c, t; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; c = build_omp_clause (location, OMP_CLAUSE_DIST_SCHEDULE); if (!cp_lexer_next_token_is_keyword (parser->lexer, RID_STATIC)) goto invalid_kind; cp_lexer_consume_token (parser->lexer); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) { cp_lexer_consume_token (parser->lexer); t = cp_parser_assignment_expression (parser); if (t == error_mark_node) goto resync_fail; OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t; if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) goto resync_fail; } else if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_COMMA_CLOSE_PAREN)) goto resync_fail; check_no_duplicate_clause (list, OMP_CLAUSE_DIST_SCHEDULE, "dist_schedule", location); OMP_CLAUSE_CHAIN (c) = list; return c; invalid_kind: cp_parser_error (parser, "invalid dist_schedule kind"); resync_fail: cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } /* OpenMP 4.0: proc_bind ( proc-bind-kind ) proc-bind-kind: master | close | spread */ static tree cp_parser_omp_clause_proc_bind (cp_parser *parser, tree list, location_t location) { tree c; enum omp_clause_proc_bind_kind kind; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return list; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (strcmp ("master", p) == 0) kind = OMP_CLAUSE_PROC_BIND_MASTER; else if (strcmp ("close", p) == 0) kind = OMP_CLAUSE_PROC_BIND_CLOSE; else if 
(strcmp ("spread", p) == 0) kind = OMP_CLAUSE_PROC_BIND_SPREAD; else goto invalid_kind; } else goto invalid_kind; cp_lexer_consume_token (parser->lexer); if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_COMMA_CLOSE_PAREN)) goto resync_fail; c = build_omp_clause (location, OMP_CLAUSE_PROC_BIND); check_no_duplicate_clause (list, OMP_CLAUSE_PROC_BIND, "proc_bind", location); OMP_CLAUSE_PROC_BIND_KIND (c) = kind; OMP_CLAUSE_CHAIN (c) = list; return c; invalid_kind: cp_parser_error (parser, "invalid depend kind"); resync_fail: cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); return list; } /* OpenACC: async [( int-expr )] */ static tree cp_parser_oacc_clause_async (cp_parser *parser, tree list) { tree c, t; location_t loc = cp_lexer_peek_token (parser->lexer)->location; t = build_int_cst (integer_type_node, GOMP_ASYNC_NOVAL); if (cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN) { cp_lexer_consume_token (parser->lexer); t = cp_parser_expression (parser); if (t == error_mark_node || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); } check_no_duplicate_clause (list, OMP_CLAUSE_ASYNC, "async", loc); c = build_omp_clause (loc, OMP_CLAUSE_ASYNC); OMP_CLAUSE_ASYNC_EXPR (c) = t; OMP_CLAUSE_CHAIN (c) = list; list = c; return list; } /* Parse all OpenACC clauses. The set clauses allowed by the directive is a bitmask in MASK. Return the list of clauses found. */ static tree cp_parser_oacc_all_clauses (cp_parser *parser, omp_clause_mask mask, const char *where, cp_token *pragma_tok, bool finish_p = true) { tree clauses = NULL; bool first = true; while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL)) { location_t here; pragma_omp_clause c_kind; const char *c_name; tree prev = clauses; if (!first && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); here = cp_lexer_peek_token (parser->lexer)->location; c_kind = cp_parser_omp_clause_name (parser); switch (c_kind) { case PRAGMA_OACC_CLAUSE_ASYNC: clauses = cp_parser_oacc_clause_async (parser, clauses); c_name = "async"; break; case PRAGMA_OACC_CLAUSE_COLLAPSE: clauses = cp_parser_omp_clause_collapse (parser, clauses, here); c_name = "collapse"; break; case PRAGMA_OACC_CLAUSE_COPY: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "copy"; break; case PRAGMA_OACC_CLAUSE_COPYIN: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "copyin"; break; case PRAGMA_OACC_CLAUSE_COPYOUT: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "copyout"; break; case PRAGMA_OACC_CLAUSE_CREATE: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "create"; break; case PRAGMA_OACC_CLAUSE_DELETE: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "delete"; break; case PRAGMA_OACC_CLAUSE_DEVICE: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "device"; break; case PRAGMA_OACC_CLAUSE_DEVICEPTR: clauses = cp_parser_oacc_data_clause_deviceptr (parser, clauses); c_name = "deviceptr"; break; case PRAGMA_OACC_CLAUSE_HOST: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "host"; break; case PRAGMA_OACC_CLAUSE_IF: clauses = cp_parser_omp_clause_if (parser, clauses, here); c_name = "if"; break; case PRAGMA_OACC_CLAUSE_NUM_GANGS: clauses = cp_parser_omp_clause_num_gangs (parser, 
clauses); c_name = "num_gangs"; break; case PRAGMA_OACC_CLAUSE_NUM_WORKERS: clauses = cp_parser_omp_clause_num_workers (parser, clauses); c_name = "num_workers"; break; case PRAGMA_OACC_CLAUSE_PRESENT: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "present"; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "present_or_copy"; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "present_or_copyin"; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "present_or_copyout"; break; case PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "present_or_create"; break; case PRAGMA_OACC_CLAUSE_REDUCTION: clauses = cp_parser_omp_clause_reduction (parser, clauses); c_name = "reduction"; break; case PRAGMA_OACC_CLAUSE_SELF: clauses = cp_parser_oacc_data_clause (parser, c_kind, clauses); c_name = "self"; break; case PRAGMA_OACC_CLAUSE_VECTOR_LENGTH: clauses = cp_parser_oacc_clause_vector_length (parser, clauses); c_name = "vector_length"; break; case PRAGMA_OACC_CLAUSE_WAIT: clauses = cp_parser_oacc_clause_wait (parser, clauses); c_name = "wait"; break; default: cp_parser_error (parser, "expected %<#pragma acc%> clause"); goto saw_error; } first = false; if (((mask >> c_kind) & 1) == 0) { /* Remove the invalid clause(s) from the list to avoid confusing the rest of the compiler. */ clauses = prev; error_at (here, "%qs is not valid for %qs", c_name, where); } } saw_error: cp_parser_skip_to_pragma_eol (parser, pragma_tok); if (finish_p) return finish_omp_clauses (clauses); return clauses; } /* Parse all OpenMP clauses. The set of clauses allowed by the directive is a bitmask in MASK. Return the list of clauses found.
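E.g., for an illustrative directive such as #pragma omp parallel if (n > 1) num_threads (4) private (i) the loop below dispatches to the if, num_threads and private clause parsers in turn, chaining each new OMP_CLAUSE node onto the head of CLAUSES, so the finished list is in reverse source order.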
*/ static tree cp_parser_omp_all_clauses (cp_parser *parser, omp_clause_mask mask, const char *where, cp_token *pragma_tok, bool finish_p = true) { tree clauses = NULL; bool first = true; cp_token *token = NULL; bool cilk_simd_fn = false; while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL)) { pragma_omp_clause c_kind; const char *c_name; tree prev = clauses; if (!first && cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); token = cp_lexer_peek_token (parser->lexer); c_kind = cp_parser_omp_clause_name (parser); switch (c_kind) { case PRAGMA_OMP_CLAUSE_COLLAPSE: clauses = cp_parser_omp_clause_collapse (parser, clauses, token->location); c_name = "collapse"; break; case PRAGMA_OMP_CLAUSE_COPYIN: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYIN, clauses); c_name = "copyin"; break; case PRAGMA_OMP_CLAUSE_COPYPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_COPYPRIVATE, clauses); c_name = "copyprivate"; break; case PRAGMA_OMP_CLAUSE_DEFAULT: clauses = cp_parser_omp_clause_default (parser, clauses, token->location); c_name = "default"; break; case PRAGMA_OMP_CLAUSE_FINAL: clauses = cp_parser_omp_clause_final (parser, clauses, token->location); c_name = "final"; break; case PRAGMA_OMP_CLAUSE_FIRSTPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FIRSTPRIVATE, clauses); c_name = "firstprivate"; break; case PRAGMA_OMP_CLAUSE_IF: clauses = cp_parser_omp_clause_if (parser, clauses, token->location); c_name = "if"; break; case PRAGMA_OMP_CLAUSE_LASTPRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_LASTPRIVATE, clauses); c_name = "lastprivate"; break; case PRAGMA_OMP_CLAUSE_MERGEABLE: clauses = cp_parser_omp_clause_mergeable (parser, clauses, token->location); c_name = "mergeable"; break; case PRAGMA_OMP_CLAUSE_NOWAIT: clauses = cp_parser_omp_clause_nowait (parser, clauses, token->location); c_name = "nowait"; break; case PRAGMA_OMP_CLAUSE_NUM_THREADS: clauses = cp_parser_omp_clause_num_threads (parser, clauses, token->location); c_name = "num_threads"; break; case PRAGMA_OMP_CLAUSE_ORDERED: clauses = cp_parser_omp_clause_ordered (parser, clauses, token->location); c_name = "ordered"; break; case PRAGMA_OMP_CLAUSE_PRIVATE: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_PRIVATE, clauses); c_name = "private"; break; case PRAGMA_OMP_CLAUSE_REDUCTION: clauses = cp_parser_omp_clause_reduction (parser, clauses); c_name = "reduction"; break; case PRAGMA_OMP_CLAUSE_SCHEDULE: clauses = cp_parser_omp_clause_schedule (parser, clauses, token->location); c_name = "schedule"; break; case PRAGMA_OMP_CLAUSE_SHARED: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_SHARED, clauses); c_name = "shared"; break; case PRAGMA_OMP_CLAUSE_UNTIED: clauses = cp_parser_omp_clause_untied (parser, clauses, token->location); c_name = "untied"; break; case PRAGMA_OMP_CLAUSE_INBRANCH: case PRAGMA_CILK_CLAUSE_MASK: clauses = cp_parser_omp_clause_branch (parser, OMP_CLAUSE_INBRANCH, clauses, token->location); c_name = "inbranch"; break; case PRAGMA_OMP_CLAUSE_NOTINBRANCH: case PRAGMA_CILK_CLAUSE_NOMASK: clauses = cp_parser_omp_clause_branch (parser, OMP_CLAUSE_NOTINBRANCH, clauses, token->location); c_name = "notinbranch"; break; case PRAGMA_OMP_CLAUSE_PARALLEL: clauses = cp_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_PARALLEL, clauses, token->location); c_name = "parallel"; if (!first) { clause_not_first: error_at (token->location, "%qs must be the first clause of %qs", c_name, where); clauses = prev; } break; case 
PRAGMA_OMP_CLAUSE_FOR: clauses = cp_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_FOR, clauses, token->location); c_name = "for"; if (!first) goto clause_not_first; break; case PRAGMA_OMP_CLAUSE_SECTIONS: clauses = cp_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_SECTIONS, clauses, token->location); c_name = "sections"; if (!first) goto clause_not_first; break; case PRAGMA_OMP_CLAUSE_TASKGROUP: clauses = cp_parser_omp_clause_cancelkind (parser, OMP_CLAUSE_TASKGROUP, clauses, token->location); c_name = "taskgroup"; if (!first) goto clause_not_first; break; case PRAGMA_OMP_CLAUSE_TO: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_TO, clauses); c_name = "to"; break; case PRAGMA_OMP_CLAUSE_FROM: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FROM, clauses); c_name = "from"; break; case PRAGMA_OMP_CLAUSE_UNIFORM: clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_UNIFORM, clauses); c_name = "uniform"; break; case PRAGMA_OMP_CLAUSE_NUM_TEAMS: clauses = cp_parser_omp_clause_num_teams (parser, clauses, token->location); c_name = "num_teams"; break; case PRAGMA_OMP_CLAUSE_THREAD_LIMIT: clauses = cp_parser_omp_clause_thread_limit (parser, clauses, token->location); c_name = "thread_limit"; break; case PRAGMA_OMP_CLAUSE_ALIGNED: clauses = cp_parser_omp_clause_aligned (parser, clauses); c_name = "aligned"; break; case PRAGMA_OMP_CLAUSE_LINEAR: if (((mask >> PRAGMA_CILK_CLAUSE_VECTORLENGTH) & 1) != 0) cilk_simd_fn = true; clauses = cp_parser_omp_clause_linear (parser, clauses, cilk_simd_fn); c_name = "linear"; break; case PRAGMA_OMP_CLAUSE_DEPEND: clauses = cp_parser_omp_clause_depend (parser, clauses); c_name = "depend"; break; case PRAGMA_OMP_CLAUSE_MAP: clauses = cp_parser_omp_clause_map (parser, clauses); c_name = "map"; break; case PRAGMA_OMP_CLAUSE_DEVICE: clauses = cp_parser_omp_clause_device (parser, clauses, token->location); c_name = "device"; break; case PRAGMA_OMP_CLAUSE_DIST_SCHEDULE: clauses = cp_parser_omp_clause_dist_schedule (parser, clauses, token->location); c_name = "dist_schedule"; break; case PRAGMA_OMP_CLAUSE_PROC_BIND: clauses = cp_parser_omp_clause_proc_bind (parser, clauses, token->location); c_name = "proc_bind"; break; case PRAGMA_OMP_CLAUSE_SAFELEN: clauses = cp_parser_omp_clause_safelen (parser, clauses, token->location); c_name = "safelen"; break; case PRAGMA_OMP_CLAUSE_SIMDLEN: clauses = cp_parser_omp_clause_simdlen (parser, clauses, token->location); c_name = "simdlen"; break; case PRAGMA_CILK_CLAUSE_VECTORLENGTH: clauses = cp_parser_cilk_simd_vectorlength (parser, clauses, true); c_name = "simdlen"; break; default: cp_parser_error (parser, "expected %<#pragma omp%> clause"); goto saw_error; } first = false; if (((mask >> c_kind) & 1) == 0) { /* Remove the invalid clause(s) from the list to avoid confusing the rest of the compiler. */ clauses = prev; error_at (token->location, "%qs is not valid for %qs", c_name, where); } } saw_error: /* In Cilk Plus SIMD enabled functions there is no pragma_token, so no reason to skip to the end. */ if (!(flag_cilkplus && pragma_tok == NULL)) cp_parser_skip_to_pragma_eol (parser, pragma_tok); if (finish_p) return finish_omp_clauses (clauses); return clauses; } /* OpenMP 2.5: structured-block: statement In practice, we're also interested in adding the statement to an outer node. So it is convenient if we work around the fact that cp_parser_statement calls add_stmt. 
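cp_parser_omp_structured_block below packages that workaround: it opens a block with begin_omp_structured_block, parses exactly one statement (which add_stmt attaches to that block rather than to the surrounding scope), and returns the finished tree; the body of an illustrative #pragma omp master, for instance, is parsed this way.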
*/ static unsigned cp_parser_begin_omp_structured_block (cp_parser *parser) { unsigned save = parser->in_statement; /* Only move the values to IN_OMP_BLOCK if they weren't false. This preserves the "not within loop or switch" style error messages for nonsense cases like void foo() { #pragma omp single break; } */ if (parser->in_statement) parser->in_statement = IN_OMP_BLOCK; return save; } static void cp_parser_end_omp_structured_block (cp_parser *parser, unsigned save) { parser->in_statement = save; } static tree cp_parser_omp_structured_block (cp_parser *parser) { tree stmt = begin_omp_structured_block (); unsigned int save = cp_parser_begin_omp_structured_block (parser); cp_parser_statement (parser, NULL_TREE, false, NULL); cp_parser_end_omp_structured_block (parser, save); return finish_omp_structured_block (stmt); } /* OpenMP 2.5: # pragma omp atomic new-line expression-stmt expression-stmt: x binop= expr | x++ | ++x | x-- | --x binop: +, *, -, /, &, ^, |, <<, >> where x is an lvalue expression with scalar type. OpenMP 3.1: # pragma omp atomic new-line update-stmt # pragma omp atomic read new-line read-stmt # pragma omp atomic write new-line write-stmt # pragma omp atomic update new-line update-stmt # pragma omp atomic capture new-line capture-stmt # pragma omp atomic capture new-line capture-block read-stmt: v = x write-stmt: x = expr update-stmt: expression-stmt | x = x binop expr capture-stmt: v = expression-stmt capture-block: { v = x; update-stmt; } | { update-stmt; v = x; } OpenMP 4.0: update-stmt: expression-stmt | x = x binop expr | x = expr binop x capture-stmt: v = update-stmt capture-block: { v = x; update-stmt; } | { update-stmt; v = x; } | { v = x; x = expr; } where x and v are lvalue expressions with scalar type. */ static void cp_parser_omp_atomic (cp_parser *parser, cp_token *pragma_tok) { tree lhs = NULL_TREE, rhs = NULL_TREE, v = NULL_TREE, lhs1 = NULL_TREE; tree rhs1 = NULL_TREE, orig_lhs; enum tree_code code = OMP_ATOMIC, opcode = NOP_EXPR; bool structured_block = false; bool seq_cst = false; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (!strcmp (p, "seq_cst")) { seq_cst = true; cp_lexer_consume_token (parser->lexer); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA) && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_NAME) cp_lexer_consume_token (parser->lexer); } } if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (!strcmp (p, "read")) code = OMP_ATOMIC_READ; else if (!strcmp (p, "write")) code = NOP_EXPR; else if (!strcmp (p, "update")) code = OMP_ATOMIC; else if (!strcmp (p, "capture")) code = OMP_ATOMIC_CAPTURE_NEW; else p = NULL; if (p) cp_lexer_consume_token (parser->lexer); } if (!seq_cst) { if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA) && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_NAME) cp_lexer_consume_token (parser->lexer); if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (!strcmp (p, "seq_cst")) { seq_cst = true; cp_lexer_consume_token (parser->lexer); } } } cp_parser_require_pragma_eol (parser, pragma_tok); switch (code) { case OMP_ATOMIC_READ: case NOP_EXPR: /* atomic write */ v = cp_parser_unary_expression (parser); if (v == error_mark_node) goto saw_error; if (!cp_parser_require (parser, CPP_EQ, 
RT_EQ)) goto saw_error; if (code == NOP_EXPR) lhs = cp_parser_expression (parser); else lhs = cp_parser_unary_expression (parser); if (lhs == error_mark_node) goto saw_error; if (code == NOP_EXPR) { /* atomic write is represented by OMP_ATOMIC with NOP_EXPR opcode. */ code = OMP_ATOMIC; rhs = lhs; lhs = v; v = NULL_TREE; } goto done; case OMP_ATOMIC_CAPTURE_NEW: if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { cp_lexer_consume_token (parser->lexer); structured_block = true; } else { v = cp_parser_unary_expression (parser); if (v == error_mark_node) goto saw_error; if (!cp_parser_require (parser, CPP_EQ, RT_EQ)) goto saw_error; } default: break; } restart: lhs = cp_parser_unary_expression (parser); orig_lhs = lhs; switch (TREE_CODE (lhs)) { case ERROR_MARK: goto saw_error; case POSTINCREMENT_EXPR: if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block) code = OMP_ATOMIC_CAPTURE_OLD; /* FALLTHROUGH */ case PREINCREMENT_EXPR: lhs = TREE_OPERAND (lhs, 0); opcode = PLUS_EXPR; rhs = integer_one_node; break; case POSTDECREMENT_EXPR: if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block) code = OMP_ATOMIC_CAPTURE_OLD; /* FALLTHROUGH */ case PREDECREMENT_EXPR: lhs = TREE_OPERAND (lhs, 0); opcode = MINUS_EXPR; rhs = integer_one_node; break; case COMPOUND_EXPR: if (TREE_CODE (TREE_OPERAND (lhs, 0)) == SAVE_EXPR && TREE_CODE (TREE_OPERAND (lhs, 1)) == COMPOUND_EXPR && TREE_CODE (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0)) == MODIFY_EXPR && TREE_OPERAND (TREE_OPERAND (lhs, 1), 1) == TREE_OPERAND (lhs, 0) && TREE_CODE (TREE_TYPE (TREE_OPERAND (TREE_OPERAND (TREE_OPERAND (lhs, 1), 0), 0))) == BOOLEAN_TYPE) /* Undo effects of boolean_increment for post {in,de}crement. */ lhs = TREE_OPERAND (TREE_OPERAND (lhs, 1), 0); /* FALLTHRU */ case MODIFY_EXPR: if (TREE_CODE (lhs) == MODIFY_EXPR && TREE_CODE (TREE_TYPE (TREE_OPERAND (lhs, 0))) == BOOLEAN_TYPE) { /* Undo effects of boolean_increment. */ if (integer_onep (TREE_OPERAND (lhs, 1))) { /* This is pre or post increment. 
*/ rhs = TREE_OPERAND (lhs, 1); lhs = TREE_OPERAND (lhs, 0); opcode = NOP_EXPR; if (code == OMP_ATOMIC_CAPTURE_NEW && !structured_block && TREE_CODE (orig_lhs) == COMPOUND_EXPR) code = OMP_ATOMIC_CAPTURE_OLD; break; } } /* FALLTHRU */ default: switch (cp_lexer_peek_token (parser->lexer)->type) { case CPP_MULT_EQ: opcode = MULT_EXPR; break; case CPP_DIV_EQ: opcode = TRUNC_DIV_EXPR; break; case CPP_PLUS_EQ: opcode = PLUS_EXPR; break; case CPP_MINUS_EQ: opcode = MINUS_EXPR; break; case CPP_LSHIFT_EQ: opcode = LSHIFT_EXPR; break; case CPP_RSHIFT_EQ: opcode = RSHIFT_EXPR; break; case CPP_AND_EQ: opcode = BIT_AND_EXPR; break; case CPP_OR_EQ: opcode = BIT_IOR_EXPR; break; case CPP_XOR_EQ: opcode = BIT_XOR_EXPR; break; case CPP_EQ: enum cp_parser_prec oprec; cp_token *token; cp_lexer_consume_token (parser->lexer); cp_parser_parse_tentatively (parser); rhs1 = cp_parser_simple_cast_expression (parser); if (rhs1 == error_mark_node) { cp_parser_abort_tentative_parse (parser); cp_parser_simple_cast_expression (parser); goto saw_error; } token = cp_lexer_peek_token (parser->lexer); if (token->type != CPP_SEMICOLON && !cp_tree_equal (lhs, rhs1)) { cp_parser_abort_tentative_parse (parser); cp_parser_parse_tentatively (parser); rhs = cp_parser_binary_expression (parser, false, true, PREC_NOT_OPERATOR, NULL); if (rhs == error_mark_node) { cp_parser_abort_tentative_parse (parser); cp_parser_binary_expression (parser, false, true, PREC_NOT_OPERATOR, NULL); goto saw_error; } switch (TREE_CODE (rhs)) { case MULT_EXPR: case TRUNC_DIV_EXPR: case RDIV_EXPR: case PLUS_EXPR: case MINUS_EXPR: case LSHIFT_EXPR: case RSHIFT_EXPR: case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: if (cp_tree_equal (lhs, TREE_OPERAND (rhs, 1))) { if (cp_parser_parse_definitely (parser)) { opcode = TREE_CODE (rhs); rhs1 = TREE_OPERAND (rhs, 0); rhs = TREE_OPERAND (rhs, 1); goto stmt_done; } else goto saw_error; } break; default: break; } cp_parser_abort_tentative_parse (parser); if (structured_block && code == OMP_ATOMIC_CAPTURE_OLD) { rhs = cp_parser_expression (parser); if (rhs == error_mark_node) goto saw_error; opcode = NOP_EXPR; rhs1 = NULL_TREE; goto stmt_done; } cp_parser_error (parser, "invalid form of %<#pragma omp atomic%>"); goto saw_error; } if (!cp_parser_parse_definitely (parser)) goto saw_error; switch (token->type) { case CPP_SEMICOLON: if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW) { code = OMP_ATOMIC_CAPTURE_OLD; v = lhs; lhs = NULL_TREE; lhs1 = rhs1; rhs1 = NULL_TREE; cp_lexer_consume_token (parser->lexer); goto restart; } else if (structured_block) { opcode = NOP_EXPR; rhs = rhs1; rhs1 = NULL_TREE; goto stmt_done; } cp_parser_error (parser, "invalid form of %<#pragma omp atomic%>"); goto saw_error; case CPP_MULT: opcode = MULT_EXPR; break; case CPP_DIV: opcode = TRUNC_DIV_EXPR; break; case CPP_PLUS: opcode = PLUS_EXPR; break; case CPP_MINUS: opcode = MINUS_EXPR; break; case CPP_LSHIFT: opcode = LSHIFT_EXPR; break; case CPP_RSHIFT: opcode = RSHIFT_EXPR; break; case CPP_AND: opcode = BIT_AND_EXPR; break; case CPP_OR: opcode = BIT_IOR_EXPR; break; case CPP_XOR: opcode = BIT_XOR_EXPR; break; default: cp_parser_error (parser, "invalid operator for %<#pragma omp atomic%>"); goto saw_error; } oprec = TOKEN_PRECEDENCE (token); gcc_assert (oprec != PREC_NOT_OPERATOR); if (commutative_tree_code (opcode)) oprec = (enum cp_parser_prec) (oprec - 1); cp_lexer_consume_token (parser->lexer); rhs = cp_parser_binary_expression (parser, false, false, oprec, NULL); if (rhs == error_mark_node) goto saw_error; goto 
stmt_done;
          /* FALLTHROUGH */
        default:
          cp_parser_error (parser,
                           "invalid operator for %<#pragma omp atomic%>");
          goto saw_error;
        }
      cp_lexer_consume_token (parser->lexer);
      rhs = cp_parser_expression (parser);
      if (rhs == error_mark_node)
        goto saw_error;
      break;
    }
 stmt_done:
  if (structured_block && code == OMP_ATOMIC_CAPTURE_NEW)
    {
      if (!cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON))
        goto saw_error;
      v = cp_parser_unary_expression (parser);
      if (v == error_mark_node)
        goto saw_error;
      if (!cp_parser_require (parser, CPP_EQ, RT_EQ))
        goto saw_error;
      lhs1 = cp_parser_unary_expression (parser);
      if (lhs1 == error_mark_node)
        goto saw_error;
    }
  if (structured_block)
    {
      cp_parser_consume_semicolon_at_end_of_statement (parser);
      cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE);
    }
 done:
  finish_omp_atomic (code, opcode, lhs, rhs, v, lhs1, rhs1, seq_cst);
  if (!structured_block)
    cp_parser_consume_semicolon_at_end_of_statement (parser);
  return;

 saw_error:
  cp_parser_skip_to_end_of_block_or_statement (parser);
  if (structured_block)
    {
      if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
        cp_lexer_consume_token (parser->lexer);
      else if (code == OMP_ATOMIC_CAPTURE_NEW)
        {
          cp_parser_skip_to_end_of_block_or_statement (parser);
          if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE))
            cp_lexer_consume_token (parser->lexer);
        }
    }
}

/* OpenMP 2.5:
   # pragma omp barrier new-line  */

static void
cp_parser_omp_barrier (cp_parser *parser, cp_token *pragma_tok)
{
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_barrier ();
}

/* OpenMP 2.5:
   # pragma omp critical [(name)] new-line
     structured-block  */

static tree
cp_parser_omp_critical (cp_parser *parser, cp_token *pragma_tok)
{
  tree stmt, name = NULL;

  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    {
      cp_lexer_consume_token (parser->lexer);

      name = cp_parser_identifier (parser);

      if (name == error_mark_node
          || !cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN))
        cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true,
                                               /*or_comma=*/false,
                                               /*consume_paren=*/true);
      if (name == error_mark_node)
        name = NULL;
    }
  cp_parser_require_pragma_eol (parser, pragma_tok);

  stmt = cp_parser_omp_structured_block (parser);
  return c_finish_omp_critical (input_location, stmt, name);
}

/* OpenMP 2.5:
   # pragma omp flush flush-vars[opt] new-line

   flush-vars:
     ( variable-list )  */

static void
cp_parser_omp_flush (cp_parser *parser, cp_token *pragma_tok)
{
  if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN))
    (void) cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL);
  cp_parser_require_pragma_eol (parser, pragma_tok);
  finish_omp_flush ();
}

/* Helper function, to parse omp for condition expression.  */

static tree
cp_parser_omp_for_cond (cp_parser *parser, tree decl, enum tree_code code)
{
  tree cond = cp_parser_binary_expression (parser, false, true,
                                           PREC_NOT_OPERATOR, NULL);
  if (cond == error_mark_node
      || cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON))
    {
      cp_parser_skip_to_end_of_statement (parser);
      return error_mark_node;
    }

  switch (TREE_CODE (cond))
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
      break;
    case NE_EXPR:
      if (code == CILK_SIMD || code == CILK_FOR)
        break;
      /* Fall through: OpenMP disallows NE_EXPR.  */
    default:
      return error_mark_node;
    }

  /* If decl is an iterator, preserve LHS and RHS of the relational
     expr until finish_omp_for.
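     For example, with `vector<int>::iterator it', the test `it < v.end ()'
     may be a call to an overloaded operator whose operands must not be
     folded before the loop is finalized.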
*/ if (decl && (type_dependent_expression_p (decl) || CLASS_TYPE_P (TREE_TYPE (decl)))) return cond; return build_x_binary_op (input_location, TREE_CODE (cond), TREE_OPERAND (cond, 0), ERROR_MARK, TREE_OPERAND (cond, 1), ERROR_MARK, /*overload=*/NULL, tf_warning_or_error); } /* Helper function, to parse omp for increment expression. */ static tree cp_parser_omp_for_incr (cp_parser *parser, tree decl) { cp_token *token = cp_lexer_peek_token (parser->lexer); enum tree_code op; tree lhs, rhs; cp_id_kind idk; bool decl_first; if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS) { op = (token->type == CPP_PLUS_PLUS ? PREINCREMENT_EXPR : PREDECREMENT_EXPR); cp_lexer_consume_token (parser->lexer); lhs = cp_parser_simple_cast_expression (parser); if (lhs != decl) return error_mark_node; return build2 (op, TREE_TYPE (decl), decl, NULL_TREE); } lhs = cp_parser_primary_expression (parser, false, false, false, &idk); if (lhs != decl) return error_mark_node; token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_PLUS_PLUS || token->type == CPP_MINUS_MINUS) { op = (token->type == CPP_PLUS_PLUS ? POSTINCREMENT_EXPR : POSTDECREMENT_EXPR); cp_lexer_consume_token (parser->lexer); return build2 (op, TREE_TYPE (decl), decl, NULL_TREE); } op = cp_parser_assignment_operator_opt (parser); if (op == ERROR_MARK) return error_mark_node; if (op != NOP_EXPR) { rhs = cp_parser_assignment_expression (parser); rhs = build2 (op, TREE_TYPE (decl), decl, rhs); return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs); } lhs = cp_parser_binary_expression (parser, false, false, PREC_ADDITIVE_EXPRESSION, NULL); token = cp_lexer_peek_token (parser->lexer); decl_first = lhs == decl; if (decl_first) lhs = NULL_TREE; if (token->type != CPP_PLUS && token->type != CPP_MINUS) return error_mark_node; do { op = token->type == CPP_PLUS ? PLUS_EXPR : MINUS_EXPR; cp_lexer_consume_token (parser->lexer); rhs = cp_parser_binary_expression (parser, false, false, PREC_ADDITIVE_EXPRESSION, NULL); token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_PLUS || token->type == CPP_MINUS || decl_first) { if (lhs == NULL_TREE) { if (op == PLUS_EXPR) lhs = rhs; else lhs = build_x_unary_op (input_location, NEGATE_EXPR, rhs, tf_warning_or_error); } else lhs = build_x_binary_op (input_location, op, lhs, ERROR_MARK, rhs, ERROR_MARK, NULL, tf_warning_or_error); } } while (token->type == CPP_PLUS || token->type == CPP_MINUS); if (!decl_first) { if (rhs != decl || op == MINUS_EXPR) return error_mark_node; rhs = build2 (op, TREE_TYPE (decl), lhs, decl); } else rhs = build2 (PLUS_EXPR, TREE_TYPE (decl), decl, lhs); return build2 (MODIFY_EXPR, TREE_TYPE (decl), decl, rhs); } /* Parse the initialization statement of either an OpenMP for loop or a Cilk Plus for loop. Return true if the resulting construct should have an OMP_CLAUSE_PRIVATE added to it. */ static bool cp_parser_omp_for_loop_init (cp_parser *parser, enum tree_code code, tree &this_pre_body, vec<tree, va_gc> *for_block, tree &init, tree &decl, tree &real_decl) { if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) return false; bool add_private_clause = false; /* See 2.5.1 (in OpenMP 3.0, similar wording is in 2.5 standard too): init-expr: var = lb integer-type var = lb random-access-iterator-type var = lb pointer-type var = lb */ cp_decl_specifier_seq type_specifiers; /* First, try to parse as an initialized declaration. See cp_parser_condition, from whence the bulk of this is copied. 
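     For example, `int i = 0', `T *p = base' and
     `vector<int>::iterator it = v.begin ()' are all initialized
     declarations matching the init-expr forms listed above.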
*/ cp_parser_parse_tentatively (parser); cp_parser_type_specifier_seq (parser, /*is_declaration=*/true, /*is_trailing_return=*/false, &type_specifiers); if (cp_parser_parse_definitely (parser)) { /* If parsing a type specifier seq succeeded, then this MUST be a initialized declaration. */ tree asm_specification, attributes; cp_declarator *declarator; declarator = cp_parser_declarator (parser, CP_PARSER_DECLARATOR_NAMED, /*ctor_dtor_or_conv_p=*/NULL, /*parenthesized_p=*/NULL, /*member_p=*/false, /*friend_p=*/false); attributes = cp_parser_attributes_opt (parser); asm_specification = cp_parser_asm_specification_opt (parser); if (declarator == cp_error_declarator) cp_parser_skip_to_end_of_statement (parser); else { tree pushed_scope, auto_node; decl = start_decl (declarator, &type_specifiers, SD_INITIALIZED, attributes, /*prefix_attributes=*/NULL_TREE, &pushed_scope); auto_node = type_uses_auto (TREE_TYPE (decl)); if (cp_lexer_next_token_is_not (parser->lexer, CPP_EQ)) { if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN)) { if (code != CILK_SIMD && code != CILK_FOR) error ("parenthesized initialization is not allowed in " "OpenMP %<for%> loop"); else error ("parenthesized initialization is " "not allowed in for-loop"); } else /* Trigger an error. */ cp_parser_require (parser, CPP_EQ, RT_EQ); init = error_mark_node; cp_parser_skip_to_end_of_statement (parser); } else if (CLASS_TYPE_P (TREE_TYPE (decl)) || type_dependent_expression_p (decl) || auto_node) { bool is_direct_init, is_non_constant_init; init = cp_parser_initializer (parser, &is_direct_init, &is_non_constant_init); if (auto_node) { TREE_TYPE (decl) = do_auto_deduction (TREE_TYPE (decl), init, auto_node); if (!CLASS_TYPE_P (TREE_TYPE (decl)) && !type_dependent_expression_p (decl)) goto non_class; } cp_finish_decl (decl, init, !is_non_constant_init, asm_specification, LOOKUP_ONLYCONVERTING); if (CLASS_TYPE_P (TREE_TYPE (decl))) { vec_safe_push (for_block, this_pre_body); init = NULL_TREE; } else init = pop_stmt_list (this_pre_body); this_pre_body = NULL_TREE; } else { /* Consume '='. */ cp_lexer_consume_token (parser->lexer); init = cp_parser_assignment_expression (parser); non_class: if (TREE_CODE (TREE_TYPE (decl)) == REFERENCE_TYPE) init = error_mark_node; else cp_finish_decl (decl, NULL_TREE, /*init_const_expr_p=*/false, asm_specification, LOOKUP_ONLYCONVERTING); } if (pushed_scope) pop_scope (pushed_scope); } } else { cp_id_kind idk; /* If parsing a type specifier sequence failed, then this MUST be a simple expression. */ if (code == CILK_FOR) error ("%<_Cilk_for%> allows expression instead of declaration only " "in C, not in C++"); cp_parser_parse_tentatively (parser); decl = cp_parser_primary_expression (parser, false, false, false, &idk); if (!cp_parser_error_occurred (parser) && decl && DECL_P (decl) && CLASS_TYPE_P (TREE_TYPE (decl))) { tree rhs; cp_parser_parse_definitely (parser); cp_parser_require (parser, CPP_EQ, RT_EQ); rhs = cp_parser_assignment_expression (parser); finish_expr_stmt (build_x_modify_expr (EXPR_LOCATION (rhs), decl, NOP_EXPR, rhs, tf_warning_or_error)); add_private_clause = true; } else { decl = NULL; cp_parser_abort_tentative_parse (parser); init = cp_parser_expression (parser); if (init) { if (TREE_CODE (init) == MODIFY_EXPR || TREE_CODE (init) == MODOP_EXPR) real_decl = TREE_OPERAND (init, 0); } } } return add_private_clause; } /* Parse the restricted form of the for statement allowed by OpenMP. 
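   Only loops in canonical form are accepted; an illustrative input is

     #pragma omp for
     for (i = 0; i < n; i++)
       body;

   with a single iteration variable, a relational test against a
   loop-invariant bound, and a loop-invariant increment.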
*/ static tree cp_parser_omp_for_loop (cp_parser *parser, enum tree_code code, tree clauses, tree *cclauses) { tree init, cond, incr, body, decl, pre_body = NULL_TREE, ret; tree real_decl, initv, condv, incrv, declv; tree this_pre_body, cl; location_t loc_first; bool collapse_err = false; int i, collapse = 1, nbraces = 0; vec<tree, va_gc> *for_block = make_tree_vector (); for (cl = clauses; cl; cl = OMP_CLAUSE_CHAIN (cl)) if (OMP_CLAUSE_CODE (cl) == OMP_CLAUSE_COLLAPSE) collapse = tree_to_shwi (OMP_CLAUSE_COLLAPSE_EXPR (cl)); gcc_assert (collapse >= 1); declv = make_tree_vec (collapse); initv = make_tree_vec (collapse); condv = make_tree_vec (collapse); incrv = make_tree_vec (collapse); loc_first = cp_lexer_peek_token (parser->lexer)->location; for (i = 0; i < collapse; i++) { int bracecount = 0; bool add_private_clause = false; location_t loc; if (code != CILK_FOR && !cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR)) { cp_parser_error (parser, "for statement expected"); return NULL; } if (code == CILK_FOR && !cp_lexer_next_token_is_keyword (parser->lexer, RID_CILK_FOR)) { cp_parser_error (parser, "_Cilk_for statement expected"); return NULL; } loc = cp_lexer_consume_token (parser->lexer)->location; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return NULL; init = decl = real_decl = NULL; this_pre_body = push_stmt_list (); add_private_clause |= cp_parser_omp_for_loop_init (parser, code, this_pre_body, for_block, init, decl, real_decl); cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); if (this_pre_body) { this_pre_body = pop_stmt_list (this_pre_body); if (pre_body) { tree t = pre_body; pre_body = push_stmt_list (); add_stmt (t); add_stmt (this_pre_body); pre_body = pop_stmt_list (pre_body); } else pre_body = this_pre_body; } if (decl) real_decl = decl; if (cclauses != NULL && cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL] != NULL && real_decl != NULL_TREE) { tree *c; for (c = &cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL]; *c ; ) if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_FIRSTPRIVATE && OMP_CLAUSE_DECL (*c) == real_decl) { error_at (loc, "iteration variable %qD" " should not be firstprivate", real_decl); *c = OMP_CLAUSE_CHAIN (*c); } else if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_LASTPRIVATE && OMP_CLAUSE_DECL (*c) == real_decl) { /* Add lastprivate (decl) clause to OMP_FOR_CLAUSES, change it to shared (decl) in OMP_PARALLEL_CLAUSES. 
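                     That way the loop construct still assigns the final
                     iteration value to the variable, while the enclosing
                     parallel merely shares the underlying storage.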
*/ tree l = build_omp_clause (loc, OMP_CLAUSE_LASTPRIVATE); OMP_CLAUSE_DECL (l) = real_decl; CP_OMP_CLAUSE_INFO (l) = CP_OMP_CLAUSE_INFO (*c); if (code == OMP_SIMD) { OMP_CLAUSE_CHAIN (l) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; cclauses[C_OMP_CLAUSE_SPLIT_FOR] = l; } else { OMP_CLAUSE_CHAIN (l) = clauses; clauses = l; } OMP_CLAUSE_SET_CODE (*c, OMP_CLAUSE_SHARED); CP_OMP_CLAUSE_INFO (*c) = NULL; add_private_clause = false; } else { if (OMP_CLAUSE_CODE (*c) == OMP_CLAUSE_PRIVATE && OMP_CLAUSE_DECL (*c) == real_decl) add_private_clause = false; c = &OMP_CLAUSE_CHAIN (*c); } } if (add_private_clause) { tree c; for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c)) { if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE) && OMP_CLAUSE_DECL (c) == decl) break; else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE && OMP_CLAUSE_DECL (c) == decl) error_at (loc, "iteration variable %qD " "should not be firstprivate", decl); else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && OMP_CLAUSE_DECL (c) == decl) error_at (loc, "iteration variable %qD should not be reduction", decl); } if (c == NULL) { c = build_omp_clause (loc, OMP_CLAUSE_PRIVATE); OMP_CLAUSE_DECL (c) = decl; c = finish_omp_clauses (c); if (c) { OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } } } cond = NULL; if (cp_lexer_next_token_is_not (parser->lexer, CPP_SEMICOLON)) cond = cp_parser_omp_for_cond (parser, decl, code); cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); incr = NULL; if (cp_lexer_next_token_is_not (parser->lexer, CPP_CLOSE_PAREN)) { /* If decl is an iterator, preserve the operator on decl until finish_omp_for. */ if (real_decl && ((processing_template_decl && !POINTER_TYPE_P (TREE_TYPE (real_decl))) || CLASS_TYPE_P (TREE_TYPE (real_decl)))) incr = cp_parser_omp_for_incr (parser, real_decl); else incr = cp_parser_expression (parser); if (CAN_HAVE_LOCATION_P (incr) && !EXPR_HAS_LOCATION (incr)) SET_EXPR_LOCATION (incr, input_location); } if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) cp_parser_skip_to_closing_parenthesis (parser, /*recovering=*/true, /*or_comma=*/false, /*consume_paren=*/true); TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; if (i == collapse - 1) break; /* FIXME: OpenMP 3.0 draft isn't very clear on what exactly is allowed in between the collapsed for loops to be still considered perfectly nested. Hopefully the final version clarifies this. For now handle (multiple) {'s and empty statements. */ cp_parser_parse_tentatively (parser); do { if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR)) break; else if (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_BRACE)) { cp_lexer_consume_token (parser->lexer); bracecount++; } else if (bracecount && cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); else { loc = cp_lexer_peek_token (parser->lexer)->location; error_at (loc, "not enough collapsed for loops"); collapse_err = true; cp_parser_abort_tentative_parse (parser); declv = NULL_TREE; break; } } while (1); if (declv) { cp_parser_parse_definitely (parser); nbraces += bracecount; } } /* Note that we saved the original contents of this flag when we entered the structured block, and so we don't need to re-save it here. 
*/ if (code == CILK_SIMD || code == CILK_FOR) parser->in_statement = IN_CILK_SIMD_FOR; else parser->in_statement = IN_OMP_FOR; /* Note that the grammar doesn't call for a structured block here, though the loop as a whole is a structured block. */ body = push_stmt_list (); cp_parser_statement (parser, NULL_TREE, false, NULL); body = pop_stmt_list (body); if (declv == NULL_TREE) ret = NULL_TREE; else ret = finish_omp_for (loc_first, code, declv, initv, condv, incrv, body, pre_body, clauses); while (nbraces) { if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_BRACE)) { cp_lexer_consume_token (parser->lexer); nbraces--; } else if (cp_lexer_next_token_is (parser->lexer, CPP_SEMICOLON)) cp_lexer_consume_token (parser->lexer); else { if (!collapse_err) { error_at (cp_lexer_peek_token (parser->lexer)->location, "collapsed loops not perfectly nested"); } collapse_err = true; cp_parser_statement_seq_opt (parser, NULL); if (cp_lexer_next_token_is (parser->lexer, CPP_EOF)) break; } } while (!for_block->is_empty ()) add_stmt (pop_stmt_list (for_block->pop ())); release_tree_vector (for_block); return ret; } /* Helper function for OpenMP parsing, split clauses and call finish_omp_clauses on each of the set of clauses afterwards. */ static void cp_omp_split_clauses (location_t loc, enum tree_code code, omp_clause_mask mask, tree clauses, tree *cclauses) { int i; c_omp_split_clauses (loc, code, mask, clauses, cclauses); for (i = 0; i < C_OMP_CLAUSE_SPLIT_COUNT; i++) if (cclauses[i]) cclauses[i] = finish_omp_clauses (cclauses[i]); } /* OpenMP 4.0: #pragma omp simd simd-clause[optseq] new-line for-loop */ #define OMP_SIMD_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SAFELEN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE)) static tree cp_parser_omp_simd (cp_parser *parser, cp_token *pragma_tok, char *p_name, omp_clause_mask mask, tree *cclauses) { tree clauses, sb, ret; unsigned int save; location_t loc = cp_lexer_peek_token (parser->lexer)->location; strcat (p_name, " simd"); mask |= OMP_SIMD_CLAUSE_MASK; mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED); clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok, cclauses == NULL); if (cclauses) { cp_omp_split_clauses (loc, OMP_SIMD, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_SIMD]; } sb = begin_omp_structured_block (); save = cp_parser_begin_omp_structured_block (parser); ret = cp_parser_omp_for_loop (parser, OMP_SIMD, clauses, cclauses); cp_parser_end_omp_structured_block (parser, save); add_stmt (finish_omp_structured_block (sb)); return ret; } /* OpenMP 2.5: #pragma omp for for-clause[optseq] new-line for-loop OpenMP 4.0: #pragma omp for simd for-simd-clause[optseq] new-line for-loop */ #define OMP_FOR_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ORDERED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SCHEDULE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE)) static tree cp_parser_omp_for (cp_parser *parser, cp_token *pragma_tok, char *p_name, 
omp_clause_mask mask, tree *cclauses) { tree clauses, sb, ret; unsigned int save; location_t loc = cp_lexer_peek_token (parser->lexer)->location; strcat (p_name, " for"); mask |= OMP_FOR_CLAUSE_MASK; if (cclauses) mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT); if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (strcmp (p, "simd") == 0) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; cp_lexer_consume_token (parser->lexer); if (!flag_openmp) /* flag_openmp_simd */ return cp_parser_omp_simd (parser, pragma_tok, p_name, mask, cclauses); sb = begin_omp_structured_block (); save = cp_parser_begin_omp_structured_block (parser); ret = cp_parser_omp_simd (parser, pragma_tok, p_name, mask, cclauses); cp_parser_end_omp_structured_block (parser, save); tree body = finish_omp_structured_block (sb); if (ret == NULL) return ret; ret = make_node (OMP_FOR); TREE_TYPE (ret) = void_type_node; OMP_FOR_BODY (ret) = body; OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; SET_EXPR_LOCATION (ret, loc); add_stmt (ret); return ret; } } if (!flag_openmp) /* flag_openmp_simd */ { cp_parser_skip_to_pragma_eol (parser, pragma_tok); return NULL_TREE; } clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok, cclauses == NULL); if (cclauses) { cp_omp_split_clauses (loc, OMP_FOR, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_FOR]; } sb = begin_omp_structured_block (); save = cp_parser_begin_omp_structured_block (parser); ret = cp_parser_omp_for_loop (parser, OMP_FOR, clauses, cclauses); cp_parser_end_omp_structured_block (parser, save); add_stmt (finish_omp_structured_block (sb)); return ret; } /* OpenMP 2.5: # pragma omp master new-line structured-block */ static tree cp_parser_omp_master (cp_parser *parser, cp_token *pragma_tok) { cp_parser_require_pragma_eol (parser, pragma_tok); return c_finish_omp_master (input_location, cp_parser_omp_structured_block (parser)); } /* OpenMP 2.5: # pragma omp ordered new-line structured-block */ static tree cp_parser_omp_ordered (cp_parser *parser, cp_token *pragma_tok) { location_t loc = cp_lexer_peek_token (parser->lexer)->location; cp_parser_require_pragma_eol (parser, pragma_tok); return c_finish_omp_ordered (loc, cp_parser_omp_structured_block (parser)); } /* OpenMP 2.5: section-scope: { section-sequence } section-sequence: section-directive[opt] structured-block section-sequence section-directive structured-block */ static tree cp_parser_omp_sections_scope (cp_parser *parser) { tree stmt, substmt; bool error_suppress = false; cp_token *tok; if (!cp_parser_require (parser, CPP_OPEN_BRACE, RT_OPEN_BRACE)) return NULL_TREE; stmt = push_stmt_list (); if (cp_lexer_peek_token (parser->lexer)->pragma_kind != PRAGMA_OMP_SECTION) { substmt = cp_parser_omp_structured_block (parser); substmt = build1 (OMP_SECTION, void_type_node, substmt); add_stmt (substmt); } while (1) { tok = cp_lexer_peek_token (parser->lexer); if (tok->type == CPP_CLOSE_BRACE) break; if (tok->type == CPP_EOF) break; if (tok->pragma_kind == PRAGMA_OMP_SECTION) { cp_lexer_consume_token (parser->lexer); cp_parser_require_pragma_eol (parser, tok); error_suppress = false; } else if (!error_suppress) { cp_parser_error (parser, "expected %<#pragma omp section%> or %<}%>"); error_suppress = true; } substmt = cp_parser_omp_structured_block (parser); substmt = build1 (OMP_SECTION, void_type_node, substmt); add_stmt (substmt); } 
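  /* For reference, a typical section-scope handled by the loop above is

       #pragma omp sections
       {
         stmt0;
       #pragma omp section
         stmt1;
       }

     where the first section-directive is optional.  */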
cp_parser_require (parser, CPP_CLOSE_BRACE, RT_CLOSE_BRACE); substmt = pop_stmt_list (stmt); stmt = make_node (OMP_SECTIONS); TREE_TYPE (stmt) = void_type_node; OMP_SECTIONS_BODY (stmt) = substmt; add_stmt (stmt); return stmt; } /* OpenMP 2.5: # pragma omp sections sections-clause[optseq] newline sections-scope */ #define OMP_SECTIONS_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LASTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree cp_parser_omp_sections (cp_parser *parser, cp_token *pragma_tok, char *p_name, omp_clause_mask mask, tree *cclauses) { tree clauses, ret; location_t loc = cp_lexer_peek_token (parser->lexer)->location; strcat (p_name, " sections"); mask |= OMP_SECTIONS_CLAUSE_MASK; if (cclauses) mask &= ~(OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT); clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok, cclauses == NULL); if (cclauses) { cp_omp_split_clauses (loc, OMP_SECTIONS, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_SECTIONS]; } ret = cp_parser_omp_sections_scope (parser); if (ret) OMP_SECTIONS_CLAUSES (ret) = clauses; return ret; } /* OpenMP 2.5: # pragma omp parallel parallel-clause[optseq] new-line structured-block # pragma omp parallel for parallel-for-clause[optseq] new-line structured-block # pragma omp parallel sections parallel-sections-clause[optseq] new-line structured-block OpenMP 4.0: # pragma omp parallel for simd parallel-for-simd-clause[optseq] new-line structured-block */ #define OMP_PARALLEL_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_THREADS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PROC_BIND)) static tree cp_parser_omp_parallel (cp_parser *parser, cp_token *pragma_tok, char *p_name, omp_clause_mask mask, tree *cclauses) { tree stmt, clauses, block; unsigned int save; location_t loc = cp_lexer_peek_token (parser->lexer)->location; strcat (p_name, " parallel"); mask |= OMP_PARALLEL_CLAUSE_MASK; if (cp_lexer_next_token_is_keyword (parser->lexer, RID_FOR)) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; cp_lexer_consume_token (parser->lexer); if (!flag_openmp) /* flag_openmp_simd */ return cp_parser_omp_for (parser, pragma_tok, p_name, mask, cclauses); block = begin_omp_parallel (); save = cp_parser_begin_omp_structured_block (parser); tree ret = cp_parser_omp_for (parser, pragma_tok, p_name, mask, cclauses); cp_parser_end_omp_structured_block (parser, save); stmt = finish_omp_parallel (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL], block); if (ret == NULL_TREE) return ret; OMP_PARALLEL_COMBINED (stmt) = 1; return stmt; } else if (cclauses) { error_at (loc, "expected %<for%> after %qs", p_name); cp_parser_skip_to_pragma_eol (parser, pragma_tok); return NULL_TREE; } else if (!flag_openmp) /* flag_openmp_simd */ { cp_parser_skip_to_pragma_eol (parser, pragma_tok); return NULL_TREE; } else if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const 
char *p = IDENTIFIER_POINTER (id); if (strcmp (p, "sections") == 0) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; cclauses = cclauses_buf; cp_lexer_consume_token (parser->lexer); block = begin_omp_parallel (); save = cp_parser_begin_omp_structured_block (parser); cp_parser_omp_sections (parser, pragma_tok, p_name, mask, cclauses); cp_parser_end_omp_structured_block (parser, save); stmt = finish_omp_parallel (cclauses[C_OMP_CLAUSE_SPLIT_PARALLEL], block); OMP_PARALLEL_COMBINED (stmt) = 1; return stmt; } } clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok); block = begin_omp_parallel (); save = cp_parser_begin_omp_structured_block (parser); cp_parser_statement (parser, NULL_TREE, false, NULL); cp_parser_end_omp_structured_block (parser, save); stmt = finish_omp_parallel (clauses, block); return stmt; } /* OpenMP 2.5: # pragma omp single single-clause[optseq] new-line structured-block */ #define OMP_SINGLE_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COPYPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOWAIT)) static tree cp_parser_omp_single (cp_parser *parser, cp_token *pragma_tok) { tree stmt = make_node (OMP_SINGLE); TREE_TYPE (stmt) = void_type_node; OMP_SINGLE_CLAUSES (stmt) = cp_parser_omp_all_clauses (parser, OMP_SINGLE_CLAUSE_MASK, "#pragma omp single", pragma_tok); OMP_SINGLE_BODY (stmt) = cp_parser_omp_structured_block (parser); return add_stmt (stmt); } /* OpenMP 3.0: # pragma omp task task-clause[optseq] new-line structured-block */ #define OMP_TASK_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNTIED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FINAL) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MERGEABLE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEPEND)) static tree cp_parser_omp_task (cp_parser *parser, cp_token *pragma_tok) { tree clauses, block; unsigned int save; clauses = cp_parser_omp_all_clauses (parser, OMP_TASK_CLAUSE_MASK, "#pragma omp task", pragma_tok); block = begin_omp_task (); save = cp_parser_begin_omp_structured_block (parser); cp_parser_statement (parser, NULL_TREE, false, NULL); cp_parser_end_omp_structured_block (parser, save); return finish_omp_task (clauses, block); } /* OpenMP 3.0: # pragma omp taskwait new-line */ static void cp_parser_omp_taskwait (cp_parser *parser, cp_token *pragma_tok) { cp_parser_require_pragma_eol (parser, pragma_tok); finish_omp_taskwait (); } /* OpenMP 3.1: # pragma omp taskyield new-line */ static void cp_parser_omp_taskyield (cp_parser *parser, cp_token *pragma_tok) { cp_parser_require_pragma_eol (parser, pragma_tok); finish_omp_taskyield (); } /* OpenMP 4.0: # pragma omp taskgroup new-line structured-block */ static tree cp_parser_omp_taskgroup (cp_parser *parser, cp_token *pragma_tok) { cp_parser_require_pragma_eol (parser, pragma_tok); return c_finish_omp_taskgroup (input_location, cp_parser_omp_structured_block (parser)); } /* OpenMP 2.5: # pragma omp threadprivate (variable-list) */ static void cp_parser_omp_threadprivate (cp_parser *parser, cp_token *pragma_tok) { tree vars; vars = cp_parser_omp_var_list (parser, OMP_CLAUSE_ERROR, NULL); cp_parser_require_pragma_eol (parser, pragma_tok); 
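  /* E.g. for `static int x;' followed by `#pragma omp threadprivate (x)',
     VARS is the one-element variable list containing x.  */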
finish_omp_threadprivate (vars); } /* OpenMP 4.0: # pragma omp cancel cancel-clause[optseq] new-line */ #define OMP_CANCEL_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)) static void cp_parser_omp_cancel (cp_parser *parser, cp_token *pragma_tok) { tree clauses = cp_parser_omp_all_clauses (parser, OMP_CANCEL_CLAUSE_MASK, "#pragma omp cancel", pragma_tok); finish_omp_cancel (clauses); } /* OpenMP 4.0: # pragma omp cancellation point cancelpt-clause[optseq] new-line */ #define OMP_CANCELLATION_POINT_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PARALLEL) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FOR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SECTIONS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TASKGROUP)) static void cp_parser_omp_cancellation_point (cp_parser *parser, cp_token *pragma_tok) { tree clauses; bool point_seen = false; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (strcmp (p, "point") == 0) { cp_lexer_consume_token (parser->lexer); point_seen = true; } } if (!point_seen) { cp_parser_error (parser, "expected %<point%>"); cp_parser_require_pragma_eol (parser, pragma_tok); return; } clauses = cp_parser_omp_all_clauses (parser, OMP_CANCELLATION_POINT_CLAUSE_MASK, "#pragma omp cancellation point", pragma_tok); finish_omp_cancellation_point (clauses); } /* OpenMP 4.0: #pragma omp distribute distribute-clause[optseq] new-line for-loop */ #define OMP_DISTRIBUTE_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DIST_SCHEDULE)\ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_COLLAPSE)) static tree cp_parser_omp_distribute (cp_parser *parser, cp_token *pragma_tok, char *p_name, omp_clause_mask mask, tree *cclauses) { tree clauses, sb, ret; unsigned int save; location_t loc = cp_lexer_peek_token (parser->lexer)->location; strcat (p_name, " distribute"); mask |= OMP_DISTRIBUTE_CLAUSE_MASK; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); bool simd = false; bool parallel = false; if (strcmp (p, "simd") == 0) simd = true; else parallel = strcmp (p, "parallel") == 0; if (parallel || simd) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; cp_lexer_consume_token (parser->lexer); if (!flag_openmp) /* flag_openmp_simd */ { if (simd) return cp_parser_omp_simd (parser, pragma_tok, p_name, mask, cclauses); else return cp_parser_omp_parallel (parser, pragma_tok, p_name, mask, cclauses); } sb = begin_omp_structured_block (); save = cp_parser_begin_omp_structured_block (parser); if (simd) ret = cp_parser_omp_simd (parser, pragma_tok, p_name, mask, cclauses); else ret = cp_parser_omp_parallel (parser, pragma_tok, p_name, mask, cclauses); cp_parser_end_omp_structured_block (parser, save); tree body = finish_omp_structured_block (sb); if (ret == NULL) return ret; ret = make_node (OMP_DISTRIBUTE); TREE_TYPE (ret) = void_type_node; OMP_FOR_BODY (ret) = body; OMP_FOR_CLAUSES (ret) = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; SET_EXPR_LOCATION (ret, loc); add_stmt (ret); return ret; } } if (!flag_openmp) /* 
flag_openmp_simd */ { cp_parser_skip_to_pragma_eol (parser, pragma_tok); return NULL_TREE; } clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok, cclauses == NULL); if (cclauses) { cp_omp_split_clauses (loc, OMP_DISTRIBUTE, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_DISTRIBUTE]; } sb = begin_omp_structured_block (); save = cp_parser_begin_omp_structured_block (parser); ret = cp_parser_omp_for_loop (parser, OMP_DISTRIBUTE, clauses, NULL); cp_parser_end_omp_structured_block (parser, save); add_stmt (finish_omp_structured_block (sb)); return ret; } /* OpenMP 4.0: # pragma omp teams teams-clause[optseq] new-line structured-block */ #define OMP_TEAMS_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_PRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FIRSTPRIVATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SHARED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NUM_TEAMS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_THREAD_LIMIT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEFAULT)) static tree cp_parser_omp_teams (cp_parser *parser, cp_token *pragma_tok, char *p_name, omp_clause_mask mask, tree *cclauses) { tree clauses, sb, ret; unsigned int save; location_t loc = cp_lexer_peek_token (parser->lexer)->location; strcat (p_name, " teams"); mask |= OMP_TEAMS_CLAUSE_MASK; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (strcmp (p, "distribute") == 0) { tree cclauses_buf[C_OMP_CLAUSE_SPLIT_COUNT]; if (cclauses == NULL) cclauses = cclauses_buf; cp_lexer_consume_token (parser->lexer); if (!flag_openmp) /* flag_openmp_simd */ return cp_parser_omp_distribute (parser, pragma_tok, p_name, mask, cclauses); sb = begin_omp_structured_block (); save = cp_parser_begin_omp_structured_block (parser); ret = cp_parser_omp_distribute (parser, pragma_tok, p_name, mask, cclauses); cp_parser_end_omp_structured_block (parser, save); tree body = finish_omp_structured_block (sb); if (ret == NULL) return ret; clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; ret = make_node (OMP_TEAMS); TREE_TYPE (ret) = void_type_node; OMP_TEAMS_CLAUSES (ret) = clauses; OMP_TEAMS_BODY (ret) = body; return add_stmt (ret); } } if (!flag_openmp) /* flag_openmp_simd */ { cp_parser_skip_to_pragma_eol (parser, pragma_tok); return NULL_TREE; } clauses = cp_parser_omp_all_clauses (parser, mask, p_name, pragma_tok, cclauses == NULL); if (cclauses) { cp_omp_split_clauses (loc, OMP_TEAMS, mask, clauses, cclauses); clauses = cclauses[C_OMP_CLAUSE_SPLIT_TEAMS]; } tree stmt = make_node (OMP_TEAMS); TREE_TYPE (stmt) = void_type_node; OMP_TEAMS_CLAUSES (stmt) = clauses; OMP_TEAMS_BODY (stmt) = cp_parser_omp_structured_block (parser); return add_stmt (stmt); } /* OpenMP 4.0: # pragma omp target data target-data-clause[optseq] new-line structured-block */ #define OMP_TARGET_DATA_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF)) static tree cp_parser_omp_target_data (cp_parser *parser, cp_token *pragma_tok) { tree stmt = make_node (OMP_TARGET_DATA); TREE_TYPE (stmt) = void_type_node; OMP_TARGET_DATA_CLAUSES (stmt) = cp_parser_omp_all_clauses (parser, OMP_TARGET_DATA_CLAUSE_MASK, "#pragma omp target data", pragma_tok); keep_next_level (true); OMP_TARGET_DATA_BODY (stmt) = cp_parser_omp_structured_block (parser); SET_EXPR_LOCATION (stmt, 
pragma_tok->location);
  return add_stmt (stmt);
}

/* OpenMP 4.0:
   # pragma omp target update target-update-clause[optseq] new-line */

#define OMP_TARGET_UPDATE_CLAUSE_MASK                   \
  ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_FROM)       \
  | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_TO)         \
  | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE)     \
  | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF))

static bool
cp_parser_omp_target_update (cp_parser *parser, cp_token *pragma_tok,
                             enum pragma_context context)
{
  if (context == pragma_stmt)
    {
      error_at (pragma_tok->location,
                "%<#pragma omp target update%> may only be "
                "used in compound statements");
      cp_parser_skip_to_pragma_eol (parser, pragma_tok);
      return false;
    }

  tree clauses
    = cp_parser_omp_all_clauses (parser, OMP_TARGET_UPDATE_CLAUSE_MASK,
                                 "#pragma omp target update", pragma_tok);
  if (find_omp_clause (clauses, OMP_CLAUSE_TO) == NULL_TREE
      && find_omp_clause (clauses, OMP_CLAUSE_FROM) == NULL_TREE)
    {
      error_at (pragma_tok->location,
                "%<#pragma omp target update%> must contain at least one "
                "%<from%> or %<to%> clause");
      return false;
    }

  tree stmt = make_node (OMP_TARGET_UPDATE);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_UPDATE_CLAUSES (stmt) = clauses;
  SET_EXPR_LOCATION (stmt, pragma_tok->location);
  add_stmt (stmt);
  return false;
}

/* OpenMP 4.0:
   # pragma omp target target-clause[optseq] new-line
     structured-block  */

#define OMP_TARGET_CLAUSE_MASK                          \
  ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_DEVICE)     \
  | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_MAP)        \
  | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_IF))

static bool
cp_parser_omp_target (cp_parser *parser, cp_token *pragma_tok,
                      enum pragma_context context)
{
  if (context != pragma_stmt && context != pragma_compound)
    {
      cp_parser_error (parser, "expected declaration specifiers");
      cp_parser_skip_to_pragma_eol (parser, pragma_tok);
      return false;
    }

  if (cp_lexer_next_token_is (parser->lexer, CPP_NAME))
    {
      tree id = cp_lexer_peek_token (parser->lexer)->u.value;
      const char *p = IDENTIFIER_POINTER (id);

      if (strcmp (p, "teams") == 0)
        {
          tree cclauses[C_OMP_CLAUSE_SPLIT_COUNT];
          char p_name[sizeof ("#pragma omp target teams distribute "
                              "parallel for simd")];
          cp_lexer_consume_token (parser->lexer);
          strcpy (p_name, "#pragma omp target");
          if (!flag_openmp)  /* flag_openmp_simd  */
            {
              tree stmt = cp_parser_omp_teams (parser, pragma_tok, p_name,
                                               OMP_TARGET_CLAUSE_MASK,
                                               cclauses);
              return stmt != NULL_TREE;
            }
          keep_next_level (true);
          tree sb = begin_omp_structured_block ();
          unsigned save = cp_parser_begin_omp_structured_block (parser);
          tree ret = cp_parser_omp_teams (parser, pragma_tok, p_name,
                                          OMP_TARGET_CLAUSE_MASK, cclauses);
          cp_parser_end_omp_structured_block (parser, save);
          tree body = finish_omp_structured_block (sb);
          if (ret == NULL_TREE)
            return false;
          tree stmt = make_node (OMP_TARGET);
          TREE_TYPE (stmt) = void_type_node;
          OMP_TARGET_CLAUSES (stmt) = cclauses[C_OMP_CLAUSE_SPLIT_TARGET];
          OMP_TARGET_BODY (stmt) = body;
          add_stmt (stmt);
          return true;
        }
      else if (!flag_openmp)  /* flag_openmp_simd  */
        {
          cp_parser_skip_to_pragma_eol (parser, pragma_tok);
          return false;
        }
      else if (strcmp (p, "data") == 0)
        {
          cp_lexer_consume_token (parser->lexer);
          cp_parser_omp_target_data (parser, pragma_tok);
          return true;
        }
      else if (strcmp (p, "update") == 0)
        {
          cp_lexer_consume_token (parser->lexer);
          return cp_parser_omp_target_update (parser, pragma_tok, context);
        }
    }

  tree stmt = make_node (OMP_TARGET);
  TREE_TYPE (stmt) = void_type_node;
  OMP_TARGET_CLAUSES (stmt)
    = cp_parser_omp_all_clauses (parser, OMP_TARGET_CLAUSE_MASK,
                                 "#pragma omp target", pragma_tok);
  keep_next_level
(true); OMP_TARGET_BODY (stmt) = cp_parser_omp_structured_block (parser); SET_EXPR_LOCATION (stmt, pragma_tok->location); add_stmt (stmt); return true; } /* OpenACC 2.0: # pragma acc cache (variable-list) new-line */ static tree cp_parser_oacc_cache (cp_parser *parser, cp_token *pragma_tok) { tree stmt, clauses; clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE__CACHE_, NULL_TREE); clauses = finish_omp_clauses (clauses); cp_parser_require_pragma_eol (parser, cp_lexer_peek_token (parser->lexer)); stmt = make_node (OACC_CACHE); TREE_TYPE (stmt) = void_type_node; OACC_CACHE_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, pragma_tok->location); add_stmt (stmt); return stmt; } /* OpenACC 2.0: # pragma acc data oacc-data-clause[optseq] new-line structured-block */ #define OACC_DATA_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE)) static tree cp_parser_oacc_data (cp_parser *parser, cp_token *pragma_tok) { tree stmt, clauses, block; unsigned int save; clauses = cp_parser_oacc_all_clauses (parser, OACC_DATA_CLAUSE_MASK, "#pragma acc data", pragma_tok); block = begin_omp_parallel (); save = cp_parser_begin_omp_structured_block (parser); cp_parser_statement (parser, NULL_TREE, false, NULL); cp_parser_end_omp_structured_block (parser, save); stmt = finish_oacc_data (clauses, block); return stmt; } /* OpenACC 2.0: # pragma acc enter data oacc-enter-data-clause[optseq] new-line or # pragma acc exit data oacc-exit-data-clause[optseq] new-line LOC is the location of the #pragma token. */ #define OACC_ENTER_DATA_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) ) #define OACC_EXIT_DATA_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DELETE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT) ) static tree cp_parser_oacc_enter_exit_data (cp_parser *parser, cp_token *pragma_tok, bool enter) { tree stmt, clauses; if (cp_lexer_next_token_is (parser->lexer, CPP_PRAGMA_EOL) || cp_lexer_next_token_is_not (parser->lexer, CPP_NAME)) { cp_parser_error (parser, enter ? 
"expected %<data%> in %<#pragma acc enter data%>" : "expected %<data%> in %<#pragma acc exit data%>"); cp_parser_skip_to_pragma_eol (parser, pragma_tok); return NULL_TREE; } const char *p = IDENTIFIER_POINTER (cp_lexer_peek_token (parser->lexer)->u.value); if (strcmp (p, "data") != 0) { cp_parser_error (parser, "invalid pragma"); cp_parser_skip_to_pragma_eol (parser, pragma_tok); return NULL_TREE; } cp_lexer_consume_token (parser->lexer); if (enter) clauses = cp_parser_oacc_all_clauses (parser, OACC_ENTER_DATA_CLAUSE_MASK, "#pragma acc enter data", pragma_tok); else clauses = cp_parser_oacc_all_clauses (parser, OACC_EXIT_DATA_CLAUSE_MASK, "#pragma acc exit data", pragma_tok); if (find_omp_clause (clauses, OMP_CLAUSE_MAP) == NULL_TREE) { error_at (pragma_tok->location, "%<#pragma acc enter data%> has no data movement clause"); return NULL_TREE; } stmt = enter ? make_node (OACC_ENTER_DATA) : make_node (OACC_EXIT_DATA); TREE_TYPE (stmt) = void_type_node; if (enter) OACC_ENTER_DATA_CLAUSES (stmt) = clauses; else OACC_EXIT_DATA_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, pragma_tok->location); add_stmt (stmt); return stmt; } /* OpenACC 2.0: # pragma acc kernels oacc-kernels-clause[optseq] new-line structured-block */ #define OACC_KERNELS_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT)) static tree cp_parser_oacc_kernels (cp_parser *parser, cp_token *pragma_tok) { tree stmt, clauses, block; unsigned int save; clauses = cp_parser_oacc_all_clauses (parser, OACC_KERNELS_CLAUSE_MASK, "#pragma acc kernels", pragma_tok); block = begin_omp_parallel (); save = cp_parser_begin_omp_structured_block (parser); cp_parser_statement (parser, NULL_TREE, false, NULL); cp_parser_end_omp_structured_block (parser, save); stmt = finish_oacc_kernels (clauses, block); return stmt; } /* OpenACC 2.0: # pragma acc loop oacc-loop-clause[optseq] new-line structured-block */ #define OACC_LOOP_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COLLAPSE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION)) static tree cp_parser_oacc_loop (cp_parser *parser, cp_token *pragma_tok) { tree stmt, clauses, block; int save; clauses = cp_parser_oacc_all_clauses (parser, OACC_LOOP_CLAUSE_MASK, "#pragma acc loop", pragma_tok); block = begin_omp_structured_block (); save = cp_parser_begin_omp_structured_block (parser); stmt = cp_parser_omp_for_loop (parser, OACC_LOOP, clauses, NULL); cp_parser_end_omp_structured_block (parser, save); add_stmt (finish_omp_structured_block (block)); return stmt; } /* OpenACC 2.0: # pragma acc parallel oacc-parallel-clause[optseq] new-line structured-block */ #define OACC_PARALLEL_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPY) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_COPYOUT) \ | 
(OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_CREATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICEPTR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_GANGS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_NUM_WORKERS) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPY) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYIN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_COPYOUT) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_PRESENT_OR_CREATE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_REDUCTION) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_VECTOR_LENGTH) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT)) static tree cp_parser_oacc_parallel (cp_parser *parser, cp_token *pragma_tok) { tree stmt, clauses, block; unsigned int save; clauses = cp_parser_oacc_all_clauses (parser, OACC_PARALLEL_CLAUSE_MASK, "#pragma acc parallel", pragma_tok); block = begin_omp_parallel (); save = cp_parser_begin_omp_structured_block (parser); cp_parser_statement (parser, NULL_TREE, false, NULL); cp_parser_end_omp_structured_block (parser, save); stmt = finish_oacc_parallel (clauses, block); return stmt; } /* OpenACC 2.0: # pragma acc update oacc-update-clause[optseq] new-line */ #define OACC_UPDATE_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_DEVICE) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_HOST) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_IF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_SELF) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_WAIT)) static tree cp_parser_oacc_update (cp_parser *parser, cp_token *pragma_tok) { tree stmt, clauses; clauses = cp_parser_oacc_all_clauses (parser, OACC_UPDATE_CLAUSE_MASK, "#pragma acc update", pragma_tok); if (find_omp_clause (clauses, OMP_CLAUSE_MAP) == NULL_TREE) { error_at (pragma_tok->location, "%<#pragma acc update%> must contain at least one " "%<device%> or %<host/self%> clause"); return NULL_TREE; } stmt = make_node (OACC_UPDATE); TREE_TYPE (stmt) = void_type_node; OACC_UPDATE_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, pragma_tok->location); add_stmt (stmt); return stmt; } /* OpenACC 2.0: # pragma acc wait [(intseq)] oacc-wait-clause[optseq] new-line LOC is the location of the #pragma token. 
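   An accepted form is e.g. `#pragma acc wait (1, 2) async', where the
   optional integer list names the asynchronous activities to wait for.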
*/ #define OACC_WAIT_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OACC_CLAUSE_ASYNC)) static tree cp_parser_oacc_wait (cp_parser *parser, cp_token *pragma_tok) { tree clauses, list = NULL_TREE, stmt = NULL_TREE; location_t loc = cp_lexer_peek_token (parser->lexer)->location; if (cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN) list = cp_parser_oacc_wait_list (parser, loc, list); clauses = cp_parser_oacc_all_clauses (parser, OACC_WAIT_CLAUSE_MASK, "#pragma acc wait", pragma_tok); stmt = c_finish_oacc_wait (loc, list, clauses); return stmt; } /* OpenMP 4.0: # pragma omp declare simd declare-simd-clauses[optseq] new-line */ #define OMP_DECLARE_SIMD_CLAUSE_MASK \ ( (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_SIMDLEN) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_LINEAR) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_ALIGNED) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_UNIFORM) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_INBRANCH) \ | (OMP_CLAUSE_MASK_1 << PRAGMA_OMP_CLAUSE_NOTINBRANCH)) static void cp_parser_omp_declare_simd (cp_parser *parser, cp_token *pragma_tok, enum pragma_context context) { bool first_p = parser->omp_declare_simd == NULL; cp_omp_declare_simd_data data; if (first_p) { data.error_seen = false; data.fndecl_seen = false; data.tokens = vNULL; parser->omp_declare_simd = &data; } while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL) && cp_lexer_next_token_is_not (parser->lexer, CPP_EOF)) cp_lexer_consume_token (parser->lexer); if (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL)) parser->omp_declare_simd->error_seen = true; cp_parser_require_pragma_eol (parser, pragma_tok); struct cp_token_cache *cp = cp_token_cache_new (pragma_tok, cp_lexer_peek_token (parser->lexer)); parser->omp_declare_simd->tokens.safe_push (cp); if (first_p) { while (cp_lexer_next_token_is (parser->lexer, CPP_PRAGMA)) cp_parser_pragma (parser, context); switch (context) { case pragma_external: cp_parser_declaration (parser); break; case pragma_member: cp_parser_member_declaration (parser); break; case pragma_objc_icode: cp_parser_block_declaration (parser, /*statement_p=*/false); break; default: cp_parser_declaration_statement (parser); break; } if (parser->omp_declare_simd && !parser->omp_declare_simd->error_seen && !parser->omp_declare_simd->fndecl_seen) error_at (pragma_tok->location, "%<#pragma omp declare simd%> not immediately followed by " "function declaration or definition"); data.tokens.release (); parser->omp_declare_simd = NULL; } } /* Handles the delayed parsing of the Cilk Plus SIMD-enabled function. This function is modelled similar to the late parsing of omp declare simd. 
*/ static tree cp_parser_late_parsing_cilk_simd_fn_info (cp_parser *parser, tree attrs) { struct cp_token_cache *ce; cp_omp_declare_simd_data *info = parser->cilk_simd_fn_info; int ii = 0; if (parser->omp_declare_simd != NULL) { error ("%<#pragma omp declare simd%> cannot be used in the same function" " marked as a Cilk Plus SIMD-enabled function"); XDELETE (parser->cilk_simd_fn_info); parser->cilk_simd_fn_info = NULL; return attrs; } if (!info->error_seen && info->fndecl_seen) { error ("vector attribute not immediately followed by a single function" " declaration or definition"); info->error_seen = true; } if (info->error_seen) return attrs; FOR_EACH_VEC_ELT (info->tokens, ii, ce) { tree c, cl; cp_parser_push_lexer_for_tokens (parser, ce); parser->lexer->in_pragma = true; cl = cp_parser_omp_all_clauses (parser, CILK_SIMD_FN_CLAUSE_MASK, "SIMD-enabled functions attribute", NULL); cp_parser_pop_lexer (parser); if (cl) cl = tree_cons (NULL_TREE, cl, NULL_TREE); c = build_tree_list (get_identifier ("cilk simd function"), NULL_TREE); TREE_CHAIN (c) = attrs; attrs = c; c = build_tree_list (get_identifier ("omp declare simd"), cl); TREE_CHAIN (c) = attrs; if (processing_template_decl) ATTR_IS_DEPENDENT (c) = 1; attrs = c; } info->fndecl_seen = true; XDELETE (parser->cilk_simd_fn_info); parser->cilk_simd_fn_info = NULL; return attrs; } /* Finalize #pragma omp declare simd clauses after direct declarator has been parsed, and put that into "omp declare simd" attribute. */ static tree cp_parser_late_parsing_omp_declare_simd (cp_parser *parser, tree attrs) { struct cp_token_cache *ce; cp_omp_declare_simd_data *data = parser->omp_declare_simd; int i; if (!data->error_seen && data->fndecl_seen) { error ("%<#pragma omp declare simd%> not immediately followed by " "a single function declaration or definition"); data->error_seen = true; return attrs; } if (data->error_seen) return attrs; FOR_EACH_VEC_ELT (data->tokens, i, ce) { tree c, cl; cp_parser_push_lexer_for_tokens (parser, ce); parser->lexer->in_pragma = true; gcc_assert (cp_lexer_peek_token (parser->lexer)->type == CPP_PRAGMA); cp_token *pragma_tok = cp_lexer_consume_token (parser->lexer); cp_lexer_consume_token (parser->lexer); cl = cp_parser_omp_all_clauses (parser, OMP_DECLARE_SIMD_CLAUSE_MASK, "#pragma omp declare simd", pragma_tok); cp_parser_pop_lexer (parser); if (cl) cl = tree_cons (NULL_TREE, cl, NULL_TREE); c = build_tree_list (get_identifier ("omp declare simd"), cl); TREE_CHAIN (c) = attrs; if (processing_template_decl) ATTR_IS_DEPENDENT (c) = 1; attrs = c; } data->fndecl_seen = true; return attrs; } /* OpenMP 4.0: # pragma omp declare target new-line declarations and definitions # pragma omp end declare target new-line */ static void cp_parser_omp_declare_target (cp_parser *parser, cp_token *pragma_tok) { cp_parser_skip_to_pragma_eol (parser, pragma_tok); scope_chain->omp_declare_target_attribute++; } static void cp_parser_omp_end_declare_target (cp_parser *parser, cp_token *pragma_tok) { const char *p = ""; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; p = IDENTIFIER_POINTER (id); } if (strcmp (p, "declare") == 0) { cp_lexer_consume_token (parser->lexer); p = ""; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; p = IDENTIFIER_POINTER (id); } if (strcmp (p, "target") == 0) cp_lexer_consume_token (parser->lexer); else { cp_parser_error (parser, "expected %<target%>"); cp_parser_skip_to_pragma_eol (parser, 
pragma_tok); return; } } else { cp_parser_error (parser, "expected %<declare%>"); cp_parser_skip_to_pragma_eol (parser, pragma_tok); return; } cp_parser_skip_to_pragma_eol (parser, pragma_tok); if (!scope_chain->omp_declare_target_attribute) error_at (pragma_tok->location, "%<#pragma omp end declare target%> without corresponding " "%<#pragma omp declare target%>"); else scope_chain->omp_declare_target_attribute--; } /* Helper function of cp_parser_omp_declare_reduction. Parse the combiner expression and optional initializer clause of #pragma omp declare reduction. We store the expression(s) as either 3, 6 or 7 special statements inside of the artificial function's body. The first two statements are DECL_EXPRs for the artificial OMP_OUT resp. OMP_IN variables, followed by a statement with the combiner expression that uses those variables. If there was any INITIALIZER clause, this is followed by further statements, the fourth and fifth statements are DECL_EXPRs for the artificial OMP_PRIV resp. OMP_ORIG variables. If the INITIALIZER clause wasn't the constructor variant (first token after open paren is not omp_priv), then the sixth statement is a statement with the function call expression that uses the OMP_PRIV and optionally OMP_ORIG variable. Otherwise, the sixth statement is whatever statement cp_finish_decl emits to initialize the OMP_PRIV artificial variable and there is seventh statement, a DECL_EXPR of the OMP_PRIV statement again. */ static bool cp_parser_omp_declare_reduction_exprs (tree fndecl, cp_parser *parser) { tree type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl))); gcc_assert (TREE_CODE (type) == REFERENCE_TYPE); type = TREE_TYPE (type); tree omp_out = build_lang_decl (VAR_DECL, get_identifier ("omp_out"), type); DECL_ARTIFICIAL (omp_out) = 1; pushdecl (omp_out); add_decl_expr (omp_out); tree omp_in = build_lang_decl (VAR_DECL, get_identifier ("omp_in"), type); DECL_ARTIFICIAL (omp_in) = 1; pushdecl (omp_in); add_decl_expr (omp_in); tree combiner; tree omp_priv = NULL_TREE, omp_orig = NULL_TREE, initializer = NULL_TREE; keep_next_level (true); tree block = begin_omp_structured_block (); combiner = cp_parser_expression (parser); finish_expr_stmt (combiner); block = finish_omp_structured_block (block); add_stmt (block); if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) return false; const char *p = ""; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; p = IDENTIFIER_POINTER (id); } if (strcmp (p, "initializer") == 0) { cp_lexer_consume_token (parser->lexer); if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return false; p = ""; if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; p = IDENTIFIER_POINTER (id); } omp_priv = build_lang_decl (VAR_DECL, get_identifier ("omp_priv"), type); DECL_ARTIFICIAL (omp_priv) = 1; pushdecl (omp_priv); add_decl_expr (omp_priv); omp_orig = build_lang_decl (VAR_DECL, get_identifier ("omp_orig"), type); DECL_ARTIFICIAL (omp_orig) = 1; pushdecl (omp_orig); add_decl_expr (omp_orig); keep_next_level (true); block = begin_omp_structured_block (); bool ctor = false; if (strcmp (p, "omp_priv") == 0) { bool is_direct_init, is_non_constant_init; ctor = true; cp_lexer_consume_token (parser->lexer); /* Reject initializer (omp_priv) and initializer (omp_priv ()). 
*/ if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN) || (cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN) && cp_lexer_peek_nth_token (parser->lexer, 2)->type == CPP_CLOSE_PAREN && cp_lexer_peek_nth_token (parser->lexer, 3)->type == CPP_CLOSE_PAREN)) { finish_omp_structured_block (block); error ("invalid initializer clause"); return false; } initializer = cp_parser_initializer (parser, &is_direct_init, &is_non_constant_init); cp_finish_decl (omp_priv, initializer, !is_non_constant_init, NULL_TREE, LOOKUP_ONLYCONVERTING); } else { cp_parser_parse_tentatively (parser); tree fn_name = cp_parser_id_expression (parser, /*template_p=*/false, /*check_dependency_p=*/true, /*template_p=*/NULL, /*declarator_p=*/false, /*optional_p=*/false); vec<tree, va_gc> *args; if (fn_name == error_mark_node || cp_parser_error_occurred (parser) || !cp_lexer_next_token_is (parser->lexer, CPP_OPEN_PAREN) || ((args = cp_parser_parenthesized_expression_list (parser, non_attr, /*cast_p=*/false, /*allow_expansion_p=*/true, /*non_constant_p=*/NULL)), cp_parser_error_occurred (parser))) { finish_omp_structured_block (block); cp_parser_abort_tentative_parse (parser); cp_parser_error (parser, "expected id-expression (arguments)"); return false; } unsigned int i; tree arg; FOR_EACH_VEC_SAFE_ELT (args, i, arg) if (arg == omp_priv || (TREE_CODE (arg) == ADDR_EXPR && TREE_OPERAND (arg, 0) == omp_priv)) break; cp_parser_abort_tentative_parse (parser); if (arg == NULL_TREE) error ("one of the initializer call arguments should be %<omp_priv%>" " or %<&omp_priv%>"); initializer = cp_parser_postfix_expression (parser, false, false, false, false, NULL); finish_expr_stmt (initializer); } block = finish_omp_structured_block (block); cp_walk_tree (&block, cp_remove_omp_priv_cleanup_stmt, omp_priv, NULL); add_stmt (block); if (ctor) add_decl_expr (omp_orig); if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) return false; } if (!cp_lexer_next_token_is (parser->lexer, CPP_PRAGMA_EOL)) cp_parser_required_error (parser, RT_PRAGMA_EOL, /*keyword=*/false); return true; } /* OpenMP 4.0 #pragma omp declare reduction (reduction-id : typename-list : expression) \ initializer-clause[opt] new-line initializer-clause: initializer (omp_priv initializer) initializer (function-name (argument-list)) */ static void cp_parser_omp_declare_reduction (cp_parser *parser, cp_token *pragma_tok, enum pragma_context) { auto_vec<tree> types; enum tree_code reduc_code = ERROR_MARK; tree reduc_id = NULL_TREE, orig_reduc_id = NULL_TREE, type; unsigned int i; cp_token *first_token; cp_token_cache *cp; int errs; void *p; /* Get the high-water mark for the DECLARATOR_OBSTACK. 
*/ p = obstack_alloc (&declarator_obstack, 0); if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) goto fail; switch (cp_lexer_peek_token (parser->lexer)->type) { case CPP_PLUS: reduc_code = PLUS_EXPR; break; case CPP_MULT: reduc_code = MULT_EXPR; break; case CPP_MINUS: reduc_code = MINUS_EXPR; break; case CPP_AND: reduc_code = BIT_AND_EXPR; break; case CPP_XOR: reduc_code = BIT_XOR_EXPR; break; case CPP_OR: reduc_code = BIT_IOR_EXPR; break; case CPP_AND_AND: reduc_code = TRUTH_ANDIF_EXPR; break; case CPP_OR_OR: reduc_code = TRUTH_ORIF_EXPR; break; case CPP_NAME: reduc_id = orig_reduc_id = cp_parser_identifier (parser); break; default: cp_parser_error (parser, "expected %<+%>, %<*%>, %<-%>, %<&%>, %<^%>, " "%<|%>, %<&&%>, %<||%> or identifier"); goto fail; } if (reduc_code != ERROR_MARK) cp_lexer_consume_token (parser->lexer); reduc_id = omp_reduction_id (reduc_code, reduc_id, NULL_TREE); if (reduc_id == error_mark_node) goto fail; if (!cp_parser_require (parser, CPP_COLON, RT_COLON)) goto fail; /* Types may not be defined in declare reduction type list. */ const char *saved_message; saved_message = parser->type_definition_forbidden_message; parser->type_definition_forbidden_message = G_("types may not be defined in declare reduction type list"); bool saved_colon_corrects_to_scope_p; saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p; parser->colon_corrects_to_scope_p = false; bool saved_colon_doesnt_start_class_def_p; saved_colon_doesnt_start_class_def_p = parser->colon_doesnt_start_class_def_p; parser->colon_doesnt_start_class_def_p = true; while (true) { location_t loc = cp_lexer_peek_token (parser->lexer)->location; type = cp_parser_type_id (parser); if (type == error_mark_node) ; else if (ARITHMETIC_TYPE_P (type) && (orig_reduc_id == NULL_TREE || (TREE_CODE (type) != COMPLEX_TYPE && (strcmp (IDENTIFIER_POINTER (orig_reduc_id), "min") == 0 || strcmp (IDENTIFIER_POINTER (orig_reduc_id), "max") == 0)))) error_at (loc, "predeclared arithmetic type %qT in " "%<#pragma omp declare reduction%>", type); else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE || TREE_CODE (type) == ARRAY_TYPE) error_at (loc, "function or array type %qT in " "%<#pragma omp declare reduction%>", type); else if (TREE_CODE (type) == REFERENCE_TYPE) error_at (loc, "reference type %qT in " "%<#pragma omp declare reduction%>", type); else if (TYPE_QUALS_NO_ADDR_SPACE (type)) error_at (loc, "const, volatile or __restrict qualified type %qT in " "%<#pragma omp declare reduction%>", type); else types.safe_push (type); if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); else break; } /* Restore the saved message. 
*/ parser->type_definition_forbidden_message = saved_message; parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; parser->colon_doesnt_start_class_def_p = saved_colon_doesnt_start_class_def_p; if (!cp_parser_require (parser, CPP_COLON, RT_COLON) || types.is_empty ()) { fail: cp_parser_skip_to_pragma_eol (parser, pragma_tok); goto done; } first_token = cp_lexer_peek_token (parser->lexer); cp = NULL; errs = errorcount; FOR_EACH_VEC_ELT (types, i, type) { tree fntype = build_function_type_list (void_type_node, cp_build_reference_type (type, false), NULL_TREE); tree this_reduc_id = reduc_id; if (!dependent_type_p (type)) this_reduc_id = omp_reduction_id (ERROR_MARK, reduc_id, type); tree fndecl = build_lang_decl (FUNCTION_DECL, this_reduc_id, fntype); DECL_SOURCE_LOCATION (fndecl) = pragma_tok->location; DECL_ARTIFICIAL (fndecl) = 1; DECL_EXTERNAL (fndecl) = 1; DECL_DECLARED_INLINE_P (fndecl) = 1; DECL_IGNORED_P (fndecl) = 1; DECL_OMP_DECLARE_REDUCTION_P (fndecl) = 1; DECL_ATTRIBUTES (fndecl) = tree_cons (get_identifier ("gnu_inline"), NULL_TREE, DECL_ATTRIBUTES (fndecl)); if (processing_template_decl) fndecl = push_template_decl (fndecl); bool block_scope = false; tree block = NULL_TREE; if (current_function_decl) { block_scope = true; DECL_CONTEXT (fndecl) = global_namespace; if (!processing_template_decl) pushdecl (fndecl); } else if (current_class_type) { if (cp == NULL) { while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL) && cp_lexer_next_token_is_not (parser->lexer, CPP_EOF)) cp_lexer_consume_token (parser->lexer); if (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL)) goto fail; cp = cp_token_cache_new (first_token, cp_lexer_peek_nth_token (parser->lexer, 2)); } DECL_STATIC_FUNCTION_P (fndecl) = 1; finish_member_declaration (fndecl); DECL_PENDING_INLINE_INFO (fndecl) = cp; DECL_PENDING_INLINE_P (fndecl) = 1; vec_safe_push (unparsed_funs_with_definitions, fndecl); continue; } else { DECL_CONTEXT (fndecl) = current_namespace; pushdecl (fndecl); } if (!block_scope) start_preparsed_function (fndecl, NULL_TREE, SF_PRE_PARSED); else block = begin_omp_structured_block (); if (cp) { cp_parser_push_lexer_for_tokens (parser, cp); parser->lexer->in_pragma = true; } if (!cp_parser_omp_declare_reduction_exprs (fndecl, parser)) { if (!block_scope) finish_function (0); else DECL_CONTEXT (fndecl) = current_function_decl; if (cp) cp_parser_pop_lexer (parser); goto fail; } if (cp) cp_parser_pop_lexer (parser); if (!block_scope) finish_function (0); else { DECL_CONTEXT (fndecl) = current_function_decl; block = finish_omp_structured_block (block); if (TREE_CODE (block) == BIND_EXPR) DECL_SAVED_TREE (fndecl) = BIND_EXPR_BODY (block); else if (TREE_CODE (block) == STATEMENT_LIST) DECL_SAVED_TREE (fndecl) = block; if (processing_template_decl) add_decl_expr (fndecl); } cp_check_omp_declare_reduction (fndecl); if (cp == NULL && types.length () > 1) cp = cp_token_cache_new (first_token, cp_lexer_peek_nth_token (parser->lexer, 2)); if (errs != errorcount) break; } cp_parser_require_pragma_eol (parser, pragma_tok); done: /* Free any declarators allocated. 
*/ obstack_free (&declarator_obstack, p); } /* OpenMP 4.0 #pragma omp declare simd declare-simd-clauses[optseq] new-line #pragma omp declare reduction (reduction-id : typename-list : expression) \ initializer-clause[opt] new-line #pragma omp declare target new-line */ static void cp_parser_omp_declare (cp_parser *parser, cp_token *pragma_tok, enum pragma_context context) { if (cp_lexer_next_token_is (parser->lexer, CPP_NAME)) { tree id = cp_lexer_peek_token (parser->lexer)->u.value; const char *p = IDENTIFIER_POINTER (id); if (strcmp (p, "simd") == 0) { cp_lexer_consume_token (parser->lexer); cp_parser_omp_declare_simd (parser, pragma_tok, context); return; } cp_ensure_no_omp_declare_simd (parser); if (strcmp (p, "reduction") == 0) { cp_lexer_consume_token (parser->lexer); cp_parser_omp_declare_reduction (parser, pragma_tok, context); return; } if (!flag_openmp) /* flag_openmp_simd */ { cp_parser_skip_to_pragma_eol (parser, pragma_tok); return; } if (strcmp (p, "target") == 0) { cp_lexer_consume_token (parser->lexer); cp_parser_omp_declare_target (parser, pragma_tok); return; } } cp_parser_error (parser, "expected %<simd%> or %<reduction%> " "or %<target%>"); cp_parser_require_pragma_eol (parser, pragma_tok); } /* Main entry point to OpenMP statement pragmas. */ static void cp_parser_omp_construct (cp_parser *parser, cp_token *pragma_tok) { tree stmt; char p_name[sizeof "#pragma omp teams distribute parallel for simd"]; omp_clause_mask mask (0); switch (pragma_tok->pragma_kind) { case PRAGMA_OACC_CACHE: stmt = cp_parser_oacc_cache (parser, pragma_tok); break; case PRAGMA_OACC_DATA: stmt = cp_parser_oacc_data (parser, pragma_tok); break; case PRAGMA_OACC_ENTER_DATA: stmt = cp_parser_oacc_enter_exit_data (parser, pragma_tok, true); break; case PRAGMA_OACC_EXIT_DATA: stmt = cp_parser_oacc_enter_exit_data (parser, pragma_tok, false); break; case PRAGMA_OACC_KERNELS: stmt = cp_parser_oacc_kernels (parser, pragma_tok); break; case PRAGMA_OACC_LOOP: stmt = cp_parser_oacc_loop (parser, pragma_tok); break; case PRAGMA_OACC_PARALLEL: stmt = cp_parser_oacc_parallel (parser, pragma_tok); break; case PRAGMA_OACC_UPDATE: stmt = cp_parser_oacc_update (parser, pragma_tok); break; case PRAGMA_OACC_WAIT: stmt = cp_parser_oacc_wait (parser, pragma_tok); break; case PRAGMA_OMP_ATOMIC: cp_parser_omp_atomic (parser, pragma_tok); return; case PRAGMA_OMP_CRITICAL: stmt = cp_parser_omp_critical (parser, pragma_tok); break; case PRAGMA_OMP_DISTRIBUTE: strcpy (p_name, "#pragma omp"); stmt = cp_parser_omp_distribute (parser, pragma_tok, p_name, mask, NULL); break; case PRAGMA_OMP_FOR: strcpy (p_name, "#pragma omp"); stmt = cp_parser_omp_for (parser, pragma_tok, p_name, mask, NULL); break; case PRAGMA_OMP_MASTER: stmt = cp_parser_omp_master (parser, pragma_tok); break; case PRAGMA_OMP_ORDERED: stmt = cp_parser_omp_ordered (parser, pragma_tok); break; case PRAGMA_OMP_PARALLEL: strcpy (p_name, "#pragma omp"); stmt = cp_parser_omp_parallel (parser, pragma_tok, p_name, mask, NULL); break; case PRAGMA_OMP_SECTIONS: strcpy (p_name, "#pragma omp"); stmt = cp_parser_omp_sections (parser, pragma_tok, p_name, mask, NULL); break; case PRAGMA_OMP_SIMD: strcpy (p_name, "#pragma omp"); stmt = cp_parser_omp_simd (parser, pragma_tok, p_name, mask, NULL); break; case PRAGMA_OMP_SINGLE: stmt = cp_parser_omp_single (parser, pragma_tok); break; case PRAGMA_OMP_TASK: stmt = cp_parser_omp_task (parser, pragma_tok); break; case PRAGMA_OMP_TASKGROUP: stmt = cp_parser_omp_taskgroup (parser, pragma_tok); break; case PRAGMA_OMP_TEAMS: strcpy 
(p_name, "#pragma omp"); stmt = cp_parser_omp_teams (parser, pragma_tok, p_name, mask, NULL); break; default: gcc_unreachable (); } if (stmt) SET_EXPR_LOCATION (stmt, pragma_tok->location); } /* Transactional Memory parsing routines. */ /* Parse a transaction attribute. txn-attribute: attribute [ [ identifier ] ] ??? Simplify this when C++0x bracket attributes are implemented properly. */ static tree cp_parser_txn_attribute_opt (cp_parser *parser) { cp_token *token; tree attr_name, attr = NULL; if (cp_lexer_next_token_is_keyword (parser->lexer, RID_ATTRIBUTE)) return cp_parser_attributes_opt (parser); if (cp_lexer_next_token_is_not (parser->lexer, CPP_OPEN_SQUARE)) return NULL_TREE; cp_lexer_consume_token (parser->lexer); if (!cp_parser_require (parser, CPP_OPEN_SQUARE, RT_OPEN_SQUARE)) goto error1; token = cp_lexer_peek_token (parser->lexer); if (token->type == CPP_NAME || token->type == CPP_KEYWORD) { token = cp_lexer_consume_token (parser->lexer); attr_name = (token->type == CPP_KEYWORD /* For keywords, use the canonical spelling, not the parsed identifier. */ ? ridpointers[(int) token->keyword] : token->u.value); attr = build_tree_list (attr_name, NULL_TREE); } else cp_parser_error (parser, "expected identifier"); cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); error1: cp_parser_require (parser, CPP_CLOSE_SQUARE, RT_CLOSE_SQUARE); return attr; } /* Parse a __transaction_atomic or __transaction_relaxed statement. transaction-statement: __transaction_atomic txn-attribute[opt] txn-noexcept-spec[opt] compound-statement __transaction_relaxed txn-noexcept-spec[opt] compound-statement */ static tree cp_parser_transaction (cp_parser *parser, enum rid keyword) { unsigned char old_in = parser->in_transaction; unsigned char this_in = 1, new_in; cp_token *token; tree stmt, attrs, noex; gcc_assert (keyword == RID_TRANSACTION_ATOMIC || keyword == RID_TRANSACTION_RELAXED); token = cp_parser_require_keyword (parser, keyword, (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC : RT_TRANSACTION_RELAXED)); gcc_assert (token != NULL); if (keyword == RID_TRANSACTION_RELAXED) this_in |= TM_STMT_ATTR_RELAXED; else { attrs = cp_parser_txn_attribute_opt (parser); if (attrs) this_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER); } /* Parse a noexcept specification. */ noex = cp_parser_noexcept_specification_opt (parser, true, NULL, true); /* Keep track if we're in the lexical scope of an outer transaction. */ new_in = this_in | (old_in & TM_STMT_ATTR_OUTER); stmt = begin_transaction_stmt (token->location, NULL, this_in); parser->in_transaction = new_in; cp_parser_compound_statement (parser, NULL, false, false); parser->in_transaction = old_in; finish_transaction_stmt (stmt, NULL, this_in, noex); return stmt; } /* Parse a __transaction_atomic or __transaction_relaxed expression. transaction-expression: __transaction_atomic txn-noexcept-spec[opt] ( expression ) __transaction_relaxed txn-noexcept-spec[opt] ( expression ) */ static tree cp_parser_transaction_expression (cp_parser *parser, enum rid keyword) { unsigned char old_in = parser->in_transaction; unsigned char this_in = 1; cp_token *token; tree expr, noex; bool noex_expr; gcc_assert (keyword == RID_TRANSACTION_ATOMIC || keyword == RID_TRANSACTION_RELAXED); if (!flag_tm) error (keyword == RID_TRANSACTION_RELAXED ? 
G_("%<__transaction_relaxed%> without transactional memory " "support enabled") : G_("%<__transaction_atomic%> without transactional memory " "support enabled")); token = cp_parser_require_keyword (parser, keyword, (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC : RT_TRANSACTION_RELAXED)); gcc_assert (token != NULL); if (keyword == RID_TRANSACTION_RELAXED) this_in |= TM_STMT_ATTR_RELAXED; /* Set this early. This might mean that we allow transaction_cancel in an expression that we find out later actually has to be a constexpr. However, we expect that cxx_constant_value will be able to deal with this; also, if the noexcept has no constexpr, then what we parse next really is a transaction's body. */ parser->in_transaction = this_in; /* Parse a noexcept specification. */ noex = cp_parser_noexcept_specification_opt (parser, false, &noex_expr, true); if (!noex || !noex_expr || cp_lexer_peek_token (parser->lexer)->type == CPP_OPEN_PAREN) { cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN); expr = cp_parser_expression (parser); expr = finish_parenthesized_expr (expr); cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN); } else { /* The only expression that is available got parsed for the noexcept already. noexcept is true then. */ expr = noex; noex = boolean_true_node; } expr = build_transaction_expr (token->location, expr, this_in, noex); parser->in_transaction = old_in; if (cp_parser_non_integral_constant_expression (parser, NIC_TRANSACTION)) return error_mark_node; return (flag_tm ? expr : error_mark_node); } /* Parse a function-transaction-block. function-transaction-block: __transaction_atomic txn-attribute[opt] ctor-initializer[opt] function-body __transaction_atomic txn-attribute[opt] function-try-block __transaction_relaxed ctor-initializer[opt] function-body __transaction_relaxed function-try-block */ static bool cp_parser_function_transaction (cp_parser *parser, enum rid keyword) { unsigned char old_in = parser->in_transaction; unsigned char new_in = 1; tree compound_stmt, stmt, attrs; bool ctor_initializer_p; cp_token *token; gcc_assert (keyword == RID_TRANSACTION_ATOMIC || keyword == RID_TRANSACTION_RELAXED); token = cp_parser_require_keyword (parser, keyword, (keyword == RID_TRANSACTION_ATOMIC ? RT_TRANSACTION_ATOMIC : RT_TRANSACTION_RELAXED)); gcc_assert (token != NULL); if (keyword == RID_TRANSACTION_RELAXED) new_in |= TM_STMT_ATTR_RELAXED; else { attrs = cp_parser_txn_attribute_opt (parser); if (attrs) new_in |= parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER); } stmt = begin_transaction_stmt (token->location, &compound_stmt, new_in); parser->in_transaction = new_in; if (cp_lexer_next_token_is_keyword (parser->lexer, RID_TRY)) ctor_initializer_p = cp_parser_function_try_block (parser); else ctor_initializer_p = cp_parser_ctor_initializer_opt_and_function_body (parser, /*in_function_try_block=*/false); parser->in_transaction = old_in; finish_transaction_stmt (stmt, compound_stmt, new_in, NULL_TREE); return ctor_initializer_p; } /* Parse a __transaction_cancel statement. cancel-statement: __transaction_cancel txn-attribute[opt] ; __transaction_cancel txn-attribute[opt] throw-expression ; ??? Cancel and throw is not yet implemented. 
*/ static tree cp_parser_transaction_cancel (cp_parser *parser) { cp_token *token; bool is_outer = false; tree stmt, attrs; token = cp_parser_require_keyword (parser, RID_TRANSACTION_CANCEL, RT_TRANSACTION_CANCEL); gcc_assert (token != NULL); attrs = cp_parser_txn_attribute_opt (parser); if (attrs) is_outer = (parse_tm_stmt_attr (attrs, TM_STMT_ATTR_OUTER) != 0); /* ??? Parse cancel-and-throw here. */ cp_parser_require (parser, CPP_SEMICOLON, RT_SEMICOLON); if (!flag_tm) { error_at (token->location, "%<__transaction_cancel%> without " "transactional memory support enabled"); return error_mark_node; } else if (parser->in_transaction & TM_STMT_ATTR_RELAXED) { error_at (token->location, "%<__transaction_cancel%> within a " "%<__transaction_relaxed%>"); return error_mark_node; } else if (is_outer) { if ((parser->in_transaction & TM_STMT_ATTR_OUTER) == 0 && !is_tm_may_cancel_outer (current_function_decl)) { error_at (token->location, "outer %<__transaction_cancel%> not " "within outer %<__transaction_atomic%>"); error_at (token->location, " or a %<transaction_may_cancel_outer%> function"); return error_mark_node; } } else if (parser->in_transaction == 0) { error_at (token->location, "%<__transaction_cancel%> not within " "%<__transaction_atomic%>"); return error_mark_node; } stmt = build_tm_abort_call (token->location, is_outer); add_stmt (stmt); return stmt; } /* The parser. */ static GTY (()) cp_parser *the_parser; /* Special handling for the first token or line in the file. The first thing in the file might be #pragma GCC pch_preprocess, which loads a PCH file, which is a GC collection point. So we need to handle this first pragma without benefit of an existing lexer structure. Always returns one token to the caller in *FIRST_TOKEN. This is either the true first token of the file, or the first token after the initial pragma. */ static void cp_parser_initial_pragma (cp_token *first_token) { tree name = NULL; cp_lexer_get_preprocessor_token (NULL, first_token); if (first_token->pragma_kind != PRAGMA_GCC_PCH_PREPROCESS) return; cp_lexer_get_preprocessor_token (NULL, first_token); if (first_token->type == CPP_STRING) { name = first_token->u.value; cp_lexer_get_preprocessor_token (NULL, first_token); if (first_token->type != CPP_PRAGMA_EOL) error_at (first_token->location, "junk at end of %<#pragma GCC pch_preprocess%>"); } else error_at (first_token->location, "expected string literal"); /* Skip to the end of the pragma. */ while (first_token->type != CPP_PRAGMA_EOL && first_token->type != CPP_EOF) cp_lexer_get_preprocessor_token (NULL, first_token); /* Now actually load the PCH file. */ if (name) c_common_pch_pragma (parse_in, TREE_STRING_POINTER (name)); /* Read one more token to return to our caller. We have to do this after reading the PCH file in, since its pointers have to be live. */ cp_lexer_get_preprocessor_token (NULL, first_token); } /* Parses the grainsize pragma for the _Cilk_for statement. Syntax: #pragma cilk grainsize = <VALUE>. */ static void cp_parser_cilk_grainsize (cp_parser *parser, cp_token *pragma_tok) { if (cp_parser_require (parser, CPP_EQ, RT_EQ)) { tree exp = cp_parser_binary_expression (parser, false, false, PREC_NOT_OPERATOR, NULL); cp_parser_skip_to_pragma_eol (parser, pragma_tok); if (!exp || exp == error_mark_node) { error_at (pragma_tok->location, "invalid grainsize for _Cilk_for"); return; } /* Make sure the next token is _Cilk_for, it is invalid otherwise. 
*/ if (cp_lexer_next_token_is_keyword (parser->lexer, RID_CILK_FOR)) cp_parser_cilk_for (parser, exp); else warning_at (cp_lexer_peek_token (parser->lexer)->location, 0, "%<#pragma cilk grainsize%> is not followed by " "%<_Cilk_for%>"); return; } cp_parser_skip_to_pragma_eol (parser, pragma_tok); } /* Normal parsing of a pragma token. Here we can (and must) use the regular lexer. */ static bool cp_parser_pragma (cp_parser *parser, enum pragma_context context) { cp_token *pragma_tok; unsigned int id; pragma_tok = cp_lexer_consume_token (parser->lexer); gcc_assert (pragma_tok->type == CPP_PRAGMA); parser->lexer->in_pragma = true; id = pragma_tok->pragma_kind; if (id != PRAGMA_OMP_DECLARE_REDUCTION) cp_ensure_no_omp_declare_simd (parser); switch (id) { case PRAGMA_GCC_PCH_PREPROCESS: error_at (pragma_tok->location, "%<#pragma GCC pch_preprocess%> must be first"); break; case PRAGMA_OMP_BARRIER: switch (context) { case pragma_compound: cp_parser_omp_barrier (parser, pragma_tok); return false; case pragma_stmt: error_at (pragma_tok->location, "%<#pragma omp barrier%> may only be " "used in compound statements"); break; default: goto bad_stmt; } break; case PRAGMA_OMP_FLUSH: switch (context) { case pragma_compound: cp_parser_omp_flush (parser, pragma_tok); return false; case pragma_stmt: error_at (pragma_tok->location, "%<#pragma omp flush%> may only be " "used in compound statements"); break; default: goto bad_stmt; } break; case PRAGMA_OMP_TASKWAIT: switch (context) { case pragma_compound: cp_parser_omp_taskwait (parser, pragma_tok); return false; case pragma_stmt: error_at (pragma_tok->location, "%<#pragma omp taskwait%> may only be " "used in compound statements"); break; default: goto bad_stmt; } break; case PRAGMA_OMP_TASKYIELD: switch (context) { case pragma_compound: cp_parser_omp_taskyield (parser, pragma_tok); return false; case pragma_stmt: error_at (pragma_tok->location, "%<#pragma omp taskyield%> may only be " "used in compound statements"); break; default: goto bad_stmt; } break; case PRAGMA_OMP_CANCEL: switch (context) { case pragma_compound: cp_parser_omp_cancel (parser, pragma_tok); return false; case pragma_stmt: error_at (pragma_tok->location, "%<#pragma omp cancel%> may only be " "used in compound statements"); break; default: goto bad_stmt; } break; case PRAGMA_OMP_CANCELLATION_POINT: switch (context) { case pragma_compound: cp_parser_omp_cancellation_point (parser, pragma_tok); return false; case pragma_stmt: error_at (pragma_tok->location, "%<#pragma omp cancellation point%> may only be " "used in compound statements"); break; default: goto bad_stmt; } break; case PRAGMA_OMP_THREADPRIVATE: cp_parser_omp_threadprivate (parser, pragma_tok); return false; case PRAGMA_OMP_DECLARE_REDUCTION: cp_parser_omp_declare (parser, pragma_tok, context); return false; case PRAGMA_OACC_CACHE: case PRAGMA_OACC_DATA: case PRAGMA_OACC_ENTER_DATA: case PRAGMA_OACC_EXIT_DATA: case PRAGMA_OACC_KERNELS: case PRAGMA_OACC_PARALLEL: case PRAGMA_OACC_LOOP: case PRAGMA_OACC_UPDATE: case PRAGMA_OACC_WAIT: case PRAGMA_OMP_ATOMIC: case PRAGMA_OMP_CRITICAL: case PRAGMA_OMP_DISTRIBUTE: case PRAGMA_OMP_FOR: case PRAGMA_OMP_MASTER: case PRAGMA_OMP_ORDERED: case PRAGMA_OMP_PARALLEL: case PRAGMA_OMP_SECTIONS: case PRAGMA_OMP_SIMD: case PRAGMA_OMP_SINGLE: case PRAGMA_OMP_TASK: case PRAGMA_OMP_TASKGROUP: case PRAGMA_OMP_TEAMS: if (context != pragma_stmt && context != pragma_compound) goto bad_stmt; cp_parser_omp_construct (parser, pragma_tok); return true; case PRAGMA_OMP_TARGET: return cp_parser_omp_target 
(parser, pragma_tok, context); case PRAGMA_OMP_END_DECLARE_TARGET: cp_parser_omp_end_declare_target (parser, pragma_tok); return false; case PRAGMA_OMP_SECTION: error_at (pragma_tok->location, "%<#pragma omp section%> may only be used in " "%<#pragma omp sections%> construct"); break; case PRAGMA_IVDEP: { if (context == pragma_external) { error_at (pragma_tok->location, "%<#pragma GCC ivdep%> must be inside a function"); break; } cp_parser_skip_to_pragma_eol (parser, pragma_tok); cp_token *tok; tok = cp_lexer_peek_token (the_parser->lexer); if (tok->type != CPP_KEYWORD || (tok->keyword != RID_FOR && tok->keyword != RID_WHILE && tok->keyword != RID_DO)) { cp_parser_error (parser, "for, while or do statement expected"); return false; } cp_parser_iteration_statement (parser, true); return true; } case PRAGMA_CILK_SIMD: if (context == pragma_external) { error_at (pragma_tok->location, "%<#pragma simd%> must be inside a function"); break; } cp_parser_cilk_simd (parser, pragma_tok); return true; case PRAGMA_CILK_GRAINSIZE: if (context == pragma_external) { error_at (pragma_tok->location, "%<#pragma cilk grainsize%> must be inside a function"); break; } /* Ignore the pragma if Cilk Plus is not enabled. */ if (flag_cilkplus) { cp_parser_cilk_grainsize (parser, pragma_tok); return true; } else { error_at (pragma_tok->location, "-fcilkplus must be enabled to use " "%<#pragma cilk grainsize%>"); break; } default: gcc_assert (id >= PRAGMA_FIRST_EXTERNAL); c_invoke_pragma_handler (id); break; bad_stmt: cp_parser_error (parser, "expected declaration specifiers"); break; } cp_parser_skip_to_pragma_eol (parser, pragma_tok); return false; } /* The interface the pragma parsers have to the lexer. */ enum cpp_ttype pragma_lex (tree *value) { cp_token *tok; enum cpp_ttype ret; tok = cp_lexer_peek_token (the_parser->lexer); ret = tok->type; *value = tok->u.value; if (ret == CPP_PRAGMA_EOL || ret == CPP_EOF) ret = CPP_EOF; else if (ret == CPP_STRING) *value = cp_parser_string_literal (the_parser, false, false); else { cp_lexer_consume_token (the_parser->lexer); if (ret == CPP_KEYWORD) ret = CPP_NAME; } return ret; } /* External interface. */ /* Parse one entire translation unit. */ void c_parse_file (void) { static bool already_called = false; if (already_called) fatal_error (input_location, "inter-module optimizations not implemented for C++"); already_called = true; the_parser = cp_parser_new (); push_deferring_access_checks (flag_access_control ? dk_no_deferred : dk_no_check); cp_parser_translation_unit (the_parser); the_parser = NULL; } /* Parses the Cilk Plus #pragma simd and SIMD-enabled function attribute's vectorlength clause: Syntax: vectorlength ( constant-expression ) */ static tree cp_parser_cilk_simd_vectorlength (cp_parser *parser, tree clauses, bool is_simd_fn) { location_t loc = cp_lexer_peek_token (parser->lexer)->location; tree expr; /* The vectorlength clause in #pragma simd behaves exactly like OpenMP's safelen clause. Thus, vectorlength is represented as OMP 4.0 safelen. For SIMD-enabled function it is represented by OMP 4.0 simdlen. */ if (!is_simd_fn) check_no_duplicate_clause (clauses, OMP_CLAUSE_SAFELEN, "vectorlength", loc); else check_no_duplicate_clause (clauses, OMP_CLAUSE_SIMDLEN, "vectorlength", loc); if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return error_mark_node; expr = cp_parser_constant_expression (parser); expr = maybe_constant_value (expr); /* If expr == error_mark_node, then don't emit any errors nor create a clause. 
if any of the above functions returns error mark node then they would have emitted an error message. */ if (expr == error_mark_node) ; else if (!TREE_TYPE (expr) || !TREE_CONSTANT (expr) || !INTEGRAL_TYPE_P (TREE_TYPE (expr))) error_at (loc, "vectorlength must be an integer constant"); else if (TREE_CONSTANT (expr) && exact_log2 (TREE_INT_CST_LOW (expr)) == -1) error_at (loc, "vectorlength must be a power of 2"); else { tree c; if (!is_simd_fn) { c = build_omp_clause (loc, OMP_CLAUSE_SAFELEN); OMP_CLAUSE_SAFELEN_EXPR (c) = expr; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } else { c = build_omp_clause (loc, OMP_CLAUSE_SIMDLEN); OMP_CLAUSE_SIMDLEN_EXPR (c) = expr; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } } if (!cp_parser_require (parser, CPP_CLOSE_PAREN, RT_CLOSE_PAREN)) return error_mark_node; return clauses; } /* Handles the Cilk Plus #pragma simd linear clause. Syntax: linear ( simd-linear-variable-list ) simd-linear-variable-list: simd-linear-variable simd-linear-variable-list , simd-linear-variable simd-linear-variable: id-expression id-expression : simd-linear-step simd-linear-step: conditional-expression */ static tree cp_parser_cilk_simd_linear (cp_parser *parser, tree clauses) { location_t loc = cp_lexer_peek_token (parser->lexer)->location; if (!cp_parser_require (parser, CPP_OPEN_PAREN, RT_OPEN_PAREN)) return clauses; if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME)) { cp_parser_error (parser, "expected identifier"); cp_parser_skip_to_closing_parenthesis (parser, false, false, true); return error_mark_node; } bool saved_colon_corrects_to_scope_p = parser->colon_corrects_to_scope_p; parser->colon_corrects_to_scope_p = false; while (1) { cp_token *token = cp_lexer_peek_token (parser->lexer); if (cp_lexer_next_token_is_not (parser->lexer, CPP_NAME)) { cp_parser_error (parser, "expected variable-name"); clauses = error_mark_node; break; } tree var_name = cp_parser_id_expression (parser, false, true, NULL, false, false); tree decl = cp_parser_lookup_name_simple (parser, var_name, token->location); if (decl == error_mark_node) { cp_parser_name_lookup_error (parser, var_name, decl, NLE_NULL, token->location); clauses = error_mark_node; } else { tree e = NULL_TREE; tree step_size = integer_one_node; /* If present, parse the linear step. Otherwise, assume the default value of 1. */ if (cp_lexer_peek_token (parser->lexer)->type == CPP_COLON) { cp_lexer_consume_token (parser->lexer); e = cp_parser_assignment_expression (parser); e = maybe_constant_value (e); if (e == error_mark_node) { /* If an error has occurred, then the whole pragma is considered ill-formed. Thus, no reason to keep parsing. */ clauses = error_mark_node; break; } else if (type_dependent_expression_p (e) || value_dependent_expression_p (e) || (TREE_TYPE (e) && INTEGRAL_TYPE_P (TREE_TYPE (e)) && (TREE_CONSTANT (e) || DECL_P (e)))) step_size = e; else cp_parser_error (parser, "step size must be an integer constant " "expression or an integer variable"); } /* Use the OMP_CLAUSE_LINEAR, which has the same semantics. 
*/ tree l = build_omp_clause (loc, OMP_CLAUSE_LINEAR); OMP_CLAUSE_DECL (l) = decl; OMP_CLAUSE_LINEAR_STEP (l) = step_size; OMP_CLAUSE_CHAIN (l) = clauses; clauses = l; } if (cp_lexer_next_token_is (parser->lexer, CPP_COMMA)) cp_lexer_consume_token (parser->lexer); else if (cp_lexer_next_token_is (parser->lexer, CPP_CLOSE_PAREN)) break; else { error_at (cp_lexer_peek_token (parser->lexer)->location, "expected %<,%> or %<)%> after %qE", decl); clauses = error_mark_node; break; } } parser->colon_corrects_to_scope_p = saved_colon_corrects_to_scope_p; cp_parser_skip_to_closing_parenthesis (parser, false, false, true); return clauses; } /* Returns the name of the next clause. If the clause is not recognized, then PRAGMA_CILK_CLAUSE_NONE is returned and the next token is not consumed. Otherwise, the appropriate enum from the pragma_simd_clause is returned and the token is consumed. */ static pragma_omp_clause cp_parser_cilk_simd_clause_name (cp_parser *parser) { pragma_omp_clause clause_type; cp_token *token = cp_lexer_peek_token (parser->lexer); if (token->keyword == RID_PRIVATE) clause_type = PRAGMA_CILK_CLAUSE_PRIVATE; else if (!token->u.value || token->type != CPP_NAME) return PRAGMA_CILK_CLAUSE_NONE; else if (!strcmp (IDENTIFIER_POINTER (token->u.value), "vectorlength")) clause_type = PRAGMA_CILK_CLAUSE_VECTORLENGTH; else if (!strcmp (IDENTIFIER_POINTER (token->u.value), "linear")) clause_type = PRAGMA_CILK_CLAUSE_LINEAR; else if (!strcmp (IDENTIFIER_POINTER (token->u.value), "firstprivate")) clause_type = PRAGMA_CILK_CLAUSE_FIRSTPRIVATE; else if (!strcmp (IDENTIFIER_POINTER (token->u.value), "lastprivate")) clause_type = PRAGMA_CILK_CLAUSE_LASTPRIVATE; else if (!strcmp (IDENTIFIER_POINTER (token->u.value), "reduction")) clause_type = PRAGMA_CILK_CLAUSE_REDUCTION; else return PRAGMA_CILK_CLAUSE_NONE; cp_lexer_consume_token (parser->lexer); return clause_type; } /* Parses all the #pragma simd clauses. Returns a list of clauses found. */ static tree cp_parser_cilk_simd_all_clauses (cp_parser *parser, cp_token *pragma_token) { tree clauses = NULL_TREE; while (cp_lexer_next_token_is_not (parser->lexer, CPP_PRAGMA_EOL) && clauses != error_mark_node) { pragma_omp_clause c_kind; c_kind = cp_parser_cilk_simd_clause_name (parser); if (c_kind == PRAGMA_CILK_CLAUSE_VECTORLENGTH) clauses = cp_parser_cilk_simd_vectorlength (parser, clauses, false); else if (c_kind == PRAGMA_CILK_CLAUSE_LINEAR) clauses = cp_parser_cilk_simd_linear (parser, clauses); else if (c_kind == PRAGMA_CILK_CLAUSE_PRIVATE) /* Use the OpenMP 4.0 equivalent function. */ clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_PRIVATE, clauses); else if (c_kind == PRAGMA_CILK_CLAUSE_FIRSTPRIVATE) /* Use the OpenMP 4.0 equivalent function. */ clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_FIRSTPRIVATE, clauses); else if (c_kind == PRAGMA_CILK_CLAUSE_LASTPRIVATE) /* Use the OMP 4.0 equivalent function. */ clauses = cp_parser_omp_var_list (parser, OMP_CLAUSE_LASTPRIVATE, clauses); else if (c_kind == PRAGMA_CILK_CLAUSE_REDUCTION) /* Use the OMP 4.0 equivalent function. */ clauses = cp_parser_omp_clause_reduction (parser, clauses); else { clauses = error_mark_node; cp_parser_error (parser, "expected %<#pragma simd%> clause"); break; } } cp_parser_skip_to_pragma_eol (parser, pragma_token); if (clauses == error_mark_node) return error_mark_node; else return c_finish_cilk_clauses (clauses); } /* Main entry-point for parsing Cilk Plus <#pragma simd> for loops. 
*/ static void cp_parser_cilk_simd (cp_parser *parser, cp_token *pragma_token) { tree clauses = cp_parser_cilk_simd_all_clauses (parser, pragma_token); if (clauses == error_mark_node) return; if (cp_lexer_next_token_is_not_keyword (parser->lexer, RID_FOR)) { error_at (cp_lexer_peek_token (parser->lexer)->location, "for statement expected"); return; } tree sb = begin_omp_structured_block (); int save = cp_parser_begin_omp_structured_block (parser); tree ret = cp_parser_omp_for_loop (parser, CILK_SIMD, clauses, NULL); if (ret) cpp_validate_cilk_plus_loop (OMP_FOR_BODY (ret)); cp_parser_end_omp_structured_block (parser, save); add_stmt (finish_omp_structured_block (sb)); } /* Main entry-point for parsing Cilk Plus _Cilk_for loops. The return value is error_mark_node when errors happen and CILK_FOR tree on success. */ static tree cp_parser_cilk_for (cp_parser *parser, tree grain) { if (cp_lexer_next_token_is_not_keyword (parser->lexer, RID_CILK_FOR)) gcc_unreachable (); tree sb = begin_omp_structured_block (); int save = cp_parser_begin_omp_structured_block (parser); tree clauses = build_omp_clause (EXPR_LOCATION (grain), OMP_CLAUSE_SCHEDULE); OMP_CLAUSE_SCHEDULE_KIND (clauses) = OMP_CLAUSE_SCHEDULE_CILKFOR; OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (clauses) = grain; clauses = finish_omp_clauses (clauses); tree ret = cp_parser_omp_for_loop (parser, CILK_FOR, clauses, NULL); if (ret) cpp_validate_cilk_plus_loop (ret); else ret = error_mark_node; cp_parser_end_omp_structured_block (parser, save); add_stmt (finish_omp_structured_block (sb)); return ret; } /* Create an identifier for a generic parameter type (a synthesized template parameter implied by `auto' or a concept identifier). */ static GTY(()) int generic_parm_count; static tree make_generic_type_name () { char buf[32]; sprintf (buf, "auto:%d", ++generic_parm_count); return get_identifier (buf); } /* Predicate that behaves as is_auto_or_concept but matches the parent node of the generic type rather than the generic type itself. This allows for type transformation in add_implicit_template_parms. */ static inline bool tree_type_is_auto_or_concept (const_tree t) { return TREE_TYPE (t) && is_auto_or_concept (TREE_TYPE (t)); } /* Add an implicit template type parameter to the CURRENT_TEMPLATE_PARMS (creating a new template parameter list if necessary). Returns the newly created template type parm. */ tree synthesize_implicit_template_parm (cp_parser *parser) { gcc_assert (current_binding_level->kind == sk_function_parms); /* We are either continuing a function template that already contains implicit template parameters, creating a new fully-implicit function template, or extending an existing explicit function template with implicit template parameters. */ cp_binding_level *const entry_scope = current_binding_level; bool become_template = false; cp_binding_level *parent_scope = 0; if (parser->implicit_template_scope) { gcc_assert (parser->implicit_template_parms); current_binding_level = parser->implicit_template_scope; } else { /* Roll back to the existing template parameter scope (in the case of extending an explicit function template) or introduce a new template parameter scope ahead of the function parameter scope (or class scope in the case of out-of-line member definitions). The function scope is added back after template parameter synthesis below. 
*/ cp_binding_level *scope = entry_scope; while (scope->kind == sk_function_parms) { parent_scope = scope; scope = scope->level_chain; } if (current_class_type && !LAMBDA_TYPE_P (current_class_type)) { /* If not defining a class, then any class scope is a scope level in an out-of-line member definition. In this case simply wind back beyond the first such scope to inject the template parameter list. Otherwise wind back to the class being defined. The latter can occur in class member friend declarations such as: class A { void foo (auto); }; class B { friend void A::foo (auto); }; The template parameter list synthesized for the friend declaration must be injected in the scope of 'B'. This can also occur in erroneous cases such as: struct A { struct B { void foo (auto); }; void B::foo (auto) {} }; Here the attempted definition of 'B::foo' within 'A' is ill-formed but, nevertheless, the template parameter list synthesized for the declarator should be injected into the scope of 'A' as if the ill-formed template was specified explicitly. */ while (scope->kind == sk_class && !scope->defining_class_p) { parent_scope = scope; scope = scope->level_chain; } } current_binding_level = scope; if (scope->kind != sk_template_parms || !function_being_declared_is_template_p (parser)) { /* Introduce a new template parameter list for implicit template parameters. */ become_template = true; parser->implicit_template_scope = begin_scope (sk_template_parms, NULL); ++processing_template_decl; parser->fully_implicit_function_template_p = true; ++parser->num_template_parameter_lists; } else { /* Synthesize implicit template parameters at the end of the explicit template parameter list. */ gcc_assert (current_template_parms); parser->implicit_template_scope = scope; tree v = INNERMOST_TEMPLATE_PARMS (current_template_parms); parser->implicit_template_parms = TREE_VEC_ELT (v, TREE_VEC_LENGTH (v) - 1); } } /* Synthesize a new template parameter and track the current template parameter chain with implicit_template_parms. */ tree synth_id = make_generic_type_name (); tree synth_tmpl_parm = finish_template_type_parm (class_type_node, synth_id); tree new_parm = process_template_parm (parser->implicit_template_parms, input_location, build_tree_list (NULL_TREE, synth_tmpl_parm), /*non_type=*/false, /*param_pack=*/false); if (parser->implicit_template_parms) parser->implicit_template_parms = TREE_CHAIN (parser->implicit_template_parms); else parser->implicit_template_parms = new_parm; tree new_type = TREE_TYPE (getdecls ()); /* If creating a fully implicit function template, start the new implicit template parameter list with this synthesized type, otherwise grow the current template parameter list. */ if (become_template) { parent_scope->level_chain = current_binding_level; tree new_parms = make_tree_vec (1); TREE_VEC_ELT (new_parms, 0) = parser->implicit_template_parms; current_template_parms = tree_cons (size_int (processing_template_decl), new_parms, current_template_parms); } else { tree& new_parms = INNERMOST_TEMPLATE_PARMS (current_template_parms); int new_parm_idx = TREE_VEC_LENGTH (new_parms); new_parms = grow_tree_vec (new_parms, new_parm_idx + 1); TREE_VEC_ELT (new_parms, new_parm_idx) = parser->implicit_template_parms; } current_binding_level = entry_scope; return new_type; } /* Finish the declaration of a fully implicit function template. Such a template has no explicit template parameter list so has not been through the normal template head and tail processing. 
synthesize_implicit_template_parm tries to do the head; this tries to do the tail. MEMBER_DECL_OPT should be provided if the declaration is a class member such that its template declaration can be completed. If MEMBER_DECL_OPT is provided the finished form is returned. Otherwise NULL_TREE is returned. */ tree finish_fully_implicit_template (cp_parser *parser, tree member_decl_opt) { gcc_assert (parser->fully_implicit_function_template_p); if (member_decl_opt && member_decl_opt != error_mark_node && DECL_VIRTUAL_P (member_decl_opt)) { error_at (DECL_SOURCE_LOCATION (member_decl_opt), "implicit templates may not be %<virtual%>"); DECL_VIRTUAL_P (member_decl_opt) = false; } if (member_decl_opt) member_decl_opt = finish_member_template_decl (member_decl_opt); end_template_decl (); parser->fully_implicit_function_template_p = false; --parser->num_template_parameter_lists; return member_decl_opt; } #include "gt-cp-parser.h"
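/* Illustrative usage for the OpenMP 4.0 declare-reduction parsing above:
   a minimal sketch of user-level source this front end accepts when built
   with -fopenmp.  The reduction-id "merge" and the helper "collect" are
   arbitrary names for the example, not part of the parser.

     #include <vector>

     #pragma omp declare reduction (merge : std::vector<int> : \
         omp_out.insert (omp_out.end (), omp_in.begin (), omp_in.end ()))

     void collect (std::vector<int> &v, int n)
     {
     #pragma omp parallel for reduction (merge : v)
       for (int i = 0; i < n; i++)
         v.push_back (i);
     }
*/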
openmp_parallel_for.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/* Simple test of OpenMP parallel for: one parallel region reports the
   team size once, then statically distributes the iterations. */

int main (int argc, char *argv[])
{
  int n = 7; /* shared iteration count */
  int i;     /* private loop index */

  #pragma omp parallel default(shared) private(i)
  {
    /* Exactly one thread of the team prints the team size. */
    #pragma omp single
    {
      printf("num_threads: %d\n", omp_get_num_threads());
    }

    /* The n iterations are divided among the team in static chunks. */
    #pragma omp for schedule(static)
    for (i = 0; i < n; i++)
    {
      printf("iteration: %d, thread: %d\n", i, omp_get_thread_num());
    }
  }
  return 0;
}
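/* Build-and-run sketch for the test above (GCC spelling of the OpenMP
   switch assumed; other compilers differ):

     gcc -fopenmp openmp_parallel_for.c -o openmp_parallel_for
     OMP_NUM_THREADS=4 ./openmp_parallel_for

   Each of the seven iterations prints exactly once; which thread handles
   which iteration follows the static schedule. */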
trsm_x_sky_u_hi_col.c
#include "alphasparse/kernel.h" #include "alphasparse/util.h" #include "alphasparse/opt.h" #include <memory.h> #ifdef _OPENMP #include <omp.h> #endif alphasparse_status_t ONAME(const ALPHA_Number alpha, const ALPHA_SPMAT_SKY *A, const ALPHA_Number *x, const ALPHA_INT columns, const ALPHA_INT ldx, ALPHA_Number *y, const ALPHA_INT ldy) { int num_thread = alpha_get_thread_num(); #ifdef _OPENMP #pragma omp parallel for num_threads(num_thread) #endif for(ALPHA_INT out_y_col = 0; out_y_col < columns; out_y_col++) { for (ALPHA_INT c = A->cols - 1; c >= 0; c--) { ALPHA_Number temp; alpha_setzero(temp); for (ALPHA_INT ic = A->cols - 1; ic > c; ic--) { ALPHA_INT start = A->pointers[ic]; ALPHA_INT end = A->pointers[ic + 1]; ALPHA_INT eles_num = ic - c; if(end - eles_num - 1 >= start) alpha_madde(temp, A->values[end - eles_num - 1], y[out_y_col * ldy + ic]); } ALPHA_Number t; alpha_mul(t, alpha, x[out_y_col * ldx + c]); alpha_sub(y[out_y_col * ldy + c], t, temp); } } return ALPHA_SPARSE_STATUS_SUCCESS; }
RelativeNeighborhoodGraph.h
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #ifndef _SPTAG_COMMON_RNG_H_ #define _SPTAG_COMMON_RNG_H_ #include "NeighborhoodGraph.h" namespace SPTAG { namespace COMMON { class RelativeNeighborhoodGraph: public NeighborhoodGraph { public: void RebuildNeighbors(VectorIndex* index, const SizeType node, SizeType* nodes, const BasicResult* queryResults, const int numResults) { DimensionType count = 0; for (int j = 0; j < numResults && count < m_iNeighborhoodSize; j++) { const BasicResult& item = queryResults[j]; if (item.VID < 0) break; if (item.VID == node) continue; bool good = true; for (DimensionType k = 0; k < count; k++) { if (index->ComputeDistance(index->GetSample(nodes[k]), index->GetSample(item.VID)) <= item.Dist) { good = false; break; } } if (good) nodes[count++] = item.VID; } for (DimensionType j = count; j < m_iNeighborhoodSize; j++) nodes[j] = -1; } void InsertNeighbors(VectorIndex* index, const SizeType node, SizeType insertNode, float insertDist) { SizeType* nodes = m_pNeighborhoodGraph[node]; for (DimensionType k = 0; k < m_iNeighborhoodSize; k++) { SizeType tmpNode = nodes[k]; if (tmpNode < -1) continue; if (tmpNode < 0) { bool good = true; for (DimensionType t = 0; t < k; t++) { if (index->ComputeDistance(index->GetSample(insertNode), index->GetSample(nodes[t])) < insertDist) { good = false; break; } } if (good) { nodes[k] = insertNode; } break; } float tmpDist = index->ComputeDistance(index->GetSample(node), index->GetSample(tmpNode)); if (insertDist < tmpDist || (insertDist == tmpDist && insertNode < tmpNode)) { bool good = true; for (DimensionType t = 0; t < k; t++) { if (index->ComputeDistance(index->GetSample(insertNode), index->GetSample(nodes[t])) < insertDist) { good = false; break; } } if (good) { nodes[k] = insertNode; insertNode = tmpNode; insertDist = tmpDist; } else { break; } } } } float GraphAccuracyEstimation(VectorIndex* index, const SizeType samples, const std::unordered_map<SizeType, SizeType>* idmap = nullptr) { DimensionType* correct = new DimensionType[samples]; #pragma omp parallel for schedule(dynamic) for (SizeType i = 0; i < samples; i++) { SizeType x = COMMON::Utils::rand(m_iGraphSize); //int x = i; COMMON::QueryResultSet<void> query(nullptr, m_iCEF); for (SizeType y = 0; y < m_iGraphSize; y++) { if ((idmap != nullptr && idmap->find(y) != idmap->end())) continue; float dist = index->ComputeDistance(index->GetSample(x), index->GetSample(y)); query.AddPoint(y, dist); } query.SortResult(); SizeType * exact_rng = new SizeType[m_iNeighborhoodSize]; RebuildNeighbors(index, x, exact_rng, query.GetResults(), m_iCEF); correct[i] = 0; for (DimensionType j = 0; j < m_iNeighborhoodSize; j++) { if (exact_rng[j] == -1) { correct[i] += m_iNeighborhoodSize - j; break; } for (DimensionType k = 0; k < m_iNeighborhoodSize; k++) if ((m_pNeighborhoodGraph)[x][k] == exact_rng[j]) { correct[i]++; break; } } delete[] exact_rng; } float acc = 0; for (SizeType i = 0; i < samples; i++) acc += float(correct[i]); acc = acc / samples / m_iNeighborhoodSize; delete[] correct; return acc; } }; } } #endif
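// Illustrative restatement of the pruning rule used by RebuildNeighbors
// above: walking candidates in increasing distance from the query point q,
// a candidate c is kept only if every previously kept neighbor is farther
// from c than q is.  A self-contained sketch; the function name and the
// caller-supplied distance functor are assumptions, not SPTAG API.
#include <vector>
#include <functional>

inline std::vector<int> RngPrune(int q, const std::vector<int>& sortedCandidates,
                                 int maxNeighbors,
                                 const std::function<float(int, int)>& dist)
{
    std::vector<int> kept;
    for (int c : sortedCandidates)
    {
        if ((int)kept.size() >= maxNeighbors) break;
        if (c == q) continue;
        bool good = true;
        for (int n : kept)
            if (dist(n, c) <= dist(q, c)) { good = false; break; }
        if (good) kept.push_back(c);
    }
    return kept;
}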
cache.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC AAA CCCC H H EEEEE % % C A A C H H E % % C AAAAA C HHHHH EEE % % C A A C H H E % % CCCC A A CCCC H H EEEEE % % % % % % MagickCore Pixel Cache Methods % % % % Software Design % % Cristy % % July 1999 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/distribute-cache-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/nt-base-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/quantum.h" #include "MagickCore/random_.h" #include "MagickCore/registry.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/timer-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #if defined(MAGICKCORE_ZLIB_DELEGATE) #include "zlib.h" #endif /* Define declarations. */ #define CacheTick(offset,extent) QuantumTick((MagickOffsetType) offset,extent) #define IsFileDescriptorLimitExceeded() (GetMagickResource(FileResource) > \ GetMagickResourceLimit(FileResource) ? MagickTrue : MagickFalse) /* Typedef declarations. */ typedef struct _MagickModulo { ssize_t quotient, remainder; } MagickModulo; /* Forward declarations. 
*/ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static Cache GetImagePixelCache(Image *,const MagickBooleanType,ExceptionInfo *) magick_hot_spot; static const Quantum *GetVirtualPixelCache(const Image *,const VirtualPixelMethod,const ssize_t, const ssize_t,const size_t,const size_t,ExceptionInfo *), *GetVirtualPixelsCache(const Image *); static const void *GetVirtualMetacontentFromCache(const Image *); static MagickBooleanType GetOneAuthenticPixelFromCache(Image *,const ssize_t,const ssize_t,Quantum *, ExceptionInfo *), GetOneVirtualPixelFromCache(const Image *,const VirtualPixelMethod, const ssize_t,const ssize_t,Quantum *,ExceptionInfo *), OpenPixelCache(Image *,const MapMode,ExceptionInfo *), OpenPixelCacheOnDisk(CacheInfo *,const MapMode), ReadPixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), ReadPixelCacheMetacontent(CacheInfo *magick_restrict, NexusInfo *magick_restrict,ExceptionInfo *), SyncAuthenticPixelsCache(Image *,ExceptionInfo *), WritePixelCachePixels(CacheInfo *magick_restrict,NexusInfo *magick_restrict, ExceptionInfo *), WritePixelCacheMetacontent(CacheInfo *,NexusInfo *magick_restrict, ExceptionInfo *); static Quantum *GetAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *QueueAuthenticPixelsCache(Image *,const ssize_t,const ssize_t,const size_t, const size_t,ExceptionInfo *), *SetPixelCacheNexusPixels(const CacheInfo *magick_restrict,const MapMode, const ssize_t,const ssize_t,const size_t,const size_t, const MagickBooleanType,NexusInfo *magick_restrict,ExceptionInfo *) magick_hot_spot; #if defined(MAGICKCORE_OPENCL_SUPPORT) static void CopyOpenCLBuffer(CacheInfo *magick_restrict); #endif #if defined(__cplusplus) || defined(c_plusplus) } #endif /* Global declarations. */ static SemaphoreInfo *cache_semaphore = (SemaphoreInfo *) NULL; static ssize_t cache_anonymous_memory = (-1); static time_t cache_epoch = 0; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + A c q u i r e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCache() acquires a pixel cache. % % The format of the AcquirePixelCache() method is: % % Cache AcquirePixelCache(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. 
% */ MagickPrivate Cache AcquirePixelCache(const size_t number_threads) { CacheInfo *magick_restrict cache_info; char *value; cache_info=(CacheInfo *) AcquireAlignedMemory(1,sizeof(*cache_info)); if (cache_info == (CacheInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(cache_info,0,sizeof(*cache_info)); cache_info->type=UndefinedCache; cache_info->mode=IOMode; cache_info->disk_mode=IOMode; cache_info->colorspace=sRGBColorspace; cache_info->file=(-1); cache_info->id=GetMagickThreadId(); cache_info->number_threads=number_threads; if (GetOpenMPMaximumThreads() > cache_info->number_threads) cache_info->number_threads=GetOpenMPMaximumThreads(); if (GetMagickResourceLimit(ThreadResource) > cache_info->number_threads) cache_info->number_threads=(size_t) GetMagickResourceLimit(ThreadResource); if (cache_info->number_threads == 0) cache_info->number_threads=1; cache_info->nexus_info=AcquirePixelCacheNexus(cache_info->number_threads); if (cache_info->nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); value=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } value=GetPolicyValue("cache:synchronize"); if (value != (const char *) NULL) { cache_info->synchronize=IsStringTrue(value); value=DestroyString(value); } cache_info->width_limit=GetMagickResourceLimit(WidthResource); cache_info->height_limit=GetMagickResourceLimit(HeightResource); cache_info->semaphore=AcquireSemaphoreInfo(); cache_info->reference_count=1; cache_info->file_semaphore=AcquireSemaphoreInfo(); cache_info->debug=IsEventLogging(); cache_info->signature=MagickCoreSignature; return((Cache ) cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCacheNexus() allocates the NexusInfo structure. % % The format of the AcquirePixelCacheNexus method is: % % NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) % % A description of each parameter follows: % % o number_threads: the number of nexus threads. % */ MagickPrivate NexusInfo **AcquirePixelCacheNexus(const size_t number_threads) { NexusInfo **magick_restrict nexus_info; register ssize_t i; nexus_info=(NexusInfo **) MagickAssumeAligned(AcquireAlignedMemory(2* number_threads,sizeof(*nexus_info))); if (nexus_info == (NexusInfo **) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *nexus_info=(NexusInfo *) AcquireQuantumMemory(2*number_threads, sizeof(**nexus_info)); if (*nexus_info == (NexusInfo *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) memset(*nexus_info,0,2*number_threads*sizeof(**nexus_info)); for (i=0; i < (ssize_t) (2*number_threads); i++) { nexus_info[i]=(*nexus_info+i); if (i < (ssize_t) number_threads) nexus_info[i]->virtual_nexus=(*nexus_info+number_threads+i); nexus_info[i]->signature=MagickCoreSignature; } return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixelCachePixels() returns the pixels associated with the specified % image. 
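%
%  A minimal usage sketch (illustrative; the call returns NULL and sets
%  length to 0 unless the cache is memory- or map-based):
%
%    size_t length;
%    void *pixels=AcquirePixelCachePixels(image,&length,exception);
%    if (pixels != (void *) NULL)
%      (void) memcpy(buffer,pixels,length);
%
%  (buffer is a hypothetical caller-allocated destination of at least
%  length bytes.)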
% % The format of the AcquirePixelCachePixels() method is: % % void *AcquirePixelCachePixels(const Image *image,size_t *length, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o length: the pixel cache length. % % o exception: return any errors or warnings in this structure. % */ MagickExport void *AcquirePixelCachePixels(const Image *image,size_t *length, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); *length=0; if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache)) return((void *) NULL); *length=(size_t) cache_info->length; return(cache_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t G e n e s i s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentGenesis() instantiates the cache component. % % The format of the CacheComponentGenesis method is: % % MagickBooleanType CacheComponentGenesis(void) % */ MagickPrivate MagickBooleanType CacheComponentGenesis(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) cache_semaphore=AcquireSemaphoreInfo(); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C a c h e C o m p o n e n t T e r m i n u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CacheComponentTerminus() destroys the cache component. % % The format of the CacheComponentTerminus() method is: % % CacheComponentTerminus(void) % */ MagickPrivate void CacheComponentTerminus(void) { if (cache_semaphore == (SemaphoreInfo *) NULL) ActivateSemaphoreInfo(&cache_semaphore); /* no op-- nothing to destroy */ RelinquishSemaphoreInfo(&cache_semaphore); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l i p P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipPixelCacheNexus() clips the cache nexus as defined by the image clip % mask. The method returns MagickTrue if the pixel region is clipped, % otherwise MagickFalse. % % The format of the ClipPixelCacheNexus() method is: % % MagickBooleanType ClipPixelCacheNexus(Image *image,NexusInfo *nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClipPixelCacheNexus(Image *image, NexusInfo *nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickSizeType number_pixels; register Quantum *magick_restrict p, *magick_restrict q; register ssize_t n; /* Apply clip mask. 
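    Blend the authentic pixel p back over the queued update q in proportion
    to the write-mask value: where mask_alpha is (near) zero the update in
    q is left untouched, and only channels with the update trait
    participate in the blend.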
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->channels & WriteMaskChannel) == 0) return(MagickTrue); if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height, nexus_info->virtual_nexus,exception); q=nexus_info->pixels; number_pixels=(MagickSizeType) nexus_info->region.width* nexus_info->region.height; for (n=0; n < (ssize_t) number_pixels; n++) { double mask_alpha; register ssize_t i; if (p == (Quantum *) NULL) break; mask_alpha=QuantumScale*GetPixelWriteMask(image,p); if (fabs(mask_alpha) >= MagickEpsilon) { for (i=0; i < (ssize_t) image->number_channels; i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ClampToQuantum(MagickOver_((double) p[i],mask_alpha* GetPixelAlpha(image,p),(double) q[i],(double) GetPixelAlpha(image,q))); } SetPixelAlpha(image,GetPixelAlpha(image,p),q); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } return(n < (ssize_t) number_pixels ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCache() clones a pixel cache. % % The format of the ClonePixelCache() method is: % % Cache ClonePixelCache(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate Cache ClonePixelCache(const Cache cache) { CacheInfo *magick_restrict clone_info; const CacheInfo *magick_restrict cache_info; assert(cache != NULL); cache_info=(const CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); clone_info=(CacheInfo *) AcquirePixelCache(cache_info->number_threads); clone_info->virtual_pixel_method=cache_info->virtual_pixel_method; return((Cache ) clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheMethods() clones the pixel cache methods from one cache to % another. % % The format of the ClonePixelCacheMethods() method is: % % void ClonePixelCacheMethods(Cache clone,const Cache cache) % % A description of each parameter follows: % % o clone: Specifies a pointer to a Cache structure. % % o cache: the pixel cache. 
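%
%  Note that only the method table (the function pointers installed by
%  GetPixelCacheMethods() or a coder) is copied; pixel data is cloned
%  separately by ClonePixelCacheRepository().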
% */ MagickPrivate void ClonePixelCacheMethods(Cache clone,const Cache cache) { CacheInfo *magick_restrict cache_info, *magick_restrict source_info; assert(clone != (Cache) NULL); source_info=(CacheInfo *) clone; assert(source_info->signature == MagickCoreSignature); if (source_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", source_info->filename); assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); source_info->methods=cache_info->methods; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l o n e P i x e l C a c h e R e p o s i t o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClonePixelCacheRepository() clones the source pixel cache to the destination % cache. % % The format of the ClonePixelCacheRepository() method is: % % MagickBooleanType ClonePixelCacheRepository(CacheInfo *cache_info, % CacheInfo *source_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o source_info: the source pixel cache. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType ClonePixelCacheOnDisk( CacheInfo *magick_restrict cache_info,CacheInfo *magick_restrict clone_info) { MagickSizeType extent; size_t quantum; ssize_t count; struct stat file_stats; unsigned char *buffer; /* Clone pixel cache on disk with identical morphology. */ if ((OpenPixelCacheOnDisk(cache_info,ReadMode) == MagickFalse) || (OpenPixelCacheOnDisk(clone_info,IOMode) == MagickFalse)) return(MagickFalse); if ((lseek(cache_info->file,0,SEEK_SET) < 0) || (lseek(clone_info->file,0,SEEK_SET) < 0)) return(MagickFalse); quantum=(size_t) MagickMaxBufferExtent; if ((fstat(cache_info->file,&file_stats) == 0) && (file_stats.st_size > 0)) quantum=(size_t) MagickMin(file_stats.st_size,MagickMaxBufferExtent); buffer=(unsigned char *) AcquireQuantumMemory(quantum,sizeof(*buffer)); if (buffer == (unsigned char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); extent=0; while ((count=read(cache_info->file,buffer,quantum)) > 0) { ssize_t number_bytes; number_bytes=write(clone_info->file,buffer,(size_t) count); if (number_bytes != count) break; extent+=number_bytes; } buffer=(unsigned char *) RelinquishMagickMemory(buffer); if (extent != cache_info->length) return(MagickFalse); return(MagickTrue); } static MagickBooleanType ClonePixelCacheRepository( CacheInfo *magick_restrict clone_info,CacheInfo *magick_restrict cache_info, ExceptionInfo *exception) { #define MaxCacheThreads ((size_t) GetMagickResourceLimit(ThreadResource)) #define cache_number_threads(source,destination,chunk,multithreaded) \ num_threads((multithreaded) == 0 ? 1 : \ (((source)->type != MemoryCache) && ((source)->type != MapCache)) || \ (((destination)->type != MemoryCache) && ((destination)->type != MapCache)) ? 
\ MagickMax(MagickMin(GetMagickResourceLimit(ThreadResource),2),1) : \ MagickMax(MagickMin((ssize_t) GetMagickResourceLimit(ThreadResource),(ssize_t) (chunk)/256),1)) MagickBooleanType optimize, status; NexusInfo **magick_restrict cache_nexus, **magick_restrict clone_nexus; size_t length; ssize_t y; assert(cache_info != (CacheInfo *) NULL); assert(clone_info != (CacheInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); if (cache_info->type == PingCache) return(MagickTrue); length=cache_info->number_channels*sizeof(*cache_info->channel_map); if ((cache_info->storage_class == clone_info->storage_class) && (cache_info->colorspace == clone_info->colorspace) && (cache_info->alpha_trait == clone_info->alpha_trait) && (cache_info->channels == clone_info->channels) && (cache_info->columns == clone_info->columns) && (cache_info->rows == clone_info->rows) && (cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) && (cache_info->metacontent_extent == clone_info->metacontent_extent)) { /* Identical pixel cache morphology. */ if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && ((clone_info->type == MemoryCache) || (clone_info->type == MapCache))) { (void) memcpy(clone_info->pixels,cache_info->pixels, cache_info->number_channels*cache_info->columns*cache_info->rows* sizeof(*cache_info->pixels)); if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) (void) memcpy(clone_info->metacontent,cache_info->metacontent, cache_info->columns*cache_info->rows* clone_info->metacontent_extent*sizeof(unsigned char)); return(MagickTrue); } if ((cache_info->type == DiskCache) && (clone_info->type == DiskCache)) return(ClonePixelCacheOnDisk(cache_info,clone_info)); } /* Mismatched pixel cache morphology. */ cache_nexus=AcquirePixelCacheNexus(cache_info->number_threads); clone_nexus=AcquirePixelCacheNexus(clone_info->number_threads); length=cache_info->number_channels*sizeof(*cache_info->channel_map); optimize=(cache_info->number_channels == clone_info->number_channels) && (memcmp(cache_info->channel_map,clone_info->channel_map,length) == 0) ? MagickTrue : MagickFalse; length=(size_t) MagickMin(cache_info->number_channels*cache_info->columns, clone_info->number_channels*clone_info->columns); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; register ssize_t x; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCachePixels(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; (void) memset(clone_nexus[id]->pixels,0,(size_t) clone_nexus[id]->length); if (optimize != MagickFalse) (void) memcpy(clone_nexus[id]->pixels,cache_nexus[id]->pixels,length* sizeof(Quantum)); else { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; /* Mismatched pixel channel map. 
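    Copy channel by channel: for every channel the clone expects, look up
    the same PixelChannel in the source channel map and copy from its
    offset.  Channels absent from the source keep the zeros written by the
    memset above.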
*/ p=cache_nexus[id]->pixels; q=clone_nexus[id]->pixels; for (x=0; x < (ssize_t) cache_info->columns; x++) { register ssize_t i; if (x == (ssize_t) clone_info->columns) break; for (i=0; i < (ssize_t) clone_info->number_channels; i++) { PixelChannel channel; PixelTrait traits; channel=clone_info->channel_map[i].channel; traits=cache_info->channel_map[channel].traits; if (traits != UndefinedPixelTrait) *q=*(p+cache_info->channel_map[channel].offset); q++; } p+=cache_info->number_channels; } } status=WritePixelCachePixels(clone_info,clone_nexus[id],exception); } if ((cache_info->metacontent_extent != 0) && (clone_info->metacontent_extent != 0)) { /* Clone metacontent. */ length=(size_t) MagickMin(cache_info->metacontent_extent, clone_info->metacontent_extent); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ cache_number_threads(cache_info,clone_info,cache_info->rows,1) #endif for (y=0; y < (ssize_t) cache_info->rows; y++) { const int id = GetOpenMPThreadId(); Quantum *pixels; if (status == MagickFalse) continue; if (y >= (ssize_t) clone_info->rows) continue; pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,0,y, cache_info->columns,1,MagickFalse,cache_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; status=ReadPixelCacheMetacontent(cache_info,cache_nexus[id],exception); if (status == MagickFalse) continue; pixels=SetPixelCacheNexusPixels(clone_info,WriteMode,0,y, clone_info->columns,1,MagickFalse,clone_nexus[id],exception); if (pixels == (Quantum *) NULL) continue; if ((clone_nexus[id]->metacontent != (void *) NULL) && (cache_nexus[id]->metacontent != (void *) NULL)) (void) memcpy(clone_nexus[id]->metacontent, cache_nexus[id]->metacontent,length*sizeof(unsigned char)); status=WritePixelCacheMetacontent(clone_info,clone_nexus[id],exception); } } clone_nexus=DestroyPixelCacheNexus(clone_nexus,clone_info->number_threads); cache_nexus=DestroyPixelCacheNexus(cache_nexus,cache_info->number_threads); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"%s => %s", CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type), CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) clone_info->type)); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyImagePixelCache() method is: % % void DestroyImagePixelCache(Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void DestroyImagePixelCache(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->cache != (void *) NULL) image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImagePixels() deallocates memory associated with the pixel cache. 
% % The format of the DestroyImagePixels() method is: % % void DestroyImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImagePixels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.destroy_pixel_handler != (DestroyPixelHandler) NULL) { cache_info->methods.destroy_pixel_handler(image); return; } image->cache=DestroyPixelCache(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCache() deallocates memory associated with the pixel cache. % % The format of the DestroyPixelCache() method is: % % Cache DestroyPixelCache(Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ static MagickBooleanType ClosePixelCacheOnDisk(CacheInfo *cache_info) { int status; status=(-1); if (cache_info->file != -1) { status=close(cache_info->file); cache_info->file=(-1); RelinquishMagickResource(FileResource,1); } return(status == -1 ? MagickFalse : MagickTrue); } static inline void RelinquishPixelCachePixels(CacheInfo *cache_info) { switch (cache_info->type) { case MemoryCache: { #if defined(MAGICKCORE_OPENCL_SUPPORT) if (cache_info->opencl != (MagickCLCacheInfo) NULL) { cache_info->opencl=RelinquishMagickCLCacheInfo(cache_info->opencl, MagickTrue); cache_info->pixels=(Quantum *) NULL; break; } #endif if (cache_info->mapped == MagickFalse) cache_info->pixels=(Quantum *) RelinquishAlignedMemory( cache_info->pixels); else (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); RelinquishMagickResource(MemoryResource,cache_info->length); break; } case MapCache: { (void) UnmapBlob(cache_info->pixels,(size_t) cache_info->length); cache_info->pixels=(Quantum *) NULL; if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(MapResource,cache_info->length); } case DiskCache: { if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); if ((cache_info->mode != ReadMode) && (cache_info->mode != PersistMode)) (void) RelinquishUniqueFileResource(cache_info->cache_filename); *cache_info->cache_filename='\0'; RelinquishMagickResource(DiskResource,cache_info->length); break; } case DistributedCache: { *cache_info->cache_filename='\0'; (void) RelinquishDistributePixelCache((DistributeCacheInfo *) cache_info->server_info); break; } default: break; } cache_info->type=UndefinedCache; cache_info->mapped=MagickFalse; cache_info->metacontent=(void *) NULL; } MagickPrivate Cache DestroyPixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count--; if (cache_info->reference_count != 0) { 
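      /*
        Another image still shares this cache: drop our reference and let
        the last holder perform the actual teardown.
      */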
UnlockSemaphoreInfo(cache_info->semaphore); return((Cache) NULL); } UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->debug != MagickFalse) { char message[MagickPathExtent]; (void) FormatLocaleString(message,MagickPathExtent,"destroy %s", cache_info->filename); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } RelinquishPixelCachePixels(cache_info); if (cache_info->server_info != (DistributeCacheInfo *) NULL) cache_info->server_info=DestroyDistributeCacheInfo((DistributeCacheInfo *) cache_info->server_info); if (cache_info->nexus_info != (NexusInfo **) NULL) cache_info->nexus_info=DestroyPixelCacheNexus(cache_info->nexus_info, cache_info->number_threads); if (cache_info->random_info != (RandomInfo *) NULL) cache_info->random_info=DestroyRandomInfo(cache_info->random_info); if (cache_info->file_semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->file_semaphore); if (cache_info->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&cache_info->semaphore); cache_info->signature=(~MagickCoreSignature); cache_info=(CacheInfo *) RelinquishAlignedMemory(cache_info); cache=(Cache) NULL; return(cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyPixelCacheNexus() destroys a pixel cache nexus. % % The format of the DestroyPixelCacheNexus() method is: % % NexusInfo **DestroyPixelCacheNexus(NexusInfo *nexus_info, % const size_t number_threads) % % A description of each parameter follows: % % o nexus_info: the nexus to destroy. % % o number_threads: the number of nexus threads. % */ static inline void RelinquishCacheNexusPixels(NexusInfo *nexus_info) { if (nexus_info->mapped == MagickFalse) (void) RelinquishAlignedMemory(nexus_info->cache); else (void) UnmapBlob(nexus_info->cache,(size_t) nexus_info->length); nexus_info->cache=(Quantum *) NULL; nexus_info->pixels=(Quantum *) NULL; nexus_info->metacontent=(void *) NULL; nexus_info->length=0; nexus_info->mapped=MagickFalse; } MagickPrivate NexusInfo **DestroyPixelCacheNexus(NexusInfo **nexus_info, const size_t number_threads) { register ssize_t i; assert(nexus_info != (NexusInfo **) NULL); for (i=0; i < (ssize_t) (2*number_threads); i++) { if (nexus_info[i]->cache != (Quantum *) NULL) RelinquishCacheNexusPixels(nexus_info[i]); nexus_info[i]->signature=(~MagickCoreSignature); } *nexus_info=(NexusInfo *) RelinquishMagickMemory(*nexus_info); nexus_info=(NexusInfo **) RelinquishAlignedMemory(nexus_info); return(nexus_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontent() returns the authentic metacontent corresponding % with the last call to QueueAuthenticPixels() or GetVirtualPixels(). NULL is % returned if the associated pixels are not available. % % The format of the GetAuthenticMetacontent() method is: % % void *GetAuthenticMetacontent(const Image *image) % % A description of each parameter follows: % % o image: the image. 
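%
%  A minimal usage sketch (illustrative): the metacontent pointer is only
%  meaningful after the pixels themselves have been requested:
%
%    Quantum *q=GetAuthenticPixels(image,x,y,columns,rows,exception);
%    void *metacontent=GetAuthenticMetacontent(image);
%    if ((q != (Quantum *) NULL) && (metacontent != (void *) NULL))
%      { update pixels and metacontent, then call SyncAuthenticPixels() }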
% */ MagickExport void *GetAuthenticMetacontent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) { void *metacontent; metacontent=cache_info->methods. get_authentic_metacontent_from_handler(image); return(metacontent); } assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c M e t a c o n t e n t F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticMetacontentFromCache() returns the meta-content corresponding % with the last call to QueueAuthenticPixelsCache() or % GetAuthenticPixelsCache(). % % The format of the GetAuthenticMetacontentFromCache() method is: % % void *GetAuthenticMetacontentFromCache(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void *GetAuthenticMetacontentFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->metacontent); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticOpenCLBuffer() returns an OpenCL buffer used to execute OpenCL % operations. % % The format of the GetAuthenticOpenCLBuffer() method is: % % cl_mem GetAuthenticOpenCLBuffer(const Image *image, % MagickCLDevice device,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o device: the device to use. % % o exception: return any errors or warnings in this structure. 
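%
%  Note: a buffer is only available when the pixel cache is an unmapped
%  MemoryCache; the returned cl_mem has been retained with
%  RetainOpenCLMemObject(), so the caller owns that reference.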
% */ MagickPrivate cl_mem GetAuthenticOpenCLBuffer(const Image *image, MagickCLDevice device,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(device != (const MagickCLDevice) NULL); cache_info=(CacheInfo *) image->cache; if ((cache_info->type == UndefinedCache) || (cache_info->reference_count > 1)) { SyncImagePixelCache((Image *) image,exception); cache_info=(CacheInfo *) image->cache; } if ((cache_info->type != MemoryCache) || (cache_info->mapped != MagickFalse)) return((cl_mem) NULL); LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->opencl != (MagickCLCacheInfo) NULL) && (cache_info->opencl->device->context != device->context)) cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); if (cache_info->opencl == (MagickCLCacheInfo) NULL) { assert(cache_info->pixels != (Quantum *) NULL); cache_info->opencl=AcquireMagickCLCacheInfo(device,cache_info->pixels, cache_info->length); } if (cache_info->opencl != (MagickCLCacheInfo) NULL) RetainOpenCLMemObject(cache_info->opencl->buffer); UnlockSemaphoreInfo(cache_info->semaphore); if (cache_info->opencl == (MagickCLCacheInfo) NULL) return((cl_mem) NULL); assert(cache_info->opencl->pixels == cache_info->pixels); return(cache_info->opencl->buffer); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelCacheNexus() gets authentic pixels from the in-memory or % disk pixel cache as defined by the geometry parameters. A pointer to the % pixels is returned if the pixels are transferred, otherwise a NULL is % returned. % % The format of the GetAuthenticPixelCacheNexus() method is: % % Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o nexus_info: the cache nexus to return. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate Quantum *GetAuthenticPixelCacheNexus(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; Quantum *magick_restrict pixels; /* Transfer pixels from the cache. 
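    First map the nexus onto the requested region with
    QueueAuthenticPixelCacheNexus(); if the nexus aliases the canonical
    pixels directly there is nothing to read, otherwise stage the region
    into the nexus buffer with ReadPixelCachePixels() and, when
    metacontent is present, ReadPixelCacheMetacontent().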
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickTrue, nexus_info,exception); if (pixels == (Quantum *) NULL) return((Quantum *) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (nexus_info->authentic_pixel_cache != MagickFalse) return(pixels); if (ReadPixelCachePixels(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); if (cache_info->metacontent_extent != 0) if (ReadPixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse) return((Quantum *) NULL); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsFromCache() returns the pixels associated with the last % call to the QueueAuthenticPixelsCache() or GetAuthenticPixelsCache() methods. % % The format of the GetAuthenticPixelsFromCache() method is: % % Quantum *GetAuthenticPixelsFromCache(const Image image) % % A description of each parameter follows: % % o image: the image. % */ static Quantum *GetAuthenticPixelsFromCache(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l Q u e u e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelQueue() returns the authentic pixels associated % corresponding with the last call to QueueAuthenticPixels() or % GetAuthenticPixels(). % % The format of the GetAuthenticPixelQueue() method is: % % Quantum *GetAuthenticPixelQueue(const Image image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Quantum *GetAuthenticPixelQueue(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) return(cache_info->methods.get_authentic_pixels_from_handler(image)); assert(id < (int) cache_info->number_threads); return(cache_info->nexus_info[id]->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixels() obtains a pixel region for read/write access. If the % region is successfully accessed, a pointer to a Quantum array % representing the region is returned, otherwise NULL is returned. % % The returned pointer may point to a temporary working copy of the pixels % or it may point to the original pixels in memory. 
Performance is maximized % if the selected region is part of one row, or one or more full rows, since % then there is opportunity to access the pixels in-place (without a copy) % if the image is in memory, or in a memory-mapped file. The returned pointer % must *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % Quantum. If the image has corresponding metacontent,call % GetAuthenticMetacontent() after invoking GetAuthenticPixels() to obtain the % meta-content corresponding to the region. Once the Quantum array has % been updated, the changes must be saved back to the underlying image using % SyncAuthenticPixels() or they may be lost. % % The format of the GetAuthenticPixels() method is: % % Quantum *GetAuthenticPixels(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport Quantum *GetAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.get_authentic_pixels_handler(image,x,y,columns, rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t A u t h e n t i c P i x e l s C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetAuthenticPixelsCache() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % The format of the GetAuthenticPixelsCache() method is: % % Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
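%
%  The canonical access pattern, whether through this handler or the public
%  GetAuthenticPixels(), is read-modify-sync (illustrative sketch):
%
%    Quantum *q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
%    if (q != (Quantum *) NULL)
%      {
%        modify the row in place, then
%        (void) SyncAuthenticPixels(image,exception);
%      }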
% */ static Quantum *GetAuthenticPixelsCache(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return((Quantum *) NULL); assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); pixels=GetAuthenticPixelCacheNexus(image,x,y,columns,rows, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageExtent() returns the extent of the pixels associated corresponding % with the last call to QueueAuthenticPixels() or GetAuthenticPixels(). % % The format of the GetImageExtent() method is: % % MagickSizeType GetImageExtent(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickSizeType GetImageExtent(const Image *image) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); return(GetPixelCacheNexusExtent(cache_info,cache_info->nexus_info[id])); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCache() ensures that there is only a single reference to the % pixel cache to be modified, updating the provided cache pointer to point to % a clone of the original pixel cache if necessary. % % The format of the GetImagePixelCache method is: % % Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o clone: any value other than MagickFalse clones the cache pixels. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType ValidatePixelCacheMorphology( const Image *magick_restrict image) { const CacheInfo *magick_restrict cache_info; const PixelChannelMap *magick_restrict p, *magick_restrict q; /* Does the image match the pixel cache morphology? 
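    The morphology comprises storage class, colorspace, alpha trait,
    channel layout, geometry, and metacontent extent; any mismatch means
    the cache must be reopened before the image can be read or updated
    safely.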
*/ cache_info=(CacheInfo *) image->cache; p=image->channel_map; q=cache_info->channel_map; if ((image->storage_class != cache_info->storage_class) || (image->colorspace != cache_info->colorspace) || (image->alpha_trait != cache_info->alpha_trait) || (image->channels != cache_info->channels) || (image->columns != cache_info->columns) || (image->rows != cache_info->rows) || (image->number_channels != cache_info->number_channels) || (memcmp(p,q,image->number_channels*sizeof(*p)) != 0) || (image->metacontent_extent != cache_info->metacontent_extent) || (cache_info->nexus_info == (NexusInfo **) NULL)) return(MagickFalse); return(MagickTrue); } static Cache GetImagePixelCache(Image *image,const MagickBooleanType clone, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType destroy, status; static MagickSizeType cache_timelimit = MagickResourceInfinity, cpu_throttle = MagickResourceInfinity, cycles = 0; status=MagickTrue; if (cpu_throttle == MagickResourceInfinity) cpu_throttle=GetMagickResourceLimit(ThrottleResource); if ((cpu_throttle != 0) && ((cycles++ % 32) == 0)) MagickDelay(cpu_throttle); if (cache_epoch == 0) { /* Set the expire time in seconds. */ cache_timelimit=GetMagickResourceLimit(TimeResource); cache_epoch=GetMagickTime(); } if ((cache_timelimit != MagickResourceInfinity) && ((MagickSizeType) (GetMagickTime()-cache_epoch) >= cache_timelimit)) { #if defined(ECANCELED) errno=ECANCELED; #endif cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); ThrowFatalException(ResourceLimitFatalError,"TimeLimitExceeded"); } LockSemaphoreInfo(image->semaphore); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif destroy=MagickFalse; if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { LockSemaphoreInfo(cache_info->semaphore); if ((cache_info->reference_count > 1) || (cache_info->mode == ReadMode)) { CacheInfo *clone_info; Image clone_image; /* Clone pixel cache. */ clone_image=(*image); clone_image.semaphore=AcquireSemaphoreInfo(); clone_image.reference_count=1; clone_image.cache=ClonePixelCache(cache_info); clone_info=(CacheInfo *) clone_image.cache; status=OpenPixelCache(&clone_image,IOMode,exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { if (clone != MagickFalse) status=ClonePixelCacheRepository(clone_info,cache_info, exception); if (status == MagickFalse) clone_info=(CacheInfo *) DestroyPixelCache(clone_info); else { destroy=MagickTrue; image->cache=clone_info; } } RelinquishSemaphoreInfo(&clone_image.semaphore); } UnlockSemaphoreInfo(cache_info->semaphore); } if (destroy != MagickFalse) cache_info=(CacheInfo *) DestroyPixelCache(cache_info); if (status != MagickFalse) { /* Ensure the image matches the pixel cache morphology. 
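    The image structure may have drifted from the cache (for example after
    a resize or colorspace change), in which case reopen the cache so its
    in-memory or on-disk layout matches the image again.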
*/ if (image->type != UndefinedType) image->type=UndefinedType; if (ValidatePixelCacheMorphology(image) == MagickFalse) { status=OpenPixelCache(image,IOMode,exception); cache_info=(CacheInfo *) image->cache; if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); } } UnlockSemaphoreInfo(image->semaphore); if (status == MagickFalse) return((Cache) NULL); return(image->cache); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e P i x e l C a c h e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixelCacheType() returns the pixel cache type: UndefinedCache, % DiskCache, MemoryCache, MapCache, or PingCache. % % The format of the GetImagePixelCacheType() method is: % % CacheType GetImagePixelCacheType(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport CacheType GetImagePixelCacheType(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); return(cache_info->type); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e A u t h e n t i c P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixel() method is: % % MagickBooleanType GetOneAuthenticPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. 
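%
%  A minimal usage sketch (illustrative):
%
%    Quantum pixel[MaxPixelChannels];
%    if (GetOneAuthenticPixel(image,x,y,pixel,exception) != MagickFalse)
%      { pixel[] now holds one pixel, indexed by PixelChannel }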
% */ static inline MagickBooleanType CopyPixel(const Image *image, const Quantum *source,Quantum *destination) { register ssize_t i; if (source == (const Quantum *) NULL) { destination[RedPixelChannel]=ClampToQuantum(image->background_color.red); destination[GreenPixelChannel]=ClampToQuantum( image->background_color.green); destination[BluePixelChannel]=ClampToQuantum( image->background_color.blue); destination[BlackPixelChannel]=ClampToQuantum( image->background_color.black); destination[AlphaPixelChannel]=ClampToQuantum( image->background_color.alpha); return(MagickFalse); } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); destination[channel]=source[i]; } return(MagickTrue); } MagickExport MagickBooleanType GetOneAuthenticPixel(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; register Quantum *magick_restrict q; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) return(cache_info->methods.get_one_authentic_pixel_from_handler(image,x,y,pixel,exception)); q=GetAuthenticPixelsCache(image,x,y,1UL,1UL,exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e A u t h e n t i c P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneAuthenticPixelFromCache() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. % % The format of the GetOneAuthenticPixelFromCache() method is: % % MagickBooleanType GetOneAuthenticPixelFromCache(const Image image, % const ssize_t x,const ssize_t y,Quantum *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneAuthenticPixelFromCache(Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register Quantum *magick_restrict q; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); q=GetAuthenticPixelCacheNexus(image,x,y,1UL,1UL,cache_info->nexus_info[id], exception); return(CopyPixel(image,q,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixel() returns a single virtual pixel at the specified % (x,y) location. The image background color is returned if an error occurs. 
% If you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixel() method is: % % MagickBooleanType GetOneVirtualPixel(const Image image,const ssize_t x, % const ssize_t y,Quantum *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixel(const Image *image, const ssize_t x,const ssize_t y,Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); if (cache_info->methods.get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) return(cache_info->methods.get_one_virtual_pixel_from_handler(image, GetPixelCacheVirtualMethod(image),x,y,pixel,exception)); assert(id < (int) cache_info->number_threads); p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y, 1UL,1UL,cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t O n e V i r t u a l P i x e l F r o m C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelFromCache() returns a single virtual pixel at the % specified (x,y) location. The image background color is returned if an % error occurs. % % The format of the GetOneVirtualPixelFromCache() method is: % % MagickBooleanType GetOneVirtualPixelFromCache(const Image image, % const VirtualPixelMethod method,const ssize_t x,const ssize_t y, % Quantum *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: These values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType GetOneVirtualPixelFromCache(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, Quantum *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); const Quantum *p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); (void) memset(pixel,0,MaxPixelChannels*sizeof(*pixel)); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); return(CopyPixel(image,p,pixel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t O n e V i r t u a l P i x e l I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetOneVirtualPixelInfo() returns a single pixel at the specified (x,y) % location. 
The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOneAuthenticPixel() instead. % % The format of the GetOneVirtualPixelInfo() method is: % % MagickBooleanType GetOneVirtualPixelInfo(const Image image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,PixelInfo *pixel,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: these values define the location of the pixel to return. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType GetOneVirtualPixelInfo(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, PixelInfo *pixel,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); register const Quantum *magick_restrict p; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); GetPixelInfo(image,pixel); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,1UL,1UL, cache_info->nexus_info[id],exception); if (p == (const Quantum *) NULL) return(MagickFalse); GetPixelInfoPixel(image,p,pixel); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e C o l o r s p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheColorspace() returns the colorspace of the pixel cache. % % The format of the GetPixelCacheColorspace() method is: % % Colorspace GetPixelCacheColorspace(const Cache cache) % % A description of each parameter follows: % % o cache: the pixel cache. % */ MagickPrivate ColorspaceType GetPixelCacheColorspace(const Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); return(cache_info->colorspace); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t P i x e l C a c h e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetPixelCacheFilename() returns the filename associated with the pixel % cache. % % The format of the GetPixelCacheFilename() method is: % % const char *GetPixelCacheFilename(const Image *image) % % A description of each parameter follows: % % o image: the image. 
%
*/
MagickExport const char *GetPixelCacheFilename(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->cache_filename);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   P i x e l   C a c h e   M e t h o d s                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheMethods() initializes the CacheMethods structure.
%
%  The format of the GetPixelCacheMethods() method is:
%
%      void GetPixelCacheMethods(CacheMethods *cache_methods)
%
%  A description of each parameter follows:
%
%    o cache_methods: Specifies a pointer to a CacheMethods structure.
%
*/
MagickPrivate void GetPixelCacheMethods(CacheMethods *cache_methods)
{
  assert(cache_methods != (CacheMethods *) NULL);
  (void) memset(cache_methods,0,sizeof(*cache_methods));
  cache_methods->get_virtual_pixel_handler=GetVirtualPixelCache;
  cache_methods->get_virtual_pixels_handler=GetVirtualPixelsCache;
  cache_methods->get_virtual_metacontent_from_handler=
    GetVirtualMetacontentFromCache;
  cache_methods->get_one_virtual_pixel_from_handler=GetOneVirtualPixelFromCache;
  cache_methods->get_authentic_pixels_handler=GetAuthenticPixelsCache;
  cache_methods->get_authentic_metacontent_from_handler=
    GetAuthenticMetacontentFromCache;
  cache_methods->get_authentic_pixels_from_handler=GetAuthenticPixelsFromCache;
  cache_methods->get_one_authentic_pixel_from_handler=
    GetOneAuthenticPixelFromCache;
  cache_methods->queue_authentic_pixels_handler=QueueAuthenticPixelsCache;
  cache_methods->sync_authentic_pixels_handler=SyncAuthenticPixelsCache;
  cache_methods->destroy_pixel_handler=DestroyImagePixelCache;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   P i x e l   C a c h e   N e x u s   E x t e n t                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheNexusExtent() returns the extent of the pixels associated with
%  the last call to SetPixelCacheNexusPixels() or GetPixelCacheNexusPixels().
%
%  The format of the GetPixelCacheNexusExtent() method is:
%
%      MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the nexus info.
%
*/
MagickPrivate MagickSizeType GetPixelCacheNexusExtent(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickSizeType
    extent;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  extent=(MagickSizeType) nexus_info->region.width*nexus_info->region.height;
  if (extent == 0)
    return((MagickSizeType) cache_info->columns*cache_info->rows);
  return(extent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   P i x e l   C a c h e   P i x e l s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCachePixels() returns the pixels associated with the specified
%  image.
%
%  The format of the GetPixelCachePixels() method is:
%
%      void *GetPixelCachePixels(Image *image,MagickSizeType *length,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o length: the pixel cache length.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetPixelCachePixels(Image *image,MagickSizeType *length,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  assert(length != (MagickSizeType *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *length=cache_info->length;
  if ((cache_info->type != MemoryCache) && (cache_info->type != MapCache))
    return((void *) NULL);
  return((void *) cache_info->pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   P i x e l   C a c h e   S t o r a g e   C l a s s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheStorageClass() returns the class type of the pixel cache.
%
%  The format of the GetPixelCacheStorageClass() method is:
%
%      ClassType GetPixelCacheStorageClass(const Cache cache)
%
%  A description of each parameter follows:
%
%    o type: GetPixelCacheStorageClass returns DirectClass or PseudoClass.
%
%    o cache: the pixel cache.
%
*/
MagickPrivate ClassType GetPixelCacheStorageClass(const Cache cache)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      cache_info->filename);
  return(cache_info->storage_class);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   P i x e l   C a c h e   T i l e   S i z e                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheTileSize() returns the pixel cache tile size.
%
%  The format of the GetPixelCacheTileSize() method is:
%
%      void GetPixelCacheTileSize(const Image *image,size_t *width,
%        size_t *height)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the optimized cache tile width in pixels.
%
%    o height: the optimized cache tile height in pixels.
%
*/
MagickPrivate void GetPixelCacheTileSize(const Image *image,size_t *width,
  size_t *height)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  *width=2048UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
  if (GetImagePixelCacheType(image) == DiskCache)
    *width=8192UL/(MagickMax(cache_info->number_channels,1)*sizeof(Quantum));
  *height=(*width);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   P i x e l   C a c h e   V i r t u a l   M e t h o d               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixelCacheVirtualMethod() gets the "virtual pixels" method for the
%  pixel cache.  A virtual pixel is any pixel access that is outside the
%  boundaries of the image cache.
%
%  The format of the GetPixelCacheVirtualMethod() method is:
%
%      VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickPrivate VirtualPixelMethod GetPixelCacheVirtualMethod(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  return(cache_info->virtual_pixel_method);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   M e t a c o n t e n t   F r o m   C a c h e       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontentFromCache() returns the meta-content corresponding
%  with the last call to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualMetacontentFromCache() method is:
%
%      const void *GetVirtualMetacontentFromCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static const void *GetVirtualMetacontentFromCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  metacontent=GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]);
  return(metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   M e t a c o n t e n t   F r o m   N e x u s       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontentFromNexus() returns the meta-content for the specified
%  cache nexus.
%
%  The format of the GetVirtualMetacontentFromNexus() method is:
%
%      const void *GetVirtualMetacontentFromNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the meta-content.
%
*/
MagickPrivate const void *GetVirtualMetacontentFromNexus(const Cache cache,
  NexusInfo *magick_restrict nexus_info)
{
  CacheInfo
    *magick_restrict cache_info;

  assert(cache != (Cache) NULL);
  cache_info=(CacheInfo *) cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->storage_class == UndefinedClass)
    return((void *) NULL);
  return(nexus_info->metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   M e t a c o n t e n t                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualMetacontent() returns the virtual metacontent corresponding with
%  the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL is
%  returned if the meta-content is not available.
%
%  The format of the GetVirtualMetacontent() method is:
%
%      const void *GetVirtualMetacontent(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const void *GetVirtualMetacontent(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const void
    *magick_restrict metacontent;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  metacontent=cache_info->methods.get_virtual_metacontent_from_handler(image);
  if (metacontent != (void *) NULL)
    return(metacontent);
  assert(id < (int) cache_info->number_threads);
  metacontent=GetVirtualMetacontentFromNexus(cache_info,
    cache_info->nexus_info[id]);
  return(metacontent);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l   C a c h e   N e x u s                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCacheNexus() gets virtual pixels from the in-memory or disk
%  pixel cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCacheNexus() method is:
%
%      const Quantum *GetVirtualPixelCacheNexus(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        NexusInfo *nexus_info,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to acquire.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static ssize_t
  DitherMatrix[64] =
  {
     0,  48,  12,  60,   3,  51,  15,  63,
    32,  16,  44,  28,  35,  19,  47,  31,
     8,  56,   4,  52,  11,  59,   7,  55,
    40,  24,  36,  20,  43,  27,  39,  23,
     2,  50,  14,  62,   1,  49,  13,  61,
    34,  18,  46,  30,  33,  17,  45,  29,
    10,  58,   6,  54,   9,  57,   5,  53,
    42,  26,  38,  22,  41,  25,  37,  21
  };

static inline ssize_t DitherX(const ssize_t x,const size_t columns)
{
  ssize_t
    index;

  index=x+DitherMatrix[x & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) columns)
    return((ssize_t) columns-1L);
  return(index);
}

static inline ssize_t DitherY(const ssize_t y,const size_t rows)
{
  ssize_t
    index;

  index=y+DitherMatrix[y & 0x07]-32L;
  if (index < 0L)
    return(0L);
  if (index >= (ssize_t) rows)
    return((ssize_t) rows-1L);
  return(index);
}

static inline ssize_t EdgeX(const ssize_t x,const size_t columns)
{
  if (x < 0L)
    return(0L);
  if (x >= (ssize_t) columns)
    return((ssize_t) (columns-1));
  return(x);
}

static inline ssize_t EdgeY(const ssize_t y,const size_t rows)
{
  if (y < 0L)
    return(0L);
  if (y >= (ssize_t) rows)
    return((ssize_t) (rows-1));
  return(y);
}

static inline ssize_t RandomX(RandomInfo *random_info,const size_t columns)
{
  return((ssize_t) (columns*GetPseudoRandomValue(random_info)));
}

static inline ssize_t RandomY(RandomInfo *random_info,const size_t rows)
{
  return((ssize_t) (rows*GetPseudoRandomValue(random_info)));
}

static inline MagickModulo VirtualPixelModulo(const ssize_t offset,
  const size_t extent)
{
  MagickModulo
    modulo;

  modulo.quotient=offset/((ssize_t) extent);
  modulo.remainder=offset % ((ssize_t) extent);
  if ((modulo.remainder != 0) && ((offset ^ ((ssize_t) extent)) < 0))
    {
      modulo.quotient-=1;
      modulo.remainder+=((ssize_t) extent);
    }
  return(modulo);
}

MagickPrivate const Quantum *GetVirtualPixelCacheNexus(const Image *image,
  const VirtualPixelMethod
virtual_pixel_method,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickOffsetType offset; MagickSizeType length, number_pixels; NexusInfo *magick_restrict virtual_nexus; Quantum *magick_restrict pixels, virtual_pixel[MaxPixelChannels]; register const Quantum *magick_restrict p; register const void *magick_restrict r; register Quantum *magick_restrict q; register ssize_t i, u; register unsigned char *magick_restrict s; ssize_t v; void *magick_restrict virtual_metacontent; /* Acquire pixels. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((const Quantum *) NULL); #if defined(MAGICKCORE_OPENCL_SUPPORT) CopyOpenCLBuffer(cache_info); #endif pixels=SetPixelCacheNexusPixels(cache_info,ReadMode,x,y,columns,rows, ((image->channels & WriteMaskChannel) != 0) || ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse, nexus_info,exception); if (pixels == (Quantum *) NULL) return((const Quantum *) NULL); q=pixels; offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) (nexus_info->region.height-1L)*cache_info->columns+ nexus_info->region.width-1L; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; if ((offset >= 0) && (((MagickSizeType) offset+length) < number_pixels)) if ((x >= 0) && ((ssize_t) (x+columns-1) < (ssize_t) cache_info->columns) && (y >= 0) && ((ssize_t) (y+rows-1) < (ssize_t) cache_info->rows)) { MagickBooleanType status; /* Pixel request is inside cache extents. */ if (nexus_info->authentic_pixel_cache != MagickFalse) return(q); status=ReadPixelCachePixels(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); if (cache_info->metacontent_extent != 0) { status=ReadPixelCacheMetacontent(cache_info,nexus_info,exception); if (status == MagickFalse) return((const Quantum *) NULL); } return(q); } /* Pixel request is outside cache extents. */ virtual_nexus=nexus_info->virtual_nexus; s=(unsigned char *) nexus_info->metacontent; (void) memset(virtual_pixel,0,cache_info->number_channels* sizeof(*virtual_pixel)); virtual_metacontent=(void *) NULL; switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: case EdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: { if (cache_info->metacontent_extent != 0) { /* Acquire a metacontent buffer. 
*/ virtual_metacontent=(void *) AcquireQuantumMemory(1, cache_info->metacontent_extent); if (virtual_metacontent == (void *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), CacheError,"UnableToGetCacheNexus","`%s'",image->filename); return((const Quantum *) NULL); } (void) memset(virtual_metacontent,0,cache_info->metacontent_extent); } switch (virtual_pixel_method) { case BlackVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case GrayVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange/2, virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } case TransparentVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,(Quantum) 0,virtual_pixel); SetPixelAlpha(image,TransparentAlpha,virtual_pixel); break; } case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { for (i=0; i < (ssize_t) cache_info->number_channels; i++) SetPixelChannel(image,(PixelChannel) i,QuantumRange,virtual_pixel); SetPixelAlpha(image,OpaqueAlpha,virtual_pixel); break; } default: { SetPixelRed(image,ClampToQuantum(image->background_color.red), virtual_pixel); SetPixelGreen(image,ClampToQuantum(image->background_color.green), virtual_pixel); SetPixelBlue(image,ClampToQuantum(image->background_color.blue), virtual_pixel); SetPixelBlack(image,ClampToQuantum(image->background_color.black), virtual_pixel); SetPixelAlpha(image,ClampToQuantum(image->background_color.alpha), virtual_pixel); break; } } break; } default: break; } for (v=0; v < (ssize_t) rows; v++) { ssize_t y_offset; y_offset=y+v; if ((virtual_pixel_method == EdgeVirtualPixelMethod) || (virtual_pixel_method == UndefinedVirtualPixelMethod)) y_offset=EdgeY(y_offset,cache_info->rows); for (u=0; u < (ssize_t) columns; u+=length) { ssize_t x_offset; x_offset=x+u; length=(MagickSizeType) MagickMin(cache_info->columns-x_offset,columns-u); if (((x_offset < 0) || (x_offset >= (ssize_t) cache_info->columns)) || ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) || (length == 0)) { MagickModulo x_modulo, y_modulo; /* Transfer a single pixel. 
*/ length=(MagickSizeType) 1; switch (virtual_pixel_method) { case EdgeVirtualPixelMethod: default: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns), EdgeY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info, nexus_info->virtual_nexus); break; } case RandomVirtualPixelMethod: { if (cache_info->random_info == (RandomInfo *) NULL) cache_info->random_info=AcquireRandomInfo(); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, RandomX(cache_info->random_info,cache_info->columns), RandomY(cache_info->random_info,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case DitherVirtualPixelMethod: { p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, DitherX(x_offset,cache_info->columns), DitherY(y_offset,cache_info->rows),1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case TileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case MirrorVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); if ((x_modulo.quotient & 0x01) == 1L) x_modulo.remainder=(ssize_t) cache_info->columns- x_modulo.remainder-1L; y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if ((y_modulo.quotient & 0x01) == 1L) y_modulo.remainder=(ssize_t) cache_info->rows- y_modulo.remainder-1L; p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileEdgeVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,EdgeY(y_offset,cache_info->rows),1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case VerticalTileEdgeVirtualPixelMethod: { y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, EdgeX(x_offset,cache_info->columns),y_modulo.remainder,1UL,1UL, virtual_nexus,exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case BackgroundVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case TransparentVirtualPixelMethod: case MaskVirtualPixelMethod: case WhiteVirtualPixelMethod: { p=virtual_pixel; r=virtual_metacontent; break; } case CheckerTileVirtualPixelMethod: { x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); if (((x_modulo.quotient ^ y_modulo.quotient) & 0x01) != 0L) { p=virtual_pixel; r=virtual_metacontent; break; } p=GetVirtualPixelCacheNexus(image,virtual_pixel_method, x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus, exception); r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus); break; } case HorizontalTileVirtualPixelMethod: { if ((y_offset < 0) || (y_offset >= (ssize_t) cache_info->rows)) { p=virtual_pixel; r=virtual_metacontent; break; } x_modulo=VirtualPixelModulo(x_offset,cache_info->columns); y_modulo=VirtualPixelModulo(y_offset,cache_info->rows); 
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
            case VerticalTileVirtualPixelMethod:
            {
              if ((x_offset < 0) ||
                  (x_offset >= (ssize_t) cache_info->columns))
                {
                  p=virtual_pixel;
                  r=virtual_metacontent;
                  break;
                }
              x_modulo=VirtualPixelModulo(x_offset,cache_info->columns);
              y_modulo=VirtualPixelModulo(y_offset,cache_info->rows);
              p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,
                x_modulo.remainder,y_modulo.remainder,1UL,1UL,virtual_nexus,
                exception);
              r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
              break;
            }
          }
          if (p == (const Quantum *) NULL)
            break;
          (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
            sizeof(*p)));
          q+=cache_info->number_channels;
          if ((s != (void *) NULL) && (r != (const void *) NULL))
            {
              (void) memcpy(s,r,(size_t) cache_info->metacontent_extent);
              s+=cache_info->metacontent_extent;
            }
          continue;
        }
      /*
        Transfer a run of pixels.
      */
      p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x_offset,y_offset,
        (size_t) length,1UL,virtual_nexus,exception);
      if (p == (const Quantum *) NULL)
        break;
      r=GetVirtualMetacontentFromNexus(cache_info,virtual_nexus);
      (void) memcpy(q,p,(size_t) (cache_info->number_channels*length*
        sizeof(*p)));
      q+=cache_info->number_channels*length;
      if ((r != (void *) NULL) && (s != (const void *) NULL))
        {
          (void) memcpy(s,r,(size_t) length);
          s+=length*cache_info->metacontent_extent;
        }
    }
    if (u < (ssize_t) columns)
      break;
  }
  /*
    Free resources.
  */
  if (virtual_metacontent != (void *) NULL)
    virtual_metacontent=(void *) RelinquishMagickMemory(virtual_metacontent);
  if (v < (ssize_t) rows)
    return((const Quantum *) NULL);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l   C a c h e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelCache() gets virtual pixels from the in-memory or disk pixel
%  cache as defined by the geometry parameters.  A pointer to the pixels
%  is returned if the pixels are transferred, otherwise a NULL is returned.
%
%  The format of the GetVirtualPixelCache() method is:
%
%      const Quantum *GetVirtualPixelCache(const Image *image,
%        const VirtualPixelMethod virtual_pixel_method,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o virtual_pixel_method: the virtual pixel method.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
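%
%  As a worked example of how a virtual pixel method resolves out-of-bounds
%  coordinates: under MirrorVirtualPixelMethod, a request at x = -3 on a
%  10-column image yields VirtualPixelModulo(-3,10) = { quotient -1,
%  remainder 7 }; the odd quotient reflects the remainder to 10-7-1 = 2, so
%  the pixel at column 2 is returned.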
%
*/
static const Quantum *GetVirtualPixelCache(const Image *image,
  const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,virtual_pixel_method,x,y,columns,rows,
    cache_info->nexus_info[id],exception);
  return(p);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l   Q u e u e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelQueue() returns the virtual pixels associated with the last
%  call to QueueAuthenticPixels() or GetVirtualPixels().
%
%  The format of the GetVirtualPixelQueue() method is:
%
%      const Quantum *GetVirtualPixelQueue(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport const Quantum *GetVirtualPixelQueue(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixels_handler !=
       (GetVirtualPixelsHandler) NULL)
    return(cache_info->methods.get_virtual_pixels_handler(image));
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(cache_info,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t   V i r t u a l   P i x e l s                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixels() returns an immutable pixel region.  If the region is
%  successfully accessed, a pointer to it is returned, otherwise NULL is
%  returned.  The returned pointer may point to a temporary working copy of
%  the pixels or it may point to the original pixels in memory.  Performance
%  is maximized if the selected region is part of one row, or one or more
%  full rows, since there is opportunity to access the pixels in-place
%  (without a copy) if the image is in memory, or in a memory-mapped file.
%  The returned pointer must *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetVirtualMetacontent() after invoking GetVirtualPixels() to access
%  the meta-content (of type void) corresponding to the region.
%
%  If you plan to modify the pixels, use GetAuthenticPixels() instead.
%
%  Note, the GetVirtualPixels() and GetAuthenticPixels() methods are not
%  thread-safe.  In a threaded environment, use GetCacheViewVirtualPixels()
%  or GetCacheViewAuthenticPixels() instead.
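%
%  A minimal usage sketch (illustrative only; error handling is abbreviated
%  and image, x, y, and exception are assumed to be in scope): sum the channel
%  values of a 3x3 neighborhood without modifying the image:
%
%      double sum = 0.0;
%      const Quantum *p = GetVirtualPixels(image,x-1,y-1,3,3,exception);
%      if (p != (const Quantum *) NULL)
%        {
%          ssize_t i;
%
%          for (i=0; i < (ssize_t) (3*3*GetPixelChannels(image)); i++)
%            sum+=(double) p[i];  /* read-only access to channel values */
%        }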
%
%  The format of the GetVirtualPixels() method is:
%
%      const Quantum *GetVirtualPixels(const Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport const Quantum *GetVirtualPixels(const Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  const Quantum
    *magick_restrict p;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  if (cache_info->methods.get_virtual_pixel_handler !=
       (GetVirtualPixelHandler) NULL)
    return(cache_info->methods.get_virtual_pixel_handler(image,
      GetPixelCacheVirtualMethod(image),x,y,columns,rows,exception));
  assert(id < (int) cache_info->number_threads);
  p=GetVirtualPixelCacheNexus(image,GetPixelCacheVirtualMethod(image),x,y,
    columns,rows,cache_info->nexus_info[id],exception);
  return(p);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l s   C a c h e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsCache() returns the pixels associated with the last call
%  to QueueAuthenticPixelsCache() or GetVirtualPixelCache().
%
%  The format of the GetVirtualPixelsCache() method is:
%
%      const Quantum *GetVirtualPixelsCache(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
static const Quantum *GetVirtualPixelsCache(const Image *image)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  return(GetVirtualPixelsNexus(image->cache,cache_info->nexus_info[id]));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t   V i r t u a l   P i x e l s   N e x u s                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetVirtualPixelsNexus() returns the pixels associated with the specified
%  cache nexus.
%
%  The format of the GetVirtualPixelsNexus() method is:
%
%      const Quantum *GetVirtualPixelsNexus(const Cache cache,
%        NexusInfo *nexus_info)
%
%  A description of each parameter follows:
%
%    o cache: the pixel cache.
%
%    o nexus_info: the cache nexus to return the colormap pixels.
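%
%  An internal usage sketch (illustrative only; assumes the nexus was just
%  populated for this cache by GetVirtualPixelCacheNexus()):
%
%      const Quantum *p = GetVirtualPixelsNexus(image->cache,nexus_info);
%      if (p == (const Quantum *) NULL)
%        { /* the cache storage class is undefined; no pixels available */ }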
% */ MagickPrivate const Quantum *GetVirtualPixelsNexus(const Cache cache, NexusInfo *magick_restrict nexus_info) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->storage_class == UndefinedClass) return((Quantum *) NULL); return((const Quantum *) nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + M a s k P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaskPixelCacheNexus() masks the cache nexus as defined by the image mask. % The method returns MagickTrue if the pixel region is masked, otherwise % MagickFalse. % % The format of the MaskPixelCacheNexus() method is: % % MagickBooleanType MaskPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to clip. % % o exception: return any errors or warnings in this structure. % */ static inline Quantum ApplyPixelCompositeMask(const Quantum p, const MagickRealType alpha,const Quantum q,const MagickRealType beta) { double mask_alpha; Quantum pixel; if (fabs(alpha-OpaqueAlpha) < MagickEpsilon) return(p); mask_alpha=1.0-QuantumScale*QuantumScale*alpha*beta; mask_alpha=PerceptibleReciprocal(mask_alpha); pixel=ClampToQuantum(mask_alpha*MagickOver_((double) p,alpha,(double) q, beta)); return(pixel); } static MagickBooleanType MaskPixelCacheNexus(Image *image,NexusInfo *nexus_info, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickSizeType number_pixels; register Quantum *magick_restrict p, *magick_restrict q; register ssize_t n; /* Apply clip mask. */ if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((image->channels & CompositeMaskChannel) == 0) return(MagickTrue); if ((nexus_info->region.width == 0) || (nexus_info->region.height == 0)) return(MagickTrue); cache_info=(CacheInfo *) image->cache; if (cache_info == (Cache) NULL) return(MagickFalse); p=GetAuthenticPixelCacheNexus(image,nexus_info->region.x,nexus_info->region.y, nexus_info->region.width,nexus_info->region.height, nexus_info->virtual_nexus,exception); q=nexus_info->pixels; number_pixels=(MagickSizeType) nexus_info->region.width* nexus_info->region.height; for (n=0; n < (ssize_t) number_pixels; n++) { double mask_alpha; register ssize_t i; if (p == (Quantum *) NULL) break; mask_alpha=(double) GetPixelCompositeMask(image,p); for (i=0; i < (ssize_t) image->number_channels; i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[i]=ApplyPixelCompositeMask(p[i],mask_alpha,q[i],(MagickRealType) GetPixelAlpha(image,q)); } p+=GetPixelChannels(image); q+=GetPixelChannels(image); } if (n < (ssize_t) number_pixels) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p e n P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OpenPixelCache() allocates the pixel cache. This includes defining the cache % dimensions, allocating space for the image pixels and optionally the % metacontent, and memory mapping the cache if it is disk based. 
The cache % nexus array is initialized as well. % % The format of the OpenPixelCache() method is: % % MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o mode: ReadMode, WriteMode, or IOMode. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType OpenPixelCacheOnDisk(CacheInfo *cache_info, const MapMode mode) { int file; /* Open pixel cache on disk. */ if ((cache_info->file != -1) && (cache_info->disk_mode == mode)) return(MagickTrue); /* cache already open and in the proper mode */ if (*cache_info->cache_filename == '\0') file=AcquireUniqueFileResource(cache_info->cache_filename); else switch (mode) { case ReadMode: { file=open_utf8(cache_info->cache_filename,O_RDONLY | O_BINARY,0); break; } case WriteMode: { file=open_utf8(cache_info->cache_filename,O_WRONLY | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_WRONLY | O_BINARY,S_MODE); break; } case IOMode: default: { file=open_utf8(cache_info->cache_filename,O_RDWR | O_CREAT | O_BINARY | O_EXCL,S_MODE); if (file == -1) file=open_utf8(cache_info->cache_filename,O_RDWR | O_BINARY,S_MODE); break; } } if (file == -1) return(MagickFalse); (void) AcquireMagickResource(FileResource,1); if (cache_info->file != -1) (void) ClosePixelCacheOnDisk(cache_info); cache_info->file=file; cache_info->disk_mode=mode; return(MagickTrue); } static inline MagickOffsetType WritePixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,const unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PWRITE) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PWRITE) count=write(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pwrite(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),offset+i); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType SetPixelCacheExtent(Image *image,MagickSizeType length) { CacheInfo *magick_restrict cache_info; MagickOffsetType count, extent, offset; cache_info=(CacheInfo *) image->cache; if (image->debug != MagickFalse) { char format[MagickPathExtent], message[MagickPathExtent]; (void) FormatMagickSize(length,MagickFalse,"B",MagickPathExtent,format); (void) FormatLocaleString(message,MagickPathExtent, "extend %s (%s[%d], disk, %s)",cache_info->filename, cache_info->cache_filename,cache_info->file,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message); } if (length != (MagickSizeType) ((MagickOffsetType) length)) return(MagickFalse); offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_END); if (offset < 0) return(MagickFalse); if ((MagickSizeType) offset >= length) count=(MagickOffsetType) 1; else { extent=(MagickOffsetType) length-1; count=WritePixelCacheRegion(cache_info,extent,1,(const unsigned char *) ""); if (count != 1) return(MagickFalse); #if defined(MAGICKCORE_HAVE_POSIX_FALLOCATE) if (cache_info->synchronize != MagickFalse) if (posix_fallocate(cache_info->file,offset+1,extent-offset) != 0) return(MagickFalse); #endif } offset=(MagickOffsetType) lseek(cache_info->file,0,SEEK_SET); if (offset < 0) return(MagickFalse); return(MagickTrue); } static 
MagickBooleanType OpenPixelCache(Image *image,const MapMode mode, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info, source_info; char format[MagickPathExtent], message[MagickPathExtent]; const char *hosts, *type; MagickBooleanType status; MagickSizeType length, number_pixels; size_t columns, packet_size; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (cache_anonymous_memory < 0) { char *value; /* Does the security policy require anonymous mapping for pixel cache? */ cache_anonymous_memory=0; value=GetPolicyValue("pixel-cache-memory"); if (value == (char *) NULL) value=GetPolicyValue("cache:memory-map"); if (LocaleCompare(value,"anonymous") == 0) { #if defined(MAGICKCORE_HAVE_MMAP) && defined(MAP_ANONYMOUS) cache_anonymous_memory=1; #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateError,"DelegateLibrarySupportNotBuiltIn", "'%s' (policy requires anonymous memory mapping)",image->filename); #endif } value=DestroyString(value); } if ((image->columns == 0) || (image->rows == 0)) ThrowBinaryException(CacheError,"NoPixelsDefinedInCache",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (((MagickSizeType) image->columns > cache_info->width_limit) || ((MagickSizeType) image->rows > cache_info->height_limit)) ThrowBinaryException(ImageError,"WidthOrHeightExceedsLimit", image->filename); length=GetImageListLength(image); if (AcquireMagickResource(ListLengthResource,length) == MagickFalse) ThrowBinaryException(ResourceLimitError,"ListLengthExceedsLimit", image->filename); source_info=(*cache_info); source_info.file=(-1); (void) FormatLocaleString(cache_info->filename,MagickPathExtent,"%s[%.20g]", image->filename,(double) image->scene); cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->alpha_trait=image->alpha_trait; cache_info->channels=image->channels; cache_info->rows=image->rows; cache_info->columns=image->columns; InitializePixelChannelMap(image); cache_info->number_channels=GetPixelChannels(image); (void) memcpy(cache_info->channel_map,image->channel_map,MaxPixelChannels* sizeof(*image->channel_map)); cache_info->metacontent_extent=image->metacontent_extent; cache_info->mode=mode; number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows; packet_size=cache_info->number_channels*sizeof(Quantum); if (image->metacontent_extent != 0) packet_size+=cache_info->metacontent_extent; length=number_pixels*packet_size; columns=(size_t) (length/cache_info->rows/packet_size); if ((cache_info->columns != columns) || ((ssize_t) cache_info->columns < 0) || ((ssize_t) cache_info->rows < 0)) ThrowBinaryException(ResourceLimitError,"PixelCacheAllocationFailed", image->filename); cache_info->length=length; if (image->ping != MagickFalse) { cache_info->storage_class=image->storage_class; cache_info->colorspace=image->colorspace; cache_info->type=PingCache; return(MagickTrue); } status=AcquireMagickResource(AreaResource,(MagickSizeType) cache_info->columns*cache_info->rows); if (cache_info->mode == PersistMode) status=MagickFalse; length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if ((status != MagickFalse) && (length == (MagickSizeType) ((size_t) length)) && ((cache_info->type == UndefinedCache) || 
(cache_info->type == MemoryCache))) { status=AcquireMagickResource(MemoryResource,cache_info->length); if (status != MagickFalse) { status=MagickTrue; if (cache_anonymous_memory <= 0) { cache_info->mapped=MagickFalse; cache_info->pixels=(Quantum *) MagickAssumeAligned( AcquireAlignedMemory(1,(size_t) cache_info->length)); } else { cache_info->mapped=MagickTrue; cache_info->pixels=(Quantum *) MapBlob(-1,IOMode,0,(size_t) cache_info->length); } if (cache_info->pixels == (Quantum *) NULL) { cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; } else { /* Create memory pixel cache. */ cache_info->type=MemoryCache; cache_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) cache_info->metacontent=(void *) (cache_info->pixels+ cache_info->number_channels*number_pixels); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickTrue,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->mapped != MagickFalse ? "Anonymous" : "Heap",type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } cache_info->storage_class=image->storage_class; if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } } status=AcquireMagickResource(DiskResource,cache_info->length); hosts=(const char *) GetImageRegistry(StringRegistryType,"cache:hosts", exception); if ((status == MagickFalse) && (hosts != (const char *) NULL)) { DistributeCacheInfo *server_info; /* Distribute the pixel cache to a remote server. */ server_info=AcquireDistributeCacheInfo(exception); if (server_info != (DistributeCacheInfo *) NULL) { status=OpenDistributePixelCache(server_info,image); if (status == MagickFalse) { ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", GetDistributeCacheHostname(server_info)); server_info=DestroyDistributeCacheInfo(server_info); } else { /* Create a distributed pixel cache. 
*/ status=MagickTrue; cache_info->type=DistributedCache; cache_info->server_info=server_info; (void) FormatLocaleString(cache_info->cache_filename, MagickPathExtent,"%s:%d",GetDistributeCacheHostname( (DistributeCacheInfo *) cache_info->server_info), GetDistributeCachePort((DistributeCacheInfo *) cache_info->server_info)); if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode)) { status=ClonePixelCacheRepository(cache_info,&source_info, exception); RelinquishPixelCachePixels(&source_info); } if (image->debug != MagickFalse) { (void) FormatMagickSize(cache_info->length,MagickFalse,"B", MagickPathExtent,format); type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t) cache_info->type); (void) FormatLocaleString(message,MagickPathExtent, "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)", cache_info->filename,cache_info->cache_filename, GetDistributeCacheFile((DistributeCacheInfo *) cache_info->server_info),type,(double) cache_info->columns, (double) cache_info->rows,(double) cache_info->number_channels,format); (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s", message); } if (status == 0) { cache_info->type=UndefinedCache; return(MagickFalse); } return(MagickTrue); } } cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } /* Create pixel cache on disk. */ if (status == MagickFalse) { cache_info->type=UndefinedCache; (void) ThrowMagickException(exception,GetMagickModule(),CacheError, "CacheResourcesExhausted","`%s'",image->filename); return(MagickFalse); } if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode) && (cache_info->mode != PersistMode)) { (void) ClosePixelCacheOnDisk(cache_info); *cache_info->cache_filename='\0'; } if (OpenPixelCacheOnDisk(cache_info,mode) == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToOpenPixelCache", image->filename); return(MagickFalse); } status=SetPixelCacheExtent(image,(MagickSizeType) cache_info->offset+ cache_info->length); if (status == MagickFalse) { cache_info->type=UndefinedCache; ThrowFileException(exception,CacheError,"UnableToExtendCache", image->filename); return(MagickFalse); } length=number_pixels*(cache_info->number_channels*sizeof(Quantum)+ cache_info->metacontent_extent); if (length != (MagickSizeType) ((size_t) length)) cache_info->type=DiskCache; else { status=AcquireMagickResource(MapResource,cache_info->length); if (status == MagickFalse) cache_info->type=DiskCache; else if ((cache_info->type != MapCache) && (cache_info->type != MemoryCache)) { cache_info->type=DiskCache; RelinquishMagickResource(MapResource,cache_info->length); } else { cache_info->pixels=(Quantum *) MapBlob(cache_info->file,mode, cache_info->offset,(size_t) cache_info->length); if (cache_info->pixels == (Quantum *) NULL) { cache_info->type=DiskCache; cache_info->mapped=source_info.mapped; cache_info->pixels=source_info.pixels; RelinquishMagickResource(MapResource,cache_info->length); } else { /* Create file-backed memory-mapped pixel cache. 
              */
              (void) ClosePixelCacheOnDisk(cache_info);
              cache_info->type=MapCache;
              cache_info->mapped=MagickTrue;
              cache_info->metacontent=(void *) NULL;
              if (cache_info->metacontent_extent != 0)
                cache_info->metacontent=(void *) (cache_info->pixels+
                  cache_info->number_channels*number_pixels);
              if ((source_info.storage_class != UndefinedClass) &&
                  (mode != ReadMode))
                {
                  status=ClonePixelCacheRepository(cache_info,&source_info,
                    exception);
                  RelinquishPixelCachePixels(&source_info);
                }
              if (image->debug != MagickFalse)
                {
                  (void) FormatMagickSize(cache_info->length,MagickTrue,"B",
                    MagickPathExtent,format);
                  type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
                    cache_info->type);
                  (void) FormatLocaleString(message,MagickPathExtent,
                    "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",
                    cache_info->filename,cache_info->cache_filename,
                    cache_info->file,type,(double) cache_info->columns,
                    (double) cache_info->rows,(double)
                    cache_info->number_channels,format);
                  (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",
                    message);
                }
              if (status == 0)
                {
                  cache_info->type=UndefinedCache;
                  return(MagickFalse);
                }
              return(MagickTrue);
            }
        }
    }
  status=MagickTrue;
  if ((source_info.storage_class != UndefinedClass) && (mode != ReadMode))
    {
      status=ClonePixelCacheRepository(cache_info,&source_info,exception);
      RelinquishPixelCachePixels(&source_info);
    }
  if (image->debug != MagickFalse)
    {
      (void) FormatMagickSize(cache_info->length,MagickFalse,"B",
        MagickPathExtent,format);
      type=CommandOptionToMnemonic(MagickCacheOptions,(ssize_t)
        cache_info->type);
      (void) FormatLocaleString(message,MagickPathExtent,
        "open %s (%s[%d], %s, %.20gx%.20gx%.20g %s)",cache_info->filename,
        cache_info->cache_filename,cache_info->file,type,(double)
        cache_info->columns,(double) cache_info->rows,(double)
        cache_info->number_channels,format);
      (void) LogMagickEvent(CacheEvent,GetMagickModule(),"%s",message);
    }
  if (status == 0)
    {
      cache_info->type=UndefinedCache;
      return(MagickFalse);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   P e r s i s t   P i x e l   C a c h e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  PersistPixelCache() attaches to or initializes a persistent pixel cache.  A
%  persistent pixel cache is one that resides on disk and is not destroyed
%  when the program exits.
%
%  The format of the PersistPixelCache() method is:
%
%      MagickBooleanType PersistPixelCache(Image *image,const char *filename,
%        const MagickBooleanType attach,MagickOffsetType *offset,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o filename: the persistent pixel cache filename.
%
%    o attach: a value other than zero attaches to an existing persistent
%      pixel cache; zero initializes the persistent pixel cache.
%
%    o offset: the offset in the persistent cache to store pixels.
%
%    o exception: return any errors or warnings in this structure.
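%
%  A minimal usage sketch (illustrative only; the filename below is
%  hypothetical and error handling is abbreviated): first persist an image's
%  pixels to disk, then re-attach to the same cache file at the recorded
%  offset:
%
%      MagickOffsetType offset = 0;
%
%      status=PersistPixelCache(image,"pixels.cache",MagickFalse,&offset,
%        exception);  /* initialize: clone the pixel cache to disk */
%      offset=0;
%      status=PersistPixelCache(image,"pixels.cache",MagickTrue,&offset,
%        exception);  /* attach: map the existing persistent cache */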
%
*/
MagickExport MagickBooleanType PersistPixelCache(Image *image,
  const char *filename,const MagickBooleanType attach,MagickOffsetType *offset,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info,
    *magick_restrict clone_info;

  MagickBooleanType
    status;

  ssize_t
    page_size;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(image->cache != (void *) NULL);
  assert(filename != (const char *) NULL);
  assert(offset != (MagickOffsetType *) NULL);
  page_size=GetMagickPageSize();
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  CopyOpenCLBuffer(cache_info);
#endif
  if (attach != MagickFalse)
    {
      /*
        Attach existing persistent pixel cache.
      */
      if (image->debug != MagickFalse)
        (void) LogMagickEvent(CacheEvent,GetMagickModule(),
          "attach persistent cache");
      (void) CopyMagickString(cache_info->cache_filename,filename,
        MagickPathExtent);
      cache_info->type=MapCache;
      cache_info->offset=(*offset);
      if (OpenPixelCache(image,ReadMode,exception) == MagickFalse)
        return(MagickFalse);
      *offset+=cache_info->length+page_size-(cache_info->length % page_size);
      return(MagickTrue);
    }
  /*
    Clone persistent pixel cache.
  */
  status=AcquireMagickResource(DiskResource,cache_info->length);
  if (status == MagickFalse)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "CacheResourcesExhausted","`%s'",image->filename);
      return(MagickFalse);
    }
  clone_info=(CacheInfo *) ClonePixelCache(cache_info);
  clone_info->type=DiskCache;
  (void) CopyMagickString(clone_info->cache_filename,filename,
    MagickPathExtent);
  clone_info->file=(-1);
  clone_info->storage_class=cache_info->storage_class;
  clone_info->colorspace=cache_info->colorspace;
  clone_info->alpha_trait=cache_info->alpha_trait;
  clone_info->channels=cache_info->channels;
  clone_info->columns=cache_info->columns;
  clone_info->rows=cache_info->rows;
  clone_info->number_channels=cache_info->number_channels;
  clone_info->metacontent_extent=cache_info->metacontent_extent;
  clone_info->mode=PersistMode;
  clone_info->length=cache_info->length;
  (void) memcpy(clone_info->channel_map,cache_info->channel_map,
    MaxPixelChannels*sizeof(*cache_info->channel_map));
  clone_info->offset=(*offset);
  status=ClonePixelCacheRepository(clone_info,cache_info,exception);
  *offset+=cache_info->length+page_size-(cache_info->length % page_size);
  clone_info=(CacheInfo *) DestroyPixelCache(clone_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l   C a c h e   N e x u s         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelCacheNexus() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelCacheNexus() method is:
%
%      Quantum *QueueAuthenticPixelCacheNexus(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        const MagickBooleanType clone,NexusInfo *nexus_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o nexus_info: the cache nexus to set.
%
%    o clone: clone the pixel cache.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickPrivate Quantum *QueueAuthenticPixelCacheNexus(Image *image,
  const ssize_t x,const ssize_t y,const size_t columns,const size_t rows,
  const MagickBooleanType clone,NexusInfo *nexus_info,ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  MagickOffsetType
    offset;

  MagickSizeType
    number_pixels;

  Quantum
    *magick_restrict pixels;

  /*
    Validate pixel cache geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) GetImagePixelCache(image,clone,exception);
  if (cache_info == (Cache) NULL)
    return((Quantum *) NULL);
  assert(cache_info->signature == MagickCoreSignature);
  if ((cache_info->columns == 0) || (cache_info->rows == 0) || (x < 0) ||
      (y < 0) || (x >= (ssize_t) cache_info->columns) ||
      (y >= (ssize_t) cache_info->rows))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),CacheError,
        "PixelsAreNotAuthentic","`%s'",image->filename);
      return((Quantum *) NULL);
    }
  offset=(MagickOffsetType) y*cache_info->columns+x;
  if (offset < 0)
    return((Quantum *) NULL);
  number_pixels=(MagickSizeType) cache_info->columns*cache_info->rows;
  offset+=(MagickOffsetType) (rows-1)*cache_info->columns+columns-1;
  if ((MagickSizeType) offset >= number_pixels)
    return((Quantum *) NULL);
  /*
    Return pixel cache.
  */
  pixels=SetPixelCacheNexusPixels(cache_info,WriteMode,x,y,columns,rows,
    ((image->channels & WriteMaskChannel) != 0) ||
    ((image->channels & CompositeMaskChannel) != 0) ? MagickTrue : MagickFalse,
    nexus_info,exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   Q u e u e   A u t h e n t i c   P i x e l s   C a c h e                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixelsCache() allocates a region to store image pixels as
%  defined by the region rectangle and returns a pointer to the region.  This
%  region is subsequently transferred from the pixel cache with
%  SyncAuthenticPixelsCache().  A pointer to the pixels is returned if the
%  pixels are transferred, otherwise a NULL is returned.
%
%  The format of the QueueAuthenticPixelsCache() method is:
%
%      Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static Quantum *QueueAuthenticPixelsCache(Image *image,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows,
  ExceptionInfo *exception)
{
  CacheInfo
    *magick_restrict cache_info;

  const int
    id = GetOpenMPThreadId();

  Quantum
    *magick_restrict pixels;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(image->cache != (Cache) NULL);
  cache_info=(CacheInfo *) image->cache;
  assert(cache_info->signature == MagickCoreSignature);
  assert(id < (int) cache_info->number_threads);
  pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse,
    cache_info->nexus_info[id],exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Q u e u e   A u t h e n t i c   P i x e l s                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  QueueAuthenticPixels() queues a mutable pixel region.  If the region is
%  successfully initialized a pointer to a Quantum array representing the
%  region is returned, otherwise NULL is returned.  The returned pointer may
%  point to a temporary working buffer for the pixels or it may point to the
%  final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This is useful if the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without needing to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  QueueAuthenticPixels() any way it pleases.  QueueAuthenticPixels() does not
%  initialize the pixel array values.  Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in memory, or in a
%  memory-mapped file.  The returned pointer must *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  Quantum.  If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticMetacontent() after invoking QueueAuthenticPixels() to
%  obtain the meta-content (of type void *) corresponding to the region.
%  Once the Quantum (and/or meta-content) array has been updated, the
%  changes must be saved back to the underlying image using
%  SyncAuthenticPixels() or they may be lost.
%
%  The format of the QueueAuthenticPixels() method is:
%
%      Quantum *QueueAuthenticPixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
%    o exception: return any errors or warnings in this structure.
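%
%  A minimal usage sketch (illustrative only, error handling elided; it
%  mirrors the queue/initialize/sync pattern this file itself uses in
%  SetCacheAlphaChannel() below):
%
%      Quantum *q = QueueAuthenticPixels(image,0,y,image->columns,1,
%        exception);
%      if (q != (Quantum *) NULL)
%        {
%          for (x=0; x < (ssize_t) image->columns; x++)
%          {
%            SetPixelAlpha(image,OpaqueAlpha,q);  /* initialize each pixel */
%            q+=GetPixelChannels(image);
%          }
%          (void) SyncAuthenticPixels(image,exception);
%        }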
% */ MagickExport Quantum *QueueAuthenticPixels(Image *image,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); Quantum *magick_restrict pixels; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) { pixels=cache_info->methods.queue_authentic_pixels_handler(image,x,y, columns,rows,exception); return(pixels); } assert(id < (int) cache_info->number_threads); pixels=QueueAuthenticPixelCacheNexus(image,x,y,columns,rows,MagickFalse, cache_info->nexus_info[id],exception); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCacheMetacontent() reads metacontent from the specified region of % the pixel cache. % % The format of the ReadPixelCacheMetacontent() method is: % % MagickBooleanType ReadPixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the metacontent. % % o exception: return any errors or warnings in this structure. % */ static inline MagickOffsetType ReadPixelCacheRegion( const CacheInfo *magick_restrict cache_info,const MagickOffsetType offset, const MagickSizeType length,unsigned char *magick_restrict buffer) { register MagickOffsetType i; ssize_t count; #if !defined(MAGICKCORE_HAVE_PREAD) if (lseek(cache_info->file,offset,SEEK_SET) < 0) return((MagickOffsetType) -1); #endif count=0; for (i=0; i < (MagickOffsetType) length; i+=count) { #if !defined(MAGICKCORE_HAVE_PREAD) count=read(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX)); #else count=pread(cache_info->file,buffer+i,(size_t) MagickMin(length-i,(size_t) SSIZE_MAX),offset+i); #endif if (count <= 0) { count=0; if (errno != EINTR) break; } } return(i); } static MagickBooleanType ReadPixelCacheMetacontent( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register ssize_t y; register unsigned char *magick_restrict q; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; q=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { register unsigned char *magick_restrict p; /* Read meta-content from memory. 
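        (A note on the fast path below, inferred from the code: when the
        nexus spans the full cache width, and the total extent fits in a
        size_t, the region is one contiguous run of bytes; rows collapses
        to 1 and a single memcpy of extent bytes replaces the per-row
        copies.)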
*/ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->metacontent_extent*cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } break; } case DiskCache: { /* Read meta content from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->metacontent_extent*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read metacontent from distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e a d P i x e l C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPixelCachePixels() reads pixels from the specified region of the pixel % cache. % % The format of the ReadPixelCachePixels() method is: % % MagickBooleanType ReadPixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to read the pixels. % % o exception: return any errors or warnings in this structure. 
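%
%  (Sketch of the addressing arithmetic used below, for orientation only;
%  nothing here is new behavior:
%
%      offset = region.y*cache_info->columns + region.x         in pixels
%      length = number_channels*region.width*sizeof(Quantum)    bytes per row
%      extent = length*region.height                            bytes total
%
%  each value is overflow-checked before use.)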
% */ static MagickBooleanType ReadPixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register Quantum *magick_restrict q; register ssize_t y; size_t number_channels, rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns; if ((ssize_t) (offset/cache_info->columns) != nexus_info->region.y) return(MagickFalse); offset+=nexus_info->region.x; number_channels=cache_info->number_channels; length=(MagickSizeType) number_channels*nexus_info->region.width* sizeof(Quantum); if ((length/number_channels/sizeof(Quantum)) != nexus_info->region.width) return(MagickFalse); rows=nexus_info->region.height; extent=length*rows; if ((extent == 0) || ((extent/length) != rows)) return(MagickFalse); y=0; q=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { register Quantum *magick_restrict p; /* Read pixels from memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } p=cache_info->pixels+cache_info->number_channels*offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } break; } case DiskCache: { /* Read pixels from disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadPixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*q),length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; offset+=cache_info->columns; q+=cache_info->number_channels*nexus_info->region.width; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Read pixels from distributed cache. 
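      (Transfer strategy, per the code below: if the nexus spans the full
      cache width and the extent fits MagickMaxBufferExtent, the whole
      extent moves in one call; otherwise region.height is forced to 1 and
      region.y is advanced so each ReadDistributePixelCachePixels() call
      moves a single row.)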
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=ReadDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(unsigned char *) q); if (count != (MagickOffsetType) length) break; q+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToReadPixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e f e r e n c e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferencePixelCache() increments the reference count associated with the % pixel cache returning a pointer to the cache. % % The format of the ReferencePixelCache method is: % % Cache ReferencePixelCache(Cache cache_info) % % A description of each parameter follows: % % o cache_info: the pixel cache. % */ MagickPrivate Cache ReferencePixelCache(Cache cache) { CacheInfo *magick_restrict cache_info; assert(cache != (Cache *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); LockSemaphoreInfo(cache_info->semaphore); cache_info->reference_count++; UnlockSemaphoreInfo(cache_info->semaphore); return(cache_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e C h a n n e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheChannels() resets the pixel cache channels. % % The format of the ResetPixelCacheChannels method is: % % void ResetPixelCacheChannels(Image *) % % A description of each parameter follows: % % o image: the image. % */ MagickPrivate void ResetPixelCacheChannels(Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); cache_info->number_channels=GetPixelChannels(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t C a c h e A n o n y m o u s M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetCacheAnonymousMemory() resets the anonymous_memory value. 
% % The format of the ResetCacheAnonymousMemory method is: % % void ResetCacheAnonymousMemory(void) % */ MagickPrivate void ResetCacheAnonymousMemory(void) { cache_anonymous_memory=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + R e s e t P i x e l C a c h e E p o c h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetPixelCacheEpoch() resets the pixel cache epoch. % % The format of the ResetPixelCacheEpoch method is: % % void ResetPixelCacheEpoch(void) % */ MagickPrivate void ResetPixelCacheEpoch(void) { cache_epoch=0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e M e t h o d s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheMethods() sets the image pixel methods to the specified ones. % % The format of the SetPixelCacheMethods() method is: % % SetPixelCacheMethods(Cache *,CacheMethods *cache_methods) % % A description of each parameter follows: % % o cache: the pixel cache. % % o cache_methods: Specifies a pointer to a CacheMethods structure. % */ MagickPrivate void SetPixelCacheMethods(Cache cache,CacheMethods *cache_methods) { CacheInfo *magick_restrict cache_info; GetOneAuthenticPixelFromHandler get_one_authentic_pixel_from_handler; GetOneVirtualPixelFromHandler get_one_virtual_pixel_from_handler; /* Set cache pixel methods. */ assert(cache != (Cache) NULL); assert(cache_methods != (CacheMethods *) NULL); cache_info=(CacheInfo *) cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", cache_info->filename); if (cache_methods->get_virtual_pixel_handler != (GetVirtualPixelHandler) NULL) cache_info->methods.get_virtual_pixel_handler= cache_methods->get_virtual_pixel_handler; if (cache_methods->destroy_pixel_handler != (DestroyPixelHandler) NULL) cache_info->methods.destroy_pixel_handler= cache_methods->destroy_pixel_handler; if (cache_methods->get_virtual_metacontent_from_handler != (GetVirtualMetacontentFromHandler) NULL) cache_info->methods.get_virtual_metacontent_from_handler= cache_methods->get_virtual_metacontent_from_handler; if (cache_methods->get_authentic_pixels_handler != (GetAuthenticPixelsHandler) NULL) cache_info->methods.get_authentic_pixels_handler= cache_methods->get_authentic_pixels_handler; if (cache_methods->queue_authentic_pixels_handler != (QueueAuthenticPixelsHandler) NULL) cache_info->methods.queue_authentic_pixels_handler= cache_methods->queue_authentic_pixels_handler; if (cache_methods->sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) cache_info->methods.sync_authentic_pixels_handler= cache_methods->sync_authentic_pixels_handler; if (cache_methods->get_authentic_pixels_from_handler != (GetAuthenticPixelsFromHandler) NULL) cache_info->methods.get_authentic_pixels_from_handler= cache_methods->get_authentic_pixels_from_handler; if (cache_methods->get_authentic_metacontent_from_handler != (GetAuthenticMetacontentFromHandler) NULL) cache_info->methods.get_authentic_metacontent_from_handler= cache_methods->get_authentic_metacontent_from_handler; get_one_virtual_pixel_from_handler= cache_info->methods.get_one_virtual_pixel_from_handler; if (get_one_virtual_pixel_from_handler != (GetOneVirtualPixelFromHandler) NULL) cache_info->methods.get_one_virtual_pixel_from_handler= 
cache_methods->get_one_virtual_pixel_from_handler; get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; if (get_one_authentic_pixel_from_handler != (GetOneAuthenticPixelFromHandler) NULL) cache_info->methods.get_one_authentic_pixel_from_handler= cache_methods->get_one_authentic_pixel_from_handler; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t P i x e l C a c h e N e x u s P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheNexusPixels() defines the region of the cache for the % specified cache nexus. % % The format of the SetPixelCacheNexusPixels() method is: % % Quantum SetPixelCacheNexusPixels( % const CacheInfo *magick_restrict cache_info,const MapMode mode, % const ssize_t x,const ssize_t y,const size_t width,const size_t height, % const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o mode: ReadMode, WriteMode, or IOMode. % % o x,y,width,height: define the region of this particular cache nexus. % % o buffered: if true, nexus pixels are buffered. % % o nexus_info: the cache nexus to set. % % o exception: return any errors or warnings in this structure. % */ static inline MagickBooleanType AcquireCacheNexusPixels( const CacheInfo *magick_restrict cache_info,const MagickSizeType length, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { if (length != (MagickSizeType) ((size_t) length)) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"PixelCacheAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } nexus_info->length=0; nexus_info->mapped=MagickFalse; if (cache_anonymous_memory <= 0) { nexus_info->cache=(Quantum *) MagickAssumeAligned(AcquireAlignedMemory(1, (size_t) length)); if (nexus_info->cache != (Quantum *) NULL) (void) memset(nexus_info->cache,0,(size_t) length); } else { nexus_info->cache=(Quantum *) MapBlob(-1,IOMode,0,(size_t) length); if (nexus_info->cache != (Quantum *) NULL) nexus_info->mapped=MagickTrue; } if (nexus_info->cache == (Quantum *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"PixelCacheAllocationFailed","`%s'", cache_info->filename); return(MagickFalse); } nexus_info->length=length; return(MagickTrue); } static inline void PrefetchPixelCacheNexusPixels(const NexusInfo *nexus_info, const MapMode mode) { if (nexus_info->length < CACHE_LINE_SIZE) return; if (mode == ReadMode) { MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE, 0,1); return; } MagickCachePrefetch((unsigned char *) nexus_info->pixels+CACHE_LINE_SIZE,1,1); } static Quantum *SetPixelCacheNexusPixels( const CacheInfo *magick_restrict cache_info,const MapMode mode, const ssize_t x,const ssize_t y,const size_t width,const size_t height, const MagickBooleanType buffered,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickBooleanType status; MagickSizeType length, number_pixels; assert(cache_info != (const CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return((Quantum *) NULL); assert(nexus_info->signature == MagickCoreSignature); (void) memset(&nexus_info->region,0,sizeof(nexus_info->region)); if ((width == 0) || (height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CacheError, 
"NoPixelsDefinedInCache","`%s'",cache_info->filename); return((Quantum *) NULL); } if (((cache_info->type == MemoryCache) || (cache_info->type == MapCache)) && (buffered == MagickFalse)) { if (((x >= 0) && (y >= 0) && (((ssize_t) height+y-1) < (ssize_t) cache_info->rows)) && (((x == 0) && (width == cache_info->columns)) || ((height == 1) && (((ssize_t) width+x-1) < (ssize_t) cache_info->columns)))) { MagickOffsetType offset; /* Pixels are accessed directly from memory. */ offset=(MagickOffsetType) y*cache_info->columns+x; nexus_info->pixels=cache_info->pixels+cache_info->number_channels* offset; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(unsigned char *) cache_info->metacontent+ offset*cache_info->metacontent_extent; nexus_info->region.width=width; nexus_info->region.height=height; nexus_info->region.x=x; nexus_info->region.y=y; nexus_info->authentic_pixel_cache=MagickTrue; PrefetchPixelCacheNexusPixels(nexus_info,mode); return(nexus_info->pixels); } } /* Pixels are stored in a staging region until they are synced to the cache. */ if (((MagickSizeType) width > cache_info->width_limit) || ((MagickSizeType) height > cache_info->height_limit)) { (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "WidthOrHeightExceedsLimit","`%s'",cache_info->filename); return((Quantum *) NULL); } number_pixels=(MagickSizeType) width*height; length=MagickMax(number_pixels,MagickMax(cache_info->columns, cache_info->rows))*cache_info->number_channels*sizeof(*nexus_info->pixels); if (cache_info->metacontent_extent != 0) length+=number_pixels*cache_info->metacontent_extent; status=MagickTrue; if (nexus_info->cache == (Quantum *) NULL) status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception); else if (nexus_info->length < length) { RelinquishCacheNexusPixels(nexus_info); status=AcquireCacheNexusPixels(cache_info,length,nexus_info,exception); } if (status == MagickFalse) return((Quantum *) NULL); nexus_info->pixels=nexus_info->cache; nexus_info->metacontent=(void *) NULL; if (cache_info->metacontent_extent != 0) nexus_info->metacontent=(void *) (nexus_info->pixels+ cache_info->number_channels*number_pixels); nexus_info->region.width=width; nexus_info->region.height=height; nexus_info->region.x=x; nexus_info->region.y=y; nexus_info->authentic_pixel_cache=cache_info->type == PingCache ? MagickTrue : MagickFalse; PrefetchPixelCacheNexusPixels(nexus_info,mode); return(nexus_info->pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t P i x e l C a c h e V i r t u a l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetPixelCacheVirtualMethod() sets the "virtual pixels" method for the % pixel cache and returns the previous setting. A virtual pixel is any pixel % access that is outside the boundaries of the image cache. % % The format of the SetPixelCacheVirtualMethod() method is: % % VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType SetCacheAlphaChannel(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; CacheView *magick_restrict image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); /* must be virtual */ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } status=SyncCacheViewAuthenticPixels(image_view,exception); } image_view=DestroyCacheView(image_view); return(status); } MagickPrivate VirtualPixelMethod SetPixelCacheVirtualMethod(Image *image, const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; VirtualPixelMethod method; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); method=cache_info->virtual_pixel_method; cache_info->virtual_pixel_method=virtual_pixel_method; if ((image->columns != 0) && (image->rows != 0)) switch (virtual_pixel_method) { case BackgroundVirtualPixelMethod: { if ((image->background_color.alpha_trait != UndefinedPixelTrait) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); if ((IsPixelInfoGray(&image->background_color) == MagickFalse) && (IsGrayColorspace(image->colorspace) != MagickFalse)) (void) SetImageColorspace(image,sRGBColorspace,exception); break; } case TransparentVirtualPixelMethod: { if (image->alpha_trait == UndefinedPixelTrait) (void) SetCacheAlphaChannel(image,OpaqueAlpha,exception); break; } default: break; } return(method); } #if defined(MAGICKCORE_OPENCL_SUPPORT) /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c O p e n C L B u f f e r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticOpenCLBuffer() makes sure that all the OpenCL operations have % been completed and updates the host memory. % % The format of the SyncAuthenticOpenCLBuffer() method is: % % void SyncAuthenticOpenCLBuffer(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ static void CopyOpenCLBuffer(CacheInfo *magick_restrict cache_info) { assert(cache_info != (CacheInfo *) NULL); assert(cache_info->signature == MagickCoreSignature); if ((cache_info->type != MemoryCache) || (cache_info->opencl == (MagickCLCacheInfo) NULL)) return; /* Ensure single threaded access to OpenCL environment. 
*/ LockSemaphoreInfo(cache_info->semaphore); cache_info->opencl=CopyMagickCLCacheInfo(cache_info->opencl); UnlockSemaphoreInfo(cache_info->semaphore); } MagickPrivate void SyncAuthenticOpenCLBuffer(const Image *image) { CacheInfo *magick_restrict cache_info; assert(image != (const Image *) NULL); cache_info=(CacheInfo *) image->cache; CopyOpenCLBuffer(cache_info); } #endif /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e N e x u s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelCacheNexus() saves the authentic image pixels to the % in-memory or disk cache. The method returns MagickTrue if the pixel region % is synced, otherwise MagickFalse. % % The format of the SyncAuthenticPixelCacheNexus() method is: % % MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o nexus_info: the cache nexus to sync. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncAuthenticPixelCacheNexus(Image *image, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; MagickBooleanType status; /* Transfer pixels to the cache. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->cache == (Cache) NULL) ThrowBinaryException(CacheError,"PixelCacheIsNotOpen",image->filename); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->type == UndefinedCache) return(MagickFalse); if (image->mask_trait != UpdatePixelTrait) { if (((image->channels & WriteMaskChannel) != 0) && (ClipPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); if (((image->channels & CompositeMaskChannel) != 0) && (MaskPixelCacheNexus(image,nexus_info,exception) == MagickFalse)) return(MagickFalse); } if (nexus_info->authentic_pixel_cache != MagickFalse) { if (image->taint == MagickFalse) image->taint=MagickTrue; return(MagickTrue); } assert(cache_info->signature == MagickCoreSignature); status=WritePixelCachePixels(cache_info,nexus_info,exception); if ((cache_info->metacontent_extent != 0) && (WritePixelCacheMetacontent(cache_info,nexus_info,exception) == MagickFalse)) return(MagickFalse); if ((status != MagickFalse) && (image->taint == MagickFalse)) image->taint=MagickTrue; return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c A u t h e n t i c P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixelsCache() saves the authentic image pixels to the in-memory % or disk cache. The method returns MagickTrue if the pixel region is synced, % otherwise MagickFalse. % % The format of the SyncAuthenticPixelsCache() method is: % % MagickBooleanType SyncAuthenticPixelsCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
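%
%  (Note, per the implementation: this is the default handler installed in
%  the cache method table; it simply syncs the calling OpenMP thread's
%  nexus, cache_info->nexus_info[GetOpenMPThreadId()], via
%  SyncAuthenticPixelCacheNexus().)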
% */ static MagickBooleanType SyncAuthenticPixelsCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c A u t h e n t i c P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncAuthenticPixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncAuthenticPixels() method is: % % MagickBooleanType SyncAuthenticPixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncAuthenticPixels(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; const int id = GetOpenMPThreadId(); MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image->cache != (Cache) NULL); cache_info=(CacheInfo *) image->cache; assert(cache_info->signature == MagickCoreSignature); if (cache_info->methods.sync_authentic_pixels_handler != (SyncAuthenticPixelsHandler) NULL) { status=cache_info->methods.sync_authentic_pixels_handler(image, exception); return(status); } assert(id < (int) cache_info->number_threads); status=SyncAuthenticPixelCacheNexus(image,cache_info->nexus_info[id], exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e P i x e l C a c h e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixelCache() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is flushed, otherwise % MagickFalse. % % The format of the SyncImagePixelCache() method is: % % MagickBooleanType SyncImagePixelCache(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickPrivate MagickBooleanType SyncImagePixelCache(Image *image, ExceptionInfo *exception) { CacheInfo *magick_restrict cache_info; assert(image != (Image *) NULL); assert(exception != (ExceptionInfo *) NULL); cache_info=(CacheInfo *) GetImagePixelCache(image,MagickTrue,exception); return(cache_info == (CacheInfo *) NULL ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e P i x e l C a c h e M e t a c o n t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCacheMetacontent() writes the meta-content to the specified region % of the pixel cache. 
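%
%  (Layout note, inferred from the disk branch below: meta-content is stored
%  after all Quantum pixel data, so its byte position in the cache file is
%
%      cache_info->offset + columns*rows*number_channels*sizeof(Quantum)
%        + region_offset*metacontent_extent.)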
% % The format of the WritePixelCacheMetacontent() method is: % % MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the meta-content. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCacheMetacontent(CacheInfo *cache_info, NexusInfo *magick_restrict nexus_info,ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const unsigned char *magick_restrict p; register ssize_t y; size_t rows; if (cache_info->metacontent_extent == 0) return(MagickFalse); if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) nexus_info->region.width* cache_info->metacontent_extent; extent=(MagickSizeType) length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=(unsigned char *) nexus_info->metacontent; switch (cache_info->type) { case MemoryCache: case MapCache: { register unsigned char *magick_restrict q; /* Write associated pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=(unsigned char *) cache_info->metacontent+offset* cache_info->metacontent_extent; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=nexus_info->region.width*cache_info->metacontent_extent; q+=cache_info->columns*cache_info->metacontent_extent; } break; } case DiskCache: { /* Write associated pixels to disk. */ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } extent=(MagickSizeType) cache_info->columns*cache_info->rows; for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+extent* cache_info->number_channels*sizeof(Quantum)+offset* cache_info->metacontent_extent,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write metacontent to distributed cache. 
*/ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCacheMetacontent((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->metacontent_extent*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + W r i t e C a c h e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePixelCachePixels() writes image pixels to the specified region of the % pixel cache. % % The format of the WritePixelCachePixels() method is: % % MagickBooleanType WritePixelCachePixels(CacheInfo *cache_info, % NexusInfo *nexus_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_info: the pixel cache. % % o nexus_info: the cache nexus to write the pixels. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType WritePixelCachePixels( CacheInfo *magick_restrict cache_info,NexusInfo *magick_restrict nexus_info, ExceptionInfo *exception) { MagickOffsetType count, offset; MagickSizeType extent, length; register const Quantum *magick_restrict p; register ssize_t y; size_t rows; if (nexus_info->authentic_pixel_cache != MagickFalse) return(MagickTrue); offset=(MagickOffsetType) nexus_info->region.y*cache_info->columns+ nexus_info->region.x; length=(MagickSizeType) cache_info->number_channels*nexus_info->region.width* sizeof(Quantum); extent=length*nexus_info->region.height; rows=nexus_info->region.height; y=0; p=nexus_info->pixels; switch (cache_info->type) { case MemoryCache: case MapCache: { register Quantum *magick_restrict q; /* Write pixels to memory. */ if ((cache_info->columns == nexus_info->region.width) && (extent == (MagickSizeType) ((size_t) extent))) { length=extent; rows=1UL; } q=cache_info->pixels+cache_info->number_channels*offset; for (y=0; y < (ssize_t) rows; y++) { (void) memcpy(q,p,(size_t) length); p+=cache_info->number_channels*nexus_info->region.width; q+=cache_info->number_channels*cache_info->columns; } break; } case DiskCache: { /* Write pixels to disk. 
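      (Stride note, describing the row loop below: the destination offset
      advances by cache_info->columns, a full cache row, while the source
      pointer p advances by the nexus row width; when the two widths match
      and the extent fits MagickMaxBufferExtent, the rows collapse into a
      single WritePixelCacheRegion() call of extent bytes.)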
*/ LockSemaphoreInfo(cache_info->file_semaphore); if (OpenPixelCacheOnDisk(cache_info,IOMode) == MagickFalse) { ThrowFileException(exception,FileOpenError,"UnableToOpenFile", cache_info->cache_filename); UnlockSemaphoreInfo(cache_info->file_semaphore); return(MagickFalse); } if ((cache_info->columns == nexus_info->region.width) && (extent <= MagickMaxBufferExtent)) { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WritePixelCacheRegion(cache_info,cache_info->offset+offset* cache_info->number_channels*sizeof(*p),length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; offset+=cache_info->columns; } if (IsFileDescriptorLimitExceeded() != MagickFalse) (void) ClosePixelCacheOnDisk(cache_info); UnlockSemaphoreInfo(cache_info->file_semaphore); break; } case DistributedCache: { RectangleInfo region; /* Write pixels to distributed cache. */ LockSemaphoreInfo(cache_info->file_semaphore); region=nexus_info->region; if ((cache_info->columns != nexus_info->region.width) || (extent > MagickMaxBufferExtent)) region.height=1UL; else { length=extent; rows=1UL; } for (y=0; y < (ssize_t) rows; y++) { count=WriteDistributePixelCachePixels((DistributeCacheInfo *) cache_info->server_info,&region,length,(const unsigned char *) p); if (count != (MagickOffsetType) length) break; p+=cache_info->number_channels*nexus_info->region.width; region.y++; } UnlockSemaphoreInfo(cache_info->file_semaphore); break; } default: break; } if (y < (ssize_t) rows) { ThrowFileException(exception,CacheError,"UnableToWritePixelCache", cache_info->cache_filename); return(MagickFalse); } if ((cache_info->debug != MagickFalse) && (CacheTick(nexus_info->region.y,cache_info->rows) != MagickFalse)) (void) LogMagickEvent(CacheEvent,GetMagickModule(), "%s[%.20gx%.20g%+.20g%+.20g]",cache_info->filename,(double) nexus_info->region.width,(double) nexus_info->region.height,(double) nexus_info->region.x,(double) nexus_info->region.y); return(MagickTrue); }
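/*
  A hedged end-to-end sketch (illustrative only, not part of this file's
  API surface; the file name is a placeholder).  Persist an image's pixel
  cache with PersistPixelCache(), then re-attach it later:

      MagickOffsetType offset = 0;
      if (PersistPixelCache(image,"pixels.mpc",MagickFalse,&offset,
            exception) == MagickFalse)
        { ... report error ... }
      ...
      offset=0;
      (void) PersistPixelCache(image,"pixels.mpc",MagickTrue,&offset,
        exception);

  On attach the cache is memory-mapped (cache_info->type=MapCache) at the
  page-aligned offset that the first call advanced *offset past.
*/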
reduction.c
/*
  1. A local copy of the reduction variable is made and initialized
     depending on the op (e.g. 0 for +).
  2. The compiler finds standard reduction expressions containing the op
     and uses them to update the local copy.
  3. The local copies are reduced into a single value and combined with
     the original global value.
*/
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif
#include "hif.h"

int main()
{
  int i;
  int total=100;
  long sumx;
  long *suma = &sumx;

  pers_attach();
#pragma omp target
  {
    long sum=0;
    long sum2 = 0;
#pragma omp parallel for reduction(+:sum)
    for (i=0; i<= total; i++){
      sum = sum + i;
      // printf("i is %d and sum is %ld\n", i, sum);
    }
    printf("after parallel region, sum is %ld\n", sum);

    long sum0;
#pragma omp parallel private(sum0)
    {
      sum0=0;
#pragma omp for private(i)
      for (i=0; i<= total; i++)
        sum0=sum0+i;
#pragma omp critical
      sum2 = sum2 + sum0;
    }
    printf("sum of 1 to %d = %ld and %ld\n",total, sum, sum2);
    *suma = (sum + sum2) / 2;
  }
  printf("sumx is %ld %s\n", sumx, sumx == 5050 ? "PASSED" : "FAILED");
  return (int)sumx;
}
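/*
  Expected arithmetic, for reference: the sum of 0..100 is 100*101/2 = 5050.
  Both the reduction-clause version (sum) and the manual private-copy plus
  critical-section version (sum2) compute it, so (sum + sum2)/2 is 5050 and
  the final check prints PASSED.
*/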
resample.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % RRRR EEEEE SSSSS AAA M M PPPP L EEEEE % % R R E SS A A MM MM P P L E % % RRRR EEE SSS AAAAA M M M PPPP L EEE % % R R E SS A A M M P L E % % R R EEEEE SSSSS A A M M P LLLLL EEEEE % % % % % % MagickCore Pixel Resampling Methods % % % % Software Design % % John Cristy % % Anthony Thyssen % % August 2007 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/artifact.h" #include "magick/color-private.h" #include "magick/cache.h" #include "magick/draw.h" #include "magick/exception-private.h" #include "magick/gem.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/log.h" #include "magick/magick.h" #include "magick/memory_.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/quantum.h" #include "magick/random_.h" #include "magick/resample.h" #include "magick/resize.h" #include "magick/resize-private.h" #include "magick/transform.h" #include "magick/signature-private.h" #include "magick/utility.h" /* EWA Resampling Options */ /* select ONE resampling method */ #define EWA 1 /* Normal EWA handling - raw or clamped */ /* if 0 then use "High Quality EWA" */ #define EWA_CLAMP 1 /* EWA Clamping from Nicolas Robidoux */ #define FILTER_LUT 1 /* Use a LUT rather then direct filter calls */ /* output debugging information */ #define DEBUG_ELLIPSE 0 /* output ellipse info for debug */ #define DEBUG_HIT_MISS 0 /* output hit/miss pixels (as gnuplot commands) */ #define DEBUG_NO_PIXEL_HIT 0 /* Make pixels that fail to hit anything - RED */ #if ! FILTER_DIRECT #define WLUT_WIDTH 1024 /* size of the filter cache */ #endif /* Typedef declarations. 
*/
struct _ResampleFilter
{
  CacheView
    *view;

  Image
    *image;

  ExceptionInfo
    *exception;

  MagickBooleanType
    debug;

  /* Information about image being resampled */
  ssize_t
    image_area;

  InterpolatePixelMethod
    interpolate;

  VirtualPixelMethod
    virtual_pixel;

  FilterTypes
    filter;

  /* processing settings needed */
  MagickBooleanType
    limit_reached,
    do_interpolate,
    average_defined;

  MagickPixelPacket
    average_pixel;

  /* current elliptical area being resampled around center point */
  double
    A, B, C,
    Vlimit, Ulimit, Uwidth,
    slope;

#if FILTER_LUT
  /* LUT of weights for filtered average in elliptical area */
  double
    filter_lut[WLUT_WIDTH];
#else
  /* Use a Direct call to the filter functions */
  ResizeFilter
    *filter_def;

  double
    F;
#endif

  /* the practical working support of the filter */
  double
    support;

  size_t
    signature;
};

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   A c q u i r e   R e s a m p l e   I n f o                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AcquireResampleFilter() initializes the information resample needs to do a
%  scaled lookup of a color from an image, using area sampling.
%
%  The algorithm is based on an Elliptical Weighted Average, where the pixels
%  found in a large elliptical area are averaged together according to a
%  weighting (filter) function.  For more details see "Fundamentals of
%  Texture Mapping and Image Warping", a master's thesis by Paul S. Heckbert,
%  June 17, 1989.  Available for free from http://www.cs.cmu.edu/~ph/
%
%  As EWA resampling (or any sort of resampling) can require a lot of
%  calculations to produce a distorted scaling of the source image for each
%  output pixel, the ResampleFilter structure generated holds that
%  information between individual resampling operations.
%
%  This function will make the appropriate AcquireCacheView() calls
%  to view the image; calling functions do not need to open a cache view.
%
%  Usage Example...
%      resample_filter=AcquireResampleFilter(image,exception);
%      SetResampleFilter(resample_filter, GaussianFilter, 1.0);
%      for (y=0; y < (ssize_t) image->rows; y++) {
%        for (x=0; x < (ssize_t) image->columns; x++) {
%          u= ....;   v= ....;
%          ScaleResampleFilter(resample_filter, ... scaling vectors ...);
%          (void) ResamplePixelColor(resample_filter,u,v,&pixel);
%          ... assign resampled pixel value ...
%        }
%      }
%      DestroyResampleFilter(resample_filter);
%
%  The format of the AcquireResampleFilter method is:
%
%      ResampleFilter *AcquireResampleFilter(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
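%
%  (Sketch of the underlying test, stated here for orientation: for a pixel
%  at offset U,V from the sampling center, the ellipse inclusion test used
%  by ResamplePixelColor() is
%
%      Q = A*U*U + B*U*V + C*V*V  <  F
%
%  where A,B,C are the coefficients stored in this structure and the pixel
%  weight is the resize filter evaluated at sqrt(Q); with FILTER_LUT the
%  ellipse is pre-scaled so that F is WLUT_WIDTH and the weight becomes a
%  simple table lookup.)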
% */ MagickExport ResampleFilter *AcquireResampleFilter(const Image *image, ExceptionInfo *exception) { register ResampleFilter *resample_filter; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickSignature); resample_filter=(ResampleFilter *) AcquireMagickMemory( sizeof(*resample_filter)); if (resample_filter == (ResampleFilter *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(resample_filter,0,sizeof(*resample_filter)); resample_filter->exception=exception; resample_filter->image=ReferenceImage((Image *) image); resample_filter->view=AcquireCacheView(resample_filter->image); resample_filter->debug=IsEventLogging(); resample_filter->signature=MagickSignature; resample_filter->image_area=(ssize_t) (image->columns*image->rows); resample_filter->average_defined = MagickFalse; /* initialise the resampling filter settings */ SetResampleFilter(resample_filter, image->filter, image->blur); (void) SetResampleFilterInterpolateMethod(resample_filter, image->interpolate); (void) SetResampleFilterVirtualPixelMethod(resample_filter, GetImageVirtualPixelMethod(image)); return(resample_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y R e s a m p l e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyResampleFilter() finalizes and cleans up the resampling % resample_filter as returned by AcquireResampleFilter(), freeing any memory % or other information as needed. % % The format of the DestroyResampleFilter method is: % % ResampleFilter *DestroyResampleFilter(ResampleFilter *resample_filter) % % A description of each parameter follows: % % o resample_filter: resampling information structure % */ MagickExport ResampleFilter *DestroyResampleFilter( ResampleFilter *resample_filter) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->view=DestroyCacheView(resample_filter->view); resample_filter->image=DestroyImage(resample_filter->image); #if ! FILTER_LUT resample_filter->filter_def=DestroyResizeFilter(resample_filter->filter_def); #endif resample_filter->signature=(~MagickSignature); resample_filter=(ResampleFilter *) RelinquishMagickMemory(resample_filter); return(resample_filter); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s a m p l e P i x e l C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResamplePixelColor() samples the pixel values surrounding the location % given using an elliptical weighted average, at the scale previously % calculated, and in the most efficent manner possible for the % VirtualPixelMethod setting. % % The format of the ResamplePixelColor method is: % % MagickBooleanType ResamplePixelColor(ResampleFilter *resample_filter, % const double u0,const double v0,MagickPixelPacket *pixel) % % A description of each parameter follows: % % o resample_filter: the resample filter. 
% % o u0,v0: A double representing the center of the area to resample, % The distortion transformed transformed x,y coordinate. % % o pixel: the resampled pixel is returned here. % */ MagickExport MagickBooleanType ResamplePixelColor( ResampleFilter *resample_filter,const double u0,const double v0, MagickPixelPacket *pixel) { MagickBooleanType status; ssize_t u,v, v1, v2, uw, hit; double u1; double U,V,Q,DQ,DDQ; double divisor_c,divisor_m; register double weight; register const PixelPacket *pixels; register const IndexPacket *indexes; assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickSignature); status=MagickTrue; /* GetMagickPixelPacket(resample_filter->image,pixel); */ if ( resample_filter->do_interpolate ) { status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,resample_filter->interpolate,u0,v0,pixel, resample_filter->exception); return(status); } #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "u0=%lf; v0=%lf;\n", u0, v0); #endif /* Does resample area Miss the image? And is that area a simple solid color - then return that color */ hit = 0; switch ( resample_filter->virtual_pixel ) { case BackgroundVirtualPixelMethod: case ConstantVirtualPixelMethod: case TransparentVirtualPixelMethod: case BlackVirtualPixelMethod: case GrayVirtualPixelMethod: case WhiteVirtualPixelMethod: case MaskVirtualPixelMethod: if ( resample_filter->limit_reached || u0 + resample_filter->Ulimit < 0.0 || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns || v0 + resample_filter->Vlimit < 0.0 || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows ) hit++; break; case UndefinedVirtualPixelMethod: case EdgeVirtualPixelMethod: if ( ( u0 + resample_filter->Ulimit < 0.0 && v0 + resample_filter->Vlimit < 0.0 ) || ( u0 + resample_filter->Ulimit < 0.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns && v0 + resample_filter->Vlimit < 0.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows ) ) hit++; break; case HorizontalTileVirtualPixelMethod: if ( v0 + resample_filter->Vlimit < 0.0 || v0 - resample_filter->Vlimit > (double) resample_filter->image->rows ) hit++; /* outside the horizontally tiled images. */ break; case VerticalTileVirtualPixelMethod: if ( u0 + resample_filter->Ulimit < 0.0 || u0 - resample_filter->Ulimit > (double) resample_filter->image->columns ) hit++; /* outside the vertically tiled images. 
*/ break; case DitherVirtualPixelMethod: if ( ( u0 + resample_filter->Ulimit < -32.0 && v0 + resample_filter->Vlimit < -32.0 ) || ( u0 + resample_filter->Ulimit < -32.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+32.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+32.0 && v0 + resample_filter->Vlimit < -32.0 ) || ( u0 - resample_filter->Ulimit > (double) resample_filter->image->columns+32.0 && v0 - resample_filter->Vlimit > (double) resample_filter->image->rows+32.0 ) ) hit++; break; case TileVirtualPixelMethod: case MirrorVirtualPixelMethod: case RandomVirtualPixelMethod: case HorizontalTileEdgeVirtualPixelMethod: case VerticalTileEdgeVirtualPixelMethod: case CheckerTileVirtualPixelMethod: /* resampling of area is always needed - no VP limits */ break; } if ( hit ) { /* whole area is a solid color -- just return that color */ status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,IntegerInterpolatePixel,u0,v0,pixel, resample_filter->exception); return(status); } /* Scaling limits reached, return an 'averaged' result. */ if ( resample_filter->limit_reached ) { switch ( resample_filter->virtual_pixel ) { /* This is always handled by the above, so no need. case BackgroundVirtualPixelMethod: case ConstantVirtualPixelMethod: case TransparentVirtualPixelMethod: case GrayVirtualPixelMethod, case WhiteVirtualPixelMethod case MaskVirtualPixelMethod: */ case UndefinedVirtualPixelMethod: case EdgeVirtualPixelMethod: case DitherVirtualPixelMethod: case HorizontalTileEdgeVirtualPixelMethod: case VerticalTileEdgeVirtualPixelMethod: /* We need an average edge pixel, from the correct edge! How should I calculate an average edge color? Just returning an averaged neighbourhood, works well in general, but falls down for TileEdge methods. This needs to be done properly!!!!!! */ status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,AverageInterpolatePixel,u0,v0,pixel, resample_filter->exception); break; case HorizontalTileVirtualPixelMethod: case VerticalTileVirtualPixelMethod: /* just return the background pixel - Is there more direct way? 
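         (interpolating at -1.0,-1.0 lands outside the image, so the
         virtual-pixel machinery supplies the background color; a direct
         background lookup would avoid the round trip)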
*/
        status=InterpolateMagickPixelPacket(resample_filter->image,
          resample_filter->view,IntegerInterpolatePixel,-1.0,-1.0,pixel,
          resample_filter->exception);
        break;
      case TileVirtualPixelMethod:
      case MirrorVirtualPixelMethod:
      case RandomVirtualPixelMethod:
      case CheckerTileVirtualPixelMethod:
      default:
        /* generate an average color of the WHOLE image */
        if ( resample_filter->average_defined == MagickFalse ) {
          Image
            *average_image;
          CacheView
            *average_view;
          GetMagickPixelPacket(resample_filter->image,(MagickPixelPacket *)
            &resample_filter->average_pixel);
          resample_filter->average_defined=MagickTrue;
          /* Try to get an averaged pixel color of whole image */
          average_image=ResizeImage(resample_filter->image,1,1,BoxFilter,1.0,
            resample_filter->exception);
          if (average_image == (Image *) NULL)
            {
              *pixel=resample_filter->average_pixel; /* FAILED */
              break;
            }
          average_view=AcquireCacheView(average_image);
          pixels=(PixelPacket *)GetCacheViewVirtualPixels(average_view,0,0,1,1,
            resample_filter->exception);
          if (pixels == (const PixelPacket *) NULL) {
            average_view=DestroyCacheView(average_view);
            average_image=DestroyImage(average_image);
            *pixel=resample_filter->average_pixel; /* FAILED */
            break;
          }
          indexes=(IndexPacket *) GetCacheViewAuthenticIndexQueue(average_view);
          SetMagickPixelPacket(resample_filter->image,pixels,indexes,
            &(resample_filter->average_pixel));
          average_view=DestroyCacheView(average_view);
          average_image=DestroyImage(average_image);

          if ( resample_filter->virtual_pixel == CheckerTileVirtualPixelMethod )
            {
              /* CheckerTile is the average of the image average and half the
                 background color */
              /* FUTURE: replace with a 50% blend of both pixels */
              weight = QuantumScale*((MagickRealType)(QuantumRange-
                  resample_filter->average_pixel.opacity));
              resample_filter->average_pixel.red *= weight;
              resample_filter->average_pixel.green *= weight;
              resample_filter->average_pixel.blue *= weight;
              divisor_c = weight;

              weight = QuantumScale*((MagickRealType)(QuantumRange-
                  resample_filter->image->background_color.opacity));
              resample_filter->average_pixel.red +=
                weight*resample_filter->image->background_color.red;
              resample_filter->average_pixel.green +=
                weight*resample_filter->image->background_color.green;
              resample_filter->average_pixel.blue +=
                weight*resample_filter->image->background_color.blue;
              resample_filter->average_pixel.opacity +=
                resample_filter->image->background_color.opacity;
              divisor_c += weight;

              resample_filter->average_pixel.red /= divisor_c;
              resample_filter->average_pixel.green /= divisor_c;
              resample_filter->average_pixel.blue /= divisor_c;
              resample_filter->average_pixel.opacity /= 2;
            }
        }
        *pixel=resample_filter->average_pixel;
        break;
    }
    return(status);
  }

  /*
    Initialize weighted average data collection
  */
  hit = 0;
  divisor_c = 0.0;
  divisor_m = 0.0;
  pixel->red = pixel->green = pixel->blue = 0.0;
  if (pixel->matte != MagickFalse)
    pixel->opacity = 0.0;
  if (pixel->colorspace == CMYKColorspace)
    pixel->index = 0.0;

  /*
    Determine the parallelogram bounding box fitted to the ellipse
    centered at u0,v0.  This area is bounded by the lines...
  */
  v1 = (ssize_t)ceil(v0 - resample_filter->Vlimit);  /* range of scan lines */
  v2 = (ssize_t)floor(v0 + resample_filter->Vlimit);

  /* scan line start and width across the parallelogram */
  u1 = u0 + (v1-v0)*resample_filter->slope - resample_filter->Uwidth;
  uw = (ssize_t)(2.0*resample_filter->Uwidth)+1;

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "v1=%ld; v2=%ld\n", (long)v1, (long)v2);
  (void) FormatLocaleFile(stderr, "u1=%ld; uw=%ld\n", (long)u1, (long)uw);
#else
# define DEBUG_HIT_MISS 0 /* only valid if DEBUG_ELLIPSE is enabled */
#endif

  /*
    Do weighted resampling of all pixels, within the scaled ellipse,
    bound by a parallelogram fitted to the ellipse.
  */
  DDQ = 2*resample_filter->A;
  for( v=v1; v<=v2; v++ ) {
#if DEBUG_HIT_MISS
    long uu = ceil(u1);   /* actual pixel location (for debug only) */
    (void) FormatLocaleFile(stderr, "# scan line from pixel %ld, %ld\n", (long)uu, (long)v);
#endif
    u = (ssize_t)ceil(u1);        /* first pixel in scanline */
    u1 += resample_filter->slope; /* start of next scan line */

    /* location of this first pixel, relative to u0,v0 */
    U = (double)u-u0;
    V = (double)v-v0;

    /* Q = ellipse quotient ( if Q<F then pixel is inside ellipse) */
    Q = (resample_filter->A*U + resample_filter->B*V)*U + resample_filter->C*V*V;
    DQ = resample_filter->A*(2.0*U+1) + resample_filter->B*V;

    /* get the scanline of pixels for this v */
    pixels=GetCacheViewVirtualPixels(resample_filter->view,u,v,(size_t) uw,
      1,resample_filter->exception);
    if (pixels == (const PixelPacket *) NULL)
      return(MagickFalse);
    indexes=GetCacheViewVirtualIndexQueue(resample_filter->view);

    /* count up the weighted pixel colors */
    for( u=0; u<uw; u++ ) {
#if FILTER_LUT
      /* Note that the ellipse has been pre-scaled so F = WLUT_WIDTH */
      if ( Q < (double)WLUT_WIDTH ) {
        weight = resample_filter->filter_lut[(int)Q];
#else
      /* Note that the ellipse has been pre-scaled so F = support^2 */
      if ( Q < (double)resample_filter->F ) {
        weight = GetResizeFilterWeight(resample_filter->filter_def,
          sqrt(Q));    /* a SquareRoot!  Arrggghhhhh...
*/ #endif pixel->opacity += weight*pixels->opacity; divisor_m += weight; if (pixel->matte != MagickFalse) weight *= QuantumScale*((MagickRealType)(QuantumRange-pixels->opacity)); pixel->red += weight*pixels->red; pixel->green += weight*pixels->green; pixel->blue += weight*pixels->blue; if (pixel->colorspace == CMYKColorspace) pixel->index += weight*(*indexes); divisor_c += weight; hit++; #if DEBUG_HIT_MISS /* mark the pixel according to hit/miss of the ellipse */ (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n", (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1); (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 3\n", (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1); } else { (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n", (long)uu-.1,(double)v-.1,(long)uu+.1,(long)v+.1); (void) FormatLocaleFile(stderr, "set arrow from %lf,%lf to %lf,%lf nohead ls 1\n", (long)uu+.1,(double)v-.1,(long)uu-.1,(long)v+.1); } uu++; #else } #endif pixels++; indexes++; Q += DQ; DQ += DDQ; } } #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "Hit=%ld; Total=%ld;\n", (long)hit, (long)uw*(v2-v1) ); #endif /* Result sanity check -- this should NOT happen */ if ( hit == 0 ) { /* not enough pixels in resampling, resort to direct interpolation */ #if DEBUG_NO_PIXEL_HIT pixel->opacity = pixel->red = pixel->green = pixel->blue = 0; pixel->red = QuantumRange; /* show pixels for which EWA fails */ #else status=InterpolateMagickPixelPacket(resample_filter->image, resample_filter->view,resample_filter->interpolate,u0,v0,pixel, resample_filter->exception); #endif return status; } /* Finialize results of resampling */ divisor_m = 1.0/divisor_m; pixel->opacity = (MagickRealType) ClampToQuantum(divisor_m*pixel->opacity); divisor_c = 1.0/divisor_c; pixel->red = (MagickRealType) ClampToQuantum(divisor_c*pixel->red); pixel->green = (MagickRealType) ClampToQuantum(divisor_c*pixel->green); pixel->blue = (MagickRealType) ClampToQuantum(divisor_c*pixel->blue); if (pixel->colorspace == CMYKColorspace) pixel->index = (MagickRealType) ClampToQuantum(divisor_c*pixel->index); return(MagickTrue); } #if EWA && EWA_CLAMP /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % - C l a m p U p A x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClampUpAxes() function converts the input vectors into a major and % minor axis unit vectors, and their magnitude. This allows us to % ensure that the ellipse generated is never smaller than the unit % circle and thus never too small for use in EWA resampling. % % This purely mathematical 'magic' was provided by Professor Nicolas % Robidoux and his Masters student Chantal Racette. % % Reference: "We Recommend Singular Value Decomposition", David Austin % http://www.ams.org/samplings/feature-column/fcarc-svd % % By generating major and minor axis vectors, we can actually use the % ellipse in its "canonical form", by remapping the dx,dy of the % sampled point into distances along the major and minor axis unit % vectors. 
% % Reference: http://en.wikipedia.org/wiki/Ellipse#Canonical_form */ static inline void ClampUpAxes(const double dux, const double dvx, const double duy, const double dvy, double *major_mag, double *minor_mag, double *major_unit_x, double *major_unit_y, double *minor_unit_x, double *minor_unit_y) { /* * ClampUpAxes takes an input 2x2 matrix * * [ a b ] = [ dux duy ] * [ c d ] = [ dvx dvy ] * * and computes from it the major and minor axis vectors [major_x, * major_y] and [minor_x,minor_y] of the smallest ellipse containing * both the unit disk and the ellipse which is the image of the unit * disk by the linear transformation * * [ dux duy ] [S] = [s] * [ dvx dvy ] [T] = [t] * * (The vector [S,T] is the difference between a position in output * space and [X,Y]; the vector [s,t] is the difference between a * position in input space and [x,y].) */ /* * Output: * * major_mag is the half-length of the major axis of the "new" * ellipse. * * minor_mag is the half-length of the minor axis of the "new" * ellipse. * * major_unit_x is the x-coordinate of the major axis direction vector * of both the "old" and "new" ellipses. * * major_unit_y is the y-coordinate of the major axis direction vector. * * minor_unit_x is the x-coordinate of the minor axis direction vector. * * minor_unit_y is the y-coordinate of the minor axis direction vector. * * Unit vectors are useful for computing projections, in particular, * to compute the distance between a point in output space and the * center of a unit disk in output space, using the position of the * corresponding point [s,t] in input space. Following the clamping, * the square of this distance is * * ( ( s * major_unit_x + t * major_unit_y ) / major_mag )^2 * + * ( ( s * minor_unit_x + t * minor_unit_y ) / minor_mag )^2 * * If such distances will be computed for many [s,t]'s, it makes * sense to actually compute the reciprocal of major_mag and * minor_mag and multiply them by the above unit lengths. * * Now, if you want to modify the input pair of tangent vectors so * that it defines the modified ellipse, all you have to do is set * * newdux = major_mag * major_unit_x * newdvx = major_mag * major_unit_y * newduy = minor_mag * minor_unit_x = minor_mag * -major_unit_y * newdvy = minor_mag * minor_unit_y = minor_mag * major_unit_x * * and use these tangent vectors as if they were the original ones. * Usually, this is a drastic change in the tangent vectors even if * the singular values are not clamped; for example, the minor axis * vector always points in a direction which is 90 degrees * counterclockwise from the direction of the major axis vector. */ /* * Discussion: * * GOAL: Fix things so that the pullback, in input space, of a disk * of radius r in output space is an ellipse which contains, at * least, a disc of radius r. (Make this hold for any r>0.) * * ESSENCE OF THE METHOD: Compute the product of the first two * factors of an SVD of the linear transformation defining the * ellipse and make sure that both its columns have norm at least 1. * Because rotations and reflexions map disks to themselves, it is * not necessary to compute the third (rightmost) factor of the SVD. * * DETAILS: Find the singular values and (unit) left singular * vectors of Jinv, clampling up the singular values to 1, and * multiply the unit left singular vectors by the new singular * values in order to get the minor and major ellipse axis vectors. 
* * Image resampling context: * * The Jacobian matrix of the transformation at the output point * under consideration is defined as follows: * * Consider the transformation (x,y) -> (X,Y) from input locations * to output locations. (Anthony Thyssen, elsewhere in resample.c, * uses the notation (u,v) -> (x,y).) * * The Jacobian matrix of the transformation at (x,y) is equal to * * J = [ A, B ] = [ dX/dx, dX/dy ] * [ C, D ] [ dY/dx, dY/dy ] * * that is, the vector [A,C] is the tangent vector corresponding to * input changes in the horizontal direction, and the vector [B,D] * is the tangent vector corresponding to input changes in the * vertical direction. * * In the context of resampling, it is natural to use the inverse * Jacobian matrix Jinv because resampling is generally performed by * pulling pixel locations in the output image back to locations in * the input image. Jinv is * * Jinv = [ a, b ] = [ dx/dX, dx/dY ] * [ c, d ] [ dy/dX, dy/dY ] * * Note: Jinv can be computed from J with the following matrix * formula: * * Jinv = 1/(A*D-B*C) [ D, -B ] * [ -C, A ] * * What we do is modify Jinv so that it generates an ellipse which * is as close as possible to the original but which contains the * unit disk. This can be accomplished as follows: * * Let * * Jinv = U Sigma V^T * * be an SVD decomposition of Jinv. (The SVD is not unique, but the * final ellipse does not depend on the particular SVD.) * * We could clamp up the entries of the diagonal matrix Sigma so * that they are at least 1, and then set * * Jinv = U newSigma V^T. * * However, we do not need to compute V for the following reason: * V^T is an orthogonal matrix (that is, it represents a combination * of rotations and reflexions) so that it maps the unit circle to * itself. For this reason, the exact value of V does not affect the * final ellipse, and we can choose V to be the identity * matrix. This gives * * Jinv = U newSigma. * * In the end, we return the two diagonal entries of newSigma * together with the two columns of U. */ /* * ClampUpAxes was written by Nicolas Robidoux and Chantal Racette * of Laurentian University with insightful suggestions from Anthony * Thyssen and funding from the National Science and Engineering * Research Council of Canada. It is distinguished from its * predecessors by its efficient handling of degenerate cases. * * The idea of clamping up the EWA ellipse's major and minor axes so * that the result contains the reconstruction kernel filter support * is taken from Andreas Gustaffson's Masters thesis "Interactive * Image Warping", Helsinki University of Technology, Faculty of * Information Technology, 59 pages, 1993 (see Section 3.6). * * The use of the SVD to clamp up the singular values of the * Jacobian matrix of the pullback transformation for EWA resampling * is taken from the astrophysicist Craig DeForest. It is * implemented in his PDL::Transform code (PDL = Perl Data * Language). */ const double a = dux; const double b = duy; const double c = dvx; const double d = dvy; /* * n is the matrix Jinv * transpose(Jinv). Eigenvalues of n are the * squares of the singular values of Jinv. */ const double aa = a*a; const double bb = b*b; const double cc = c*c; const double dd = d*d; /* * Eigenvectors of n are left singular vectors of Jinv. 
*/
  const double n11 = aa+bb;
  const double n12 = a*c+b*d;
  const double n21 = n12;
  const double n22 = cc+dd;
  const double det = a*d-b*c;
  const double twice_det = det+det;
  const double frobenius_squared = n11+n22;
  const double discriminant =
    (frobenius_squared+twice_det)*(frobenius_squared-twice_det);
  const double sqrt_discriminant = sqrt(discriminant);
  /*
   * s1 is the largest singular value of the inverse Jacobian
   * matrix. In other words, its reciprocal is the smallest singular
   * value of the Jacobian matrix itself.
   * If s1 = 0, both singular values are 0, and any orthogonal pair of
   * left and right factors produces a singular decomposition of Jinv.
   */
  /*
   * Initially, we only compute the squares of the singular values.
   */
  const double s1s1 = 0.5*(frobenius_squared+sqrt_discriminant);
  /*
   * s2 is the smallest singular value of the inverse Jacobian
   * matrix. Its reciprocal is the largest singular value of the
   * Jacobian matrix itself.
   */
  const double s2s2 = 0.5*(frobenius_squared-sqrt_discriminant);
  const double s1s1minusn11 = s1s1-n11;
  const double s1s1minusn22 = s1s1-n22;
  /*
   * u1, the first column of the U factor of a singular decomposition
   * of Jinv, is a (non-normalized) left singular vector corresponding
   * to s1. It has entries u11 and u21. We compute u1 from the fact
   * that it is an eigenvector of n corresponding to the eigenvalue
   * s1^2.
   */
  const double s1s1minusn11_squared = s1s1minusn11*s1s1minusn11;
  const double s1s1minusn22_squared = s1s1minusn22*s1s1minusn22;
  /*
   * The following selects the largest row of n-s1^2 I as the one
   * which is used to find the eigenvector. If both s1^2-n11 and
   * s1^2-n22 are zero, n-s1^2 I is the zero matrix.  In that case,
   * any vector is an eigenvector; in addition, norm below is equal to
   * zero, and, in exact arithmetic, this is the only case in which
   * norm = 0. So, setting u1 to the simple but arbitrary vector [1,0]
   * if norm = 0 safely takes care of all cases.
   */
  const double temp_u11 =
    ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? n12 : s1s1minusn22 );
  const double temp_u21 =
    ( (s1s1minusn11_squared>=s1s1minusn22_squared) ? s1s1minusn11 : n21 );
  const double norm = sqrt(temp_u11*temp_u11+temp_u21*temp_u21);
  /*
   * Finalize the entries of first left singular vector (associated
   * with the largest singular value).
   */
  const double u11 = ( (norm>0.0) ? temp_u11/norm : 1.0 );
  const double u21 = ( (norm>0.0) ? temp_u21/norm : 0.0 );
  /*
   * Clamp the singular values up to 1.
   */
  *major_mag = ( (s1s1<=1.0) ? 1.0 : sqrt(s1s1) );
  *minor_mag = ( (s2s2<=1.0) ? 1.0 : sqrt(s2s2) );
  /*
   * Return the unit major and minor axis direction vectors.
   */
  *major_unit_x = u11;
  *major_unit_y = u21;
  *minor_unit_x = -u21;
  *minor_unit_y = u11;
}
#endif

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S c a l e R e s a m p l e F i l t e r                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ScaleResampleFilter() does all the calculations needed to resample an image
%  at a specific scale, defined by two scaling vectors.  This is not an
%  orthogonal scaling, but two distorted scaling vectors, allowing the
%  generation of an angled ellipse.
%
%  As only two derivative scaling vectors are used, the center of the ellipse
%  must be the center of the lookup.  That is, any curvature that the
%  distortion may produce is discounted.
%
%  The input vectors are produced by either finding the derivatives of the
%  distortion function, or the partial derivatives from a distortion mapping.
%  They do not need to be the orthogonal dx,dy scaling vectors, but can be
%  calculated from other derivatives.  For example you could use the dr,da/r
%  polar coordinate scaling vectors.
%
%  If   u,v = DistortEquation(x,y)   OR   u = Fu(x,y);  v = Fv(x,y)
%  Then the scaling vectors are determined from the derivatives...
%      du/dx, dv/dx     and     du/dy, dv/dy
%  If the resulting scaling vectors are orthogonally aligned then...
%      dv/dx = 0   and   du/dy = 0
%  producing an orthogonally aligned ellipse in source space for the area to
%  be resampled.
%
%  Note that the scaling vector order differs from the argument order.  The
%  argument order is the general order in which the derivatives are extracted
%  from the distortion equations, not the scaling vector order.  As such the
%  middle two values may be swapped from what you expect.  Caution is advised.
%
%  WARNING: It is assumed that any SetResampleFilter() method call will
%  always be performed before the ScaleResampleFilter() method, so that the
%  size of the ellipse will match the support for the resampling filter being
%  used.
%
%  The format of the ScaleResampleFilter method is:
%
%     void ScaleResampleFilter(const ResampleFilter *resample_filter,
%       const double dux,const double duy,const double dvx,const double dvy)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter information defining the
%      image being resampled
%
%    o dux,duy,dvx,dvy:
%         The derivatives or scaling vectors defining the EWA ellipse.
%         NOTE: watch the order, which is based on the order in which the
%         derivatives are usually determined from distortion equations
%         (see above).  The middle two values may need to be swapped if
%         you are thinking in terms of scaling vectors.
%
*/
MagickExport void ScaleResampleFilter(ResampleFilter *resample_filter,
  const double dux,const double duy,const double dvx,const double dvy)
{
  double A,B,C,F;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);

  resample_filter->limit_reached = MagickFalse;

  /* A 'point' filter forces use of interpolation instead of area sampling */
  if ( resample_filter->filter == PointFilter )
    return; /* EWA turned off - nothing to do */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "# -----\n" );
  (void) FormatLocaleFile(stderr, "dux=%lf; dvx=%lf;   duy=%lf; dvy=%lf;\n",
    dux, dvx, duy, dvy);
#endif

  /* Find the ellipse coefficients such that
        A*u^2 + B*u*v + C*v^2 = F
     With u,v relative to the point around which we are resampling.
     And the given scaling dx,dy vectors in u,v space
         du/dx,dv/dx   and   du/dy,dv/dy
  */
#if EWA
  /* Direct conversion of derivatives into elliptical coefficients
     However when magnifying images, the scaling vectors will be small
     resulting in an ellipse that is too small to sample properly.  As
     such we need to clamp the major/minor axes to a minimum of 1.0 to
     prevent it from getting too small.
*/ #if EWA_CLAMP { double major_mag, minor_mag, major_x, major_y, minor_x, minor_y; ClampUpAxes(dux,dvx,duy,dvy, &major_mag, &minor_mag, &major_x, &major_y, &minor_x, &minor_y); major_x *= major_mag; major_y *= major_mag; minor_x *= minor_mag; minor_y *= minor_mag; #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "major_x=%lf; major_y=%lf; minor_x=%lf; minor_y=%lf;\n", major_x, major_y, minor_x, minor_y); #endif A = major_y*major_y+minor_y*minor_y; B = -2.0*(major_x*major_y+minor_x*minor_y); C = major_x*major_x+minor_x*minor_x; F = major_mag*minor_mag; F *= F; /* square it */ } #else /* raw unclamped EWA */ A = dvx*dvx+dvy*dvy; B = -2.0*(dux*dvx+duy*dvy); C = dux*dux+duy*duy; F = dux*dvy-duy*dvx; F *= F; /* square it */ #endif /* EWA_CLAMP */ #else /* HQ_EWA */ /* This Paul Heckbert's "Higher Quality EWA" formula, from page 60 in his thesis, which adds a unit circle to the elliptical area so as to do both Reconstruction and Prefiltering of the pixels in the resampling. It also means it is always likely to have at least 4 pixels within the area of the ellipse, for weighted averaging. No scaling will result with F == 4.0 and a circle of radius 2.0, and F smaller than this means magnification is being used. NOTE: This method produces a very blury result at near unity scale while producing perfect results for strong minitification and magnifications. However filter support is fixed to 2.0 (no good for Windowed Sinc filters) */ A = dvx*dvx+dvy*dvy+1; B = -2.0*(dux*dvx+duy*dvy); C = dux*dux+duy*duy+1; F = A*C - B*B/4; #endif #if DEBUG_ELLIPSE (void) FormatLocaleFile(stderr, "A=%lf; B=%lf; C=%lf; F=%lf\n", A,B,C,F); /* Figure out the various information directly about the ellipse. This information currently not needed at this time, but may be needed later for better limit determination. It is also good to have as a record for future debugging */ { double alpha, beta, gamma, Major, Minor; double Eccentricity, Ellipse_Area, Ellipse_Angle; alpha = A+C; beta = A-C; gamma = sqrt(beta*beta + B*B ); if ( alpha - gamma <= MagickEpsilon ) Major = MagickHuge; else Major = sqrt(2*F/(alpha - gamma)); Minor = sqrt(2*F/(alpha + gamma)); (void) FormatLocaleFile(stderr, "# Major=%lf; Minor=%lf\n", Major, Minor ); /* other information about ellipse include... */ Eccentricity = Major/Minor; Ellipse_Area = MagickPI*Major*Minor; Ellipse_Angle = atan2(B, A-C); (void) FormatLocaleFile(stderr, "# Angle=%lf Area=%lf\n", RadiansToDegrees(Ellipse_Angle), Ellipse_Area); } #endif /* If one or both of the scaling vectors is impossibly large (producing a very large raw F value), we may as well not bother doing any form of resampling since resampled area is very large. In this case some alternative means of pixel sampling, such as the average of the whole image is needed to get a reasonable result. Calculate only as needed. */ if ( (4*A*C - B*B) > MagickHuge ) { resample_filter->limit_reached = MagickTrue; return; } /* Scale ellipse to match the filters support (that is, multiply F by the square of the support). 
*/
  F *= resample_filter->support;
  F *= resample_filter->support;

  /* Orthogonal bounds of the ellipse */
  resample_filter->Ulimit = sqrt(C*F/(A*C-0.25*B*B));
  resample_filter->Vlimit = sqrt(A*F/(A*C-0.25*B*B));

  /* Horizontally aligned parallelogram fitted to Ellipse */
  resample_filter->Uwidth = sqrt(F/A); /* Half of the parallelogram width */
  resample_filter->slope = -B/(2.0*A); /* Reciprocal slope of the parallelogram */

#if DEBUG_ELLIPSE
  (void) FormatLocaleFile(stderr, "Ulimit=%lf; Vlimit=%lf; UWidth=%lf; Slope=%lf;\n",
    resample_filter->Ulimit, resample_filter->Vlimit,
    resample_filter->Uwidth, resample_filter->slope );
#endif

  /* Check the absolute area of the parallelogram involved.
   * This limit needs more work, as it is too slow for larger images
   * with tiled views of the horizon.
  */
  if ( (resample_filter->Uwidth * resample_filter->Vlimit)
         > (4.0*resample_filter->image_area)) {
    resample_filter->limit_reached = MagickTrue;
    return;
  }

  /* Scale ellipse formula to directly index the Filter Lookup Table */
  { register double scale;
#if FILTER_LUT
    /* scale so that F = WLUT_WIDTH; -- hardcoded */
    scale = (double)WLUT_WIDTH/F;
#else
    /* scale so that F = resample_filter->F (support^2) */
    scale = resample_filter->F/F;
#endif
    resample_filter->A = A*scale;
    resample_filter->B = B*scale;
    resample_filter->C = C*scale;
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilter() sets the resampling filter lookup table based on a
%  specific filter.  Note that the filter is used as a radial filter, not as
%  a two-pass orthogonally aligned resampling filter.
%
%  The default filter is Gaussian, which is the standard filter used by the
%  original paper on the Elliptical Weighted Average Algorithm.  However
%  other filters can also be used.
%
%  The format of the SetResampleFilter method is:
%
%      void SetResampleFilter(ResampleFilter *resample_filter,
%        const FilterTypes filter,const double blur)
%
%  A description of each parameter follows:
%
%    o resample_filter: the resample filter information structure
%
%    o filter: the resize filter for elliptical weighting LUT
%
%    o blur: filter blur factor (radial scaling) for elliptical weighting LUT
%
*/
MagickExport void SetResampleFilter(ResampleFilter *resample_filter,
  const FilterTypes filter,const double blur)
{
  ResizeFilter
    *resize_filter;

  assert(resample_filter != (ResampleFilter *) NULL);
  assert(resample_filter->signature == MagickSignature);

  resample_filter->do_interpolate = MagickFalse;
  resample_filter->filter = filter;

  if ( filter == PointFilter )
    {
      resample_filter->do_interpolate = MagickTrue;
      return;  /* EWA turned off - nothing more to do */
    }

  /* Set a default cylindrical filter of a 'low blur' Jinc windowed Jinc */
  if ( filter == UndefinedFilter )
    resample_filter->filter = RobidouxFilter;

  resize_filter = AcquireResizeFilter(resample_filter->image,
    resample_filter->filter,blur,MagickTrue,resample_filter->exception);
  if (resize_filter == (ResizeFilter *) NULL)
    {
      (void) ThrowMagickException(resample_filter->exception,GetMagickModule(),
        ModuleError, "UnableToSetFilteringValue",
        "Fall back to default EWA gaussian filter");
      resample_filter->filter = PointFilter;
    }

  /* Get the practical working support for the filter,
   * after any API call blur factors have been accounted for.
  */
#if EWA
  resample_filter->support = GetResizeFilterSupport(resize_filter);
#else
  resample_filter->support = 2.0;  /* fixed support size for HQ-EWA */
#endif

#if FILTER_LUT
  /* Fill the LUT with the weights from the selected filter function */
  {
    register int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    for(Q=0; Q<WLUT_WIDTH; Q++)
      resample_filter->filter_lut[Q] = (double)
        GetResizeFilterWeight(resize_filter,sqrt((double)Q)*r_scale);

    /* finished with the resize filter */
    resize_filter = DestroyResizeFilter(resize_filter);
  }
#else
  /* save the filter and the scaled ellipse bounds needed for filter */
  resample_filter->filter_def = resize_filter;
  resample_filter->F = resample_filter->support*resample_filter->support;
#endif

  /*
    Adjust the scaling of the default unit circle
    This assumes that any real scaling changes will always take place
    AFTER the filter method has been initialized.
  */
  ScaleResampleFilter(resample_filter, 1.0, 0.0, 0.0, 1.0);

#if 0
  /* This is old code kept as a reference only.  It is very wrong,
     and I don't understand exactly what it was attempting to do.
  */
  /*
    Create Normal Gaussian 2D Filter Weighted Lookup Table.
    A normal EWA gaussian lookup would use   exp(Q*ALPHA)
    where  Q = distance squared from 0.0 (center) to 1.0 (edge)
    and    ALPHA = -4.0*ln(2.0)  ==>  -2.77258872223978123767
    The table is of length 1024, and equates to support radius of 2.0
    thus needs to be scaled by  ALPHA*4/1024 and any blur factor squared

    The above came from some reference code provided by Fred Weinhaus
    and seems to have been a guess that was appropriate for its use in
    a 3d perspective landscape mapping program.
  */
  r_scale = -2.77258872223978123767/(WLUT_WIDTH*blur*blur);
  for(Q=0; Q<WLUT_WIDTH; Q++)
    resample_filter->filter_lut[Q] = exp((double)Q*r_scale);
  resample_filter->support = WLUT_WIDTH;
  break;
#endif

#if FILTER_LUT
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp single
#endif
  {
    register int
      Q;
    double
      r_scale;

    /* Scale radius so the filter LUT covers the full support range */
    r_scale = resample_filter->support*sqrt(1.0/(double)WLUT_WIDTH);
    if (IsMagickTrue(GetImageArtifact(resample_filter->image,"resample:verbose")) )
      {
        /* Debug output of the filter weighting LUT
          Gnuplot the LUT with the horizontal axis adjusted to 'r' using...
             plot [0:2][-.2:1] "lut.dat" using (sqrt($0/1024)*2):1 with lines
          The filter values are normalized for comparison.
        */
        printf("#\n");
        printf("# Resampling Filter LUT (%d values)\n", WLUT_WIDTH);
        printf("#\n");
        printf("# Note: values in table are using a squared radius lookup.\n");
        printf("# And the whole table represents the filter's support.\n");
        printf("\n"); /* generates a 'break' in gnuplot if multiple outputs */
        for(Q=0; Q<WLUT_WIDTH; Q++)
          printf("%8.*g %.*g\n",
            GetMagickPrecision(),sqrt((double)Q)*r_scale,
            GetMagickPrecision(),resample_filter->filter_lut[Q] );
      }
    /* output the above once only for each image, and each setting */
    (void) DeleteImageArtifact(resample_filter->image,"resample:verbose");
  }
#endif /* FILTER_LUT */
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S e t R e s a m p l e F i l t e r I n t e r p o l a t e M e t h o d       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetResampleFilterInterpolateMethod() sets the resample filter interpolation
%  method.
% % The format of the SetResampleFilterInterpolateMethod method is: % % MagickBooleanType SetResampleFilterInterpolateMethod( % ResampleFilter *resample_filter,const InterpolateMethod method) % % A description of each parameter follows: % % o resample_filter: the resample filter. % % o method: the interpolation method. % */ MagickExport MagickBooleanType SetResampleFilterInterpolateMethod( ResampleFilter *resample_filter,const InterpolatePixelMethod method) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->interpolate=method; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t R e s a m p l e F i l t e r V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetResampleFilterVirtualPixelMethod() changes the virtual pixel method % associated with the specified resample filter. % % The format of the SetResampleFilterVirtualPixelMethod method is: % % MagickBooleanType SetResampleFilterVirtualPixelMethod( % ResampleFilter *resample_filter,const VirtualPixelMethod method) % % A description of each parameter follows: % % o resample_filter: the resample filter. % % o method: the virtual pixel method. % */ MagickExport MagickBooleanType SetResampleFilterVirtualPixelMethod( ResampleFilter *resample_filter,const VirtualPixelMethod method) { assert(resample_filter != (ResampleFilter *) NULL); assert(resample_filter->signature == MagickSignature); assert(resample_filter->image != (Image *) NULL); if (resample_filter->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", resample_filter->image->filename); resample_filter->virtual_pixel=method; if (method != UndefinedVirtualPixelMethod) (void) SetCacheViewVirtualPixelMethod(resample_filter->view,method); return(MagickTrue); }
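/*
  Aside, not part of resample.c: a minimal standalone sketch of the
  incremental ellipse test used by ResamplePixelColor() above.  Along a scan
  line with fixed V, Q(U) = A*U^2 + B*U*V + C*V^2 is quadratic in U, so it
  can be advanced with two additions per pixel via forward differences:
  DQ = A*(2*U+1) + B*V and DDQ = 2*A.  The coefficients, the threshold F,
  and the helper name are illustrative stand-ins for the pre-scaled values
  held in the ResampleFilter structure, not part of the library API.
*/
static long count_pixels_inside_ellipse(double A, double B, double C,
  double F, long u_start, long u_count, double V)
{
  double U = (double) u_start;
  double Q = (A*U + B*V)*U + C*V*V;  /* Q at the first pixel of the line */
  double DQ = A*(2.0*U+1.0) + B*V;   /* first forward difference         */
  double DDQ = 2.0*A;                /* second forward difference        */
  long hits = 0;
  long u;
  for (u = 0; u < u_count; u++) {
    if (Q < F)
      hits++;   /* the real code accumulates a filter weight here */
    Q += DQ;    /* advance Q(U) -> Q(U+1) with two additions        */
    DQ += DDQ;
  }
  return hits;
}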
DRB034-truedeplinear-var-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

/* This header is separate from features.h so that the compiler can
   include it implicitly at the start of every compilation.  It must
   not itself include <features.h> or any other header that includes
   <features.h> because the implicit include comes before any feature
   test macros that may be defined in a source file before it first
   explicitly includes a system header.  GCC knows the name of this
   header in order to preinclude it.  */

/* glibc's intent is to support the IEC 559 math functionality, real
   and complex.  If the GCC (4.9 and later) predefined macros
   specifying compiler intent are available, use them to determine
   whether the overall intent is to support these features; otherwise,
   presume an older compiler has intent to support these features and
   define these macros by default.  */

/* wchar_t uses Unicode 10.0.0.  Version 10.0 of the Unicode Standard is
   synchronized with ISO/IEC 10646:2017, fifth edition, plus the following
   additions from Amendment 1 to the fifth edition:
   - 56 emoji characters
   - 285 hentaigana
   - 3 additional Zanabazar Square characters */

/*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: [email protected], [email protected], [email protected],
[email protected], [email protected])
LLNL-CODE-732144
All rights reserved.

This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

* Redistributions of source code must retain the above copyright
  notice, this list of conditions and the disclaimer below.

* Redistributions in binary form must reproduce the above copyright
  notice, this list of conditions and the disclaimer (as noted below)
  in the documentation and/or other materials provided with the
  distribution.

* Neither the name of the LLNS/LLNL nor the names of its contributors
  may be used to endorse or promote products derived from this
  software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

/*
A linear expression is used as array subscription.
Data race pair: a[2i+1]@66:5 vs. a[i]@66:14
*/
#include <stdlib.h>
#include <stdio.h>   /* needed for printf() below */

int main(int argc, char * argv[])
{
  int i;
  int len = 2000;
  int _ret_val_0;
  if (argc>1)
  {
    len=atoi(argv[1]);
  }
  /* Size the variable-length array only after len is final, so that a
     command-line length larger than 2000 cannot overflow it. */
  int a[len];
  #pragma cetus private(i)
  #pragma loop name main#0
  #pragma cetus parallel
  #pragma omp parallel for private(i)
  for (i=0; i<len; i ++ )
  {
    a[i]=i;
  }
  #pragma cetus private(i)
  #pragma loop name main#1
  for (i=0; i<(len/2); i ++ )
  {
    a[(2*i)+1]=(a[i]+1);
  }
  #pragma cetus private(i)
  #pragma loop name main#2
  for (i=0; i<len; i ++ )
  {
    printf("%d\n", a[i]);
  }
  _ret_val_0=0;
  return _ret_val_0;
}
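/*
  Aside, not part of the benchmark file: why the second loop above races if
  parallelized.  Iteration i writes a[2*i+1]; any later iteration j = 2*i+1
  (with j < len/2) reads that same element, a loop-carried true (read-after-
  write) dependence, which is exactly the race pair documented in the header
  comment.  This minimal sketch, with an illustrative small len, prints the
  first conflicting iteration pairs.
*/
#include <stdio.h>

int main(void)
{
  int len = 8, a[8];
  int i;
  for (i = 0; i < len; i++)
    a[i] = i;
  for (i = 0; i < len/2; i++) {
    int w = 2*i + 1;              /* element written by iteration i      */
    if (w < len/2)
      printf("iteration %d writes a[%d]; iteration %d reads a[%d]\n",
             i, w, w, w);         /* RAW dependence between iterations   */
    a[w] = a[i] + 1;
  }
  return 0;
}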
stream_int_omp.c
/*-----------------------------------------------------------------------*/ /* Program: Stream */ /* Revision: $Id: stream_omp.c,v 5.4 2009/02/19 13:57:12 mccalpin Exp mccalpin $ */ /* Original code developed by John D. McCalpin */ /* Programmers: John D. McCalpin */ /* Joe R. Zagar */ /* */ /* This program measures memory transfer rates in MB/s for simple */ /* computational kernels coded in C. */ /*-----------------------------------------------------------------------*/ /* Copyright 1991-2003: John D. McCalpin */ /*-----------------------------------------------------------------------*/ /* License: */ /* 1. You are free to use this program and/or to redistribute */ /* this program. */ /* 2. You are free to modify this program for your own use, */ /* including commercial use, subject to the publication */ /* restrictions in item 3. */ /* 3. You are free to publish results obtained from running this */ /* program, or from works that you derive from this program, */ /* with the following limitations: */ /* 3a. In order to be referred to as "STREAM benchmark results", */ /* published results must be in conformance to the STREAM */ /* Run Rules, (briefly reviewed below) published at */ /* http://www.cs.virginia.edu/stream/ref.html */ /* and incorporated herein by reference. */ /* As the copyright holder, John McCalpin retains the */ /* right to determine conformity with the Run Rules. */ /* 3b. Results based on modified source code or on runs not in */ /* accordance with the STREAM Run Rules must be clearly */ /* labelled whenever they are published. Examples of */ /* proper labelling include: */ /* "tuned STREAM benchmark results" */ /* "based on a variant of the STREAM benchmark code" */ /* Other comparable, clear and reasonable labelling is */ /* acceptable. */ /* 3c. Submission of results to the STREAM benchmark web site */ /* is encouraged, but not required. */ /* 4. Use of this program or creation of derived works based on this */ /* program constitutes acceptance of these licensing restrictions. */ /* 5. Absolutely no warranty is expressed or implied. */ /*-----------------------------------------------------------------------*/ # include <stdio.h> # include <math.h> # include <float.h> # include <limits.h> # include <sys/time.h> /* INSTRUCTIONS: * * 1) Stream requires a good bit of memory to run. Adjust the * value of 'N' (below) to give a 'timing calibration' of * at least 20 clock-ticks. This will provide rate estimates * that should be good to about 5% precision. */ # define NN 20000 # define NTIMES 10 # define OFFSET 0 /* * 3) Compile the code with full optimization. Many compilers * generate unreasonably bad code before the optimizer tightens * things up. If the results are unreasonably good, on the * other hand, the optimizer might be too smart for me! * * Try compiling with: * cc -O stream_omp.c -o stream_omp * * This is known to work on Cray, SGI, IBM, and Sun machines. * * * 4) Mail the results to [email protected] * Be sure to include: * a) computer hardware model number and software revision * b) the compiler flags * c) all of the output from the test case. * Thanks! 
* */ # define HLINE "-------------------------------------------------------------\n" # ifndef MYMIN # define MYMIN(x,y) ((x)<(y)?(x):(y)) # endif # ifndef MYMAX # define MYMAX(x,y) ((x)>(y)?(x):(y)) # endif static long long int a[NN+OFFSET], b[NN+OFFSET], c[NN+OFFSET]; static double avgtime[4] = {0}, maxtime[4] = {0}, mintime[4] = {FLT_MAX,FLT_MAX,FLT_MAX,FLT_MAX}; static char *label[4] = {"Copy: ", "Scale: ", "Add: ", "Triad: "}; static double bytes[4] = { 2 * sizeof(long long int) * NN, 2 * sizeof(long long int) * NN, 3 * sizeof(long long int) * NN, 3 * sizeof(long long int) * NN }; extern double mysecond(); extern void checkSTREAMresults(); #ifdef TUNED extern void tuned_STREAM_Copy(); extern void tuned_STREAM_Scale(long long int scalar); extern void tuned_STREAM_Add(); extern void tuned_STREAM_Triad(long long int scalar); #endif int app_main() { int quantum, checktick(); int BytesPerWord; register int j, k; long long int scalar; double t, times[4][NTIMES]; /* --- SETUP --- determine precision and check timing --- */ printf(HLINE); BytesPerWord = sizeof(long long int); printf("This system uses %d bytes per LONG LONG INT PRECISION word.\n", BytesPerWord); printf(HLINE); printf("Array size = %d, Offset = %d\n" , NN, OFFSET); printf("Total memory required = %.1f MB.\n", (3.0 * BytesPerWord) * ( (double) NN / 1048576.0)); printf("Each test is run %d times, but only\n", NTIMES); printf("the *best* time for each is used.\n"); #ifdef _OPENMP printf(HLINE); #pragma omp parallel private(k) { k = omp_get_num_threads(); printf ("Number of Threads requested = %i\n",k); } #endif /* Get initial value for system clock. */ #pragma omp parallel for for (j=0; j<NN; j++) { a[j] = 1; b[j] = 2; c[j] = 0; } printf(HLINE); if ( (quantum = checktick()) >= 1) printf("Your clock granularity/precision appears to be " "%d microseconds.\n", quantum); else printf("Your clock granularity appears to be " "less than one microsecond.\n"); t = mysecond(); #pragma omp parallel for for (j = 0; j < NN; j++) a[j] = 2 * a[j]; t = 1.0E6 * (mysecond() - t); printf("Each test below will take on the order" " of %d microseconds.\n", (int) t ); printf(" (= %d clock ticks)\n", (int) (t/quantum) ); printf("Increase the size of the arrays if this shows that\n"); printf("you are not getting at least 20 clock ticks per test.\n"); printf(HLINE); printf("WARNING -- The above is only a rough guideline.\n"); printf("For best results, please be sure you know the\n"); printf("precision of your system timer.\n"); printf(HLINE); /* --- MAIN LOOP --- repeat test cases NTIMES times --- */ scalar = 3; int unitCount = 8; #define THREADSPERUNIT 32 int itersPerUnit = NN/unitCount; for (k=0; k<NTIMES; k++) { times[0][k] = mysecond(); #pragma omp target teams distribute parallel for dist_schedule(static,itersPerUnit) schedule(static,1) num_teams(unitCount) num_threads(THREADSPERUNIT) for (j=0; j<NN; j++) c[j] = a[j]; times[0][k] = mysecond() - times[0][k]; times[1][k] = mysecond(); printf("finished loop %d iter %d\n", 1, k); #pragma omp target teams distribute parallel for dist_schedule(static,itersPerUnit) schedule(static,1) num_teams(unitCount) num_threads(THREADSPERUNIT) for (j=0; j<NN; j++) b[j] = scalar*c[j]; printf("finished loop %d iter %d\n", 2, k); times[1][k] = mysecond() - times[1][k]; times[2][k] = mysecond(); #pragma omp target teams distribute parallel for dist_schedule(static,itersPerUnit) schedule(static,1) num_teams(unitCount) num_threads(THREADSPERUNIT) for (j=0; j<NN; j++) c[j] = a[j]+b[j]; printf("finished loop %d iter %d\n", 3, 
k); times[2][k] = mysecond() - times[2][k]; times[3][k] = mysecond(); #pragma omp target teams distribute parallel for dist_schedule(static,itersPerUnit) schedule(static,1) num_teams(unitCount) num_threads(THREADSPERUNIT) for (j=0; j<NN; j++) a[j] = b[j]+scalar*c[j]; printf("finished loop %d iter %d\n", 4, k); times[3][k] = mysecond() - times[3][k]; } /* --- SUMMARY --- */ for (k=1; k<NTIMES; k++) /* note -- skip first iteration */ { for (j=0; j<4; j++) { avgtime[j] = avgtime[j] + times[j][k]; mintime[j] = MYMIN(mintime[j], times[j][k]); maxtime[j] = MYMAX(maxtime[j], times[j][k]); } } printf("Function Rate (MB/s) Avg time Min time Max time\n"); for (j=0; j<4; j++) { avgtime[j] = avgtime[j]/(double)(NTIMES-1); printf("%s%11.4f %11.4f %11.4f %11.4f\n", label[j], 1.0E-06 * bytes[j]/mintime[j], avgtime[j], mintime[j], maxtime[j]); } printf(HLINE); /* --- Check Results --- */ checkSTREAMresults(); printf(HLINE); return 0; } # define M 20 int checktick() { int i, minDelta, Delta; double t1, t2, timesfound[M]; /* Collect a sequence of M unique time values from the system. */ for (i = 0; i < M; i++) { t1 = mysecond(); while( ((t2=mysecond()) - t1) < 1.0E-6 ) ; timesfound[i] = t1 = t2; } /* * Determine the minimum difference between these M values. * This result will be our estimate (in microseconds) for the * clock granularity. */ minDelta = 1000000; for (i = 1; i < M; i++) { Delta = (int)( 1.0E6 * (timesfound[i]-timesfound[i-1])); minDelta = MYMIN(minDelta, MYMAX(Delta,0)); } return(minDelta); } /* A gettimeofday routine to give access to the wall clock timer on most UNIX-like systems. */ #include <sys/time.h> double mysecond() { struct timeval tp; struct timezone tzp; int i; i = gettimeofday(&tp,&tzp); return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 ); } void checkSTREAMresults () { long long int aj,bj,cj,scalar; long long int asum,bsum,csum; long long int epsilon; int j,k; /* reproduce initialization */ aj = 1; bj = 2; cj = 0; /* a[] is modified during timing check */ aj = 2 * aj; /* now execute timing loop */ scalar = 3; for (k=0; k<NTIMES; k++) { cj = aj; bj = scalar*cj; cj = aj+bj; aj = bj+scalar*cj; } aj = aj * (long long int) (NN); bj = bj * (long long int) (NN); cj = cj * (long long int) (NN); asum = 0; bsum = 0; csum = 0; for (j=0; j<NN; j++) { asum += a[j]; bsum += b[j]; csum += c[j]; } #ifdef VERBOSE printf ("Results Comparison: \n"); printf (" Expected : %lld %lld %lld \n",aj,bj,cj); printf (" Observed : %lld %lld %lld \n",asum,bsum,csum); #endif #define abs(a) ((a) >= 0 ? 
(a) : -(a)) epsilon = 0; if (abs(aj-asum)/asum > epsilon) { printf ("Failed Validation on array a[]\n"); printf (" Expected : %lld \n",aj); printf (" Observed : %lld \n",asum); } else if (abs(bj-bsum)/bsum > epsilon) { printf ("Failed Validation on array b[]\n"); printf (" Expected : %lld \n",bj); printf (" Observed : %lld \n",bsum); } else if (abs(cj-csum)/csum > epsilon) { printf ("Failed Validation on array c[]\n"); printf (" Expected : %lld \n",cj); printf (" Observed : %lld \n",csum); } else { printf ("Solution Validates\n"); } } #if 0 void tuned_STREAM_Copy() { int j; #pragma omp parallel for schedule(static, 1) for (j=0; j<NN; j++) c[j] = a[j]; } void tuned_STREAM_Scale(long long int scalar) { int j; #pragma omp parallel for schedule(static, 1) for (j=0; j<NN; j++) b[j] = scalar*c[j]; } void tuned_STREAM_Add() { int j; #pragma omp parallel for schedule(static, 1) for (j=0; j<NN; j++) c[j] = a[j]+b[j]; } void tuned_STREAM_Triad(long long int scalar) { int j; #pragma omp parallel for schedule(static, 1) for (j=0; j<NN; j++) a[j] = b[j]+scalar*c[j]; } #endif
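/*
  Aside, not part of the benchmark: how the reported rate falls out of the
  bookkeeping above.  Copy and Scale touch two arrays per element and Add
  and Triad touch three, so bytes moved = (2 or 3) * sizeof(long long int)
  * NN, and the printed rate is 1.0E-06 * bytes / best_time, i.e. MB/s in
  decimal megabytes.  The best_time value below is a hypothetical
  placeholder, not a measured result.
*/
#include <stdio.h>

int main(void)
{
  const long n = 20000;                        /* mirrors NN above        */
  const double word = (double) sizeof(long long int);
  double triad_bytes = 3.0 * word * (double) n; /* a[], b[], c[] per pass */
  double best_time = 1.0e-4;                    /* hypothetical 100 us    */
  printf("Triad: %.1f MB/s\n", 1.0E-06 * triad_bytes / best_time);
  return 0;
}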
stresslet_direct.c
#include "mex.h" #include "math.h" #define X prhs[0] // Source locations #define F prhs[1] // Source strengths #define U plhs[0] // Output #ifndef VERBOSE #define VERBOSE 0 #endif #define PI 3.141592653589793 inline double dot(double * a,double * b) { return ( a[0]*b[0] + a[1]*b[1] + a[2]*b[2] ); } /* no input checking is done */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[] ) { // input dims const int N = mxGetM(X); const double* restrict x = mxGetPr(X); const double* restrict f = mxGetPr(F); U = mxCreateDoubleMatrix(N, 3, mxREAL); double* restrict u = mxGetPr(U); if(VERBOSE) mexPrintf("[FS Stresslet Direct ] MEX N=%d ",N); // call kernel #ifdef _OPENMP #pragma omp parallel for #endif for(int m=0; m<N; m++) { double p[] = {0,0,0}; for(int n = 0; n<N; n++){ double q[] = {f[n], f[n+N], f[n+2*N]}; double s[] = {f[n+3*N], f[n+4*N], f[n+5*N]}; double r[] = {x[m]-x[n], x[m+N]-x[n+N],x[m+2*N]-x[n+2*N]}; double ri = 1.0/sqrt(dot(r,r)); double ri5 = ri*ri*ri*ri*ri; double rdots = dot(r,s); double rdotq = dot(r,q); double c = -6.0* rdots * rdotq * ri5; if(m==n) continue; p[0] += c*r[0]; p[1] += c*r[1]; p[2] += c*r[2]; } u[m ] = p[0]; u[m+ N] = p[1]; u[m+2*N] = p[2]; } }
CutPursuit_SPG.h
#pragma once #include "CutPursuit.h" #include "Common.h" namespace CP { template <typename T> class CutPursuit_SPG : public CutPursuit<T> { public: ~CutPursuit_SPG(){ }; //============================================================================================= //============================= COMPUTE ENERGY =========================================== //============================================================================================= virtual std::pair<T,T> compute_energy() override { VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); EdgeAttributeMap<T> edge_attribute_map = boost::get(boost::edge_bundle, this->main_graph); //the first element pair_energy of is the fidelity and the second the penalty std::pair<T,T> pair_energy; T energy = 0; //#pragma omp parallel for private(i_dim) if (this->parameter.parallel) schedule(static) reduction(+:energy,i) for (uint32_t ind_ver = 0; ind_ver < this->nVertex; ind_ver++) { VertexDescriptor<T> i_ver = boost::vertex(ind_ver, this->main_graph); for(uint32_t i_dim=0; i_dim<this->dim; i_dim++) { energy += .5*vertex_attribute_map(i_ver).weight * pow(vertex_attribute_map(i_ver).observation[i_dim] - vertex_attribute_map(i_ver).value[i_dim],2); } } pair_energy.first = energy; energy = 0; EdgeIterator<T> i_edg, i_edg_end = boost::edges(this->main_graph).second; for (i_edg = boost::edges(this->main_graph).first; i_edg != i_edg_end; ++i_edg) { if (!edge_attribute_map(*i_edg).realEdge) { continue; } energy += .5 * edge_attribute_map(*i_edg).isActive * this->parameter.reg_strenth * edge_attribute_map(*i_edg).weight; } pair_energy.second = energy; return pair_energy; } //============================================================================================= //============================= SPLIT =========================================== //============================================================================================= virtual uint32_t split() override { // split the graph by trying to find the best binary partition // each components is split into B and notB // for each components we associate the value h_1 and h_2 to vertices in B or notB // the affectation as well as h_1 and h_2 are computed alternatively //tic(); //--------loading structures--------------------------------------------------------------- uint32_t nb_comp = this->components.size(); VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph); uint32_t saturation; //stores wether each vertex is B or not std::vector<bool> binary_label(this->nVertex); //initialize the binary partition with kmeans this->init_labels(binary_label, true); //centers is the value of each binary component in the optimal partition VectorOfCentroids<T> centers(nb_comp, this->dim); //-----main loop---------------------------------------------------------------- // the optimal flow is iteratively approximated T unary_weight = pow(this->parameter.weight_decay, -float(this->parameter.flow_steps)); for (uint32_t i_step = 0; i_step < this->parameter.flow_steps; i_step++) { unary_weight = unary_weight * this->parameter.weight_decay; //the regularization strength at this step //compute h_1 and h_2 centers = VectorOfCentroids<T>(nb_comp, this->dim); this->compute_centers(centers, nb_comp,binary_label); this->set_capacities(centers, unary_weight); // update the capacities of the flow graph boost::boykov_kolmogorov_max_flow( 
this->main_graph, get(&EdgeAttribute<T>::capacity , this->main_graph), get(&EdgeAttribute<T>::residualCapacity, this->main_graph), get(&EdgeAttribute<T>::edge_reverse , this->main_graph), get(&VertexAttribute<T>::color , this->main_graph), get(boost::vertex_index , this->main_graph), this->source, this->sink); for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++) { if (this->saturated_components[ind_com]) { continue; } for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++) { binary_label[vertex_index_map(this->components[ind_com][i_ver])] = (vertex_attribute_map(this->components[ind_com][i_ver]).color == vertex_attribute_map(this->sink).color); } } } saturation = this->activate_edges(false); return saturation; } //============================================================================================= //============================= INIT_L2 ====== =========================================== //============================================================================================= inline void init_labels(std::vector<bool> & binary_label, bool spatial_part) { //-----initialize the labelling for each components with kmeans------------------------------ VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph); uint32_t nb_comp = this->components.size(); // ind_com; //#pragma omp parallel for private(ind_com) //if (nb_comp>=8) schedule(dynamic) int dim_spat; if (spatial_part) { dim_spat = this->dim-0; } else { dim_spat = this->dim; } #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic) for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++) { std::vector< std::vector<T> > kernels(2, std::vector<T>(this->dim)); T total_weight[2]; T best_energy; T current_energy; uint32_t comp_size = this->components[ind_com].size(); std::vector<bool> potential_label(comp_size); std::vector<T> energy_array(comp_size); if (this->saturated_components[ind_com] || comp_size <= 1) { continue; } for (uint32_t init_kmeans = 0; init_kmeans < this->parameter.kmeans_resampling; init_kmeans++) {//proceed to several initilialisation of kmeans and pick up the best one //----- initialization with KM++ ------------------ uint32_t first_kernel = std::rand() % comp_size, second_kernel = 0; // first kernel attributed for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { kernels[0][i_dim] = vertex_attribute_map(this->components[ind_com][first_kernel ]).observation[i_dim]; } best_energy = 0; //now compute the square distance of each vertex to this kernel #pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(best_energy) schedule(static) for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { energy_array[i_ver] = 0; for(uint32_t i_dim=0; i_dim < dim_spat; i_dim++) { energy_array[i_ver] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]- kernels[0][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight; } best_energy += energy_array[i_ver]; } // we now generate a random number to determinate which node will be the second kernel T random_sample = ((T)(rand())) / ((T)(RAND_MAX)); current_energy = best_energy * random_sample; for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { current_energy -= energy_array[i_ver]; if (current_energy < 0) { //we have selected the second kernel second_kernel = i_ver; break; } } for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { // now fill the second 
kernel kernels[1][i_dim] = vertex_attribute_map(this->components[ind_com][second_kernel]).observation[i_dim]; } //----main kmeans loop----- for (uint32_t ite_kmeans = 0; ite_kmeans < this->parameter.kmeans_ite; ite_kmeans++) { //--affectation step: associate each node with its closest kernel------------------- #pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(potential_label) schedule(static) for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { std::vector<T> distance_kernels(2); for(uint32_t i_dim=0; i_dim < dim_spat; i_dim++) { distance_kernels[0] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim]- kernels[0][i_dim],2); distance_kernels[1] += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] - kernels[1][i_dim],2); } potential_label[i_ver] = distance_kernels[0] > distance_kernels[1]; } //-----computation of the new kernels---------------------------- total_weight[0] = 0.; total_weight[1] = 0.; for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { kernels[0][i_dim] = 0; kernels[1][i_dim] = 0; } #pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(potential_label) schedule(static) for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { if (vertex_attribute_map(this->components[ind_com][i_ver]).weight==0) { continue; } if (potential_label[i_ver]) { total_weight[0] += vertex_attribute_map(this->components[ind_com][i_ver]).weight; for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { kernels[0][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] * vertex_attribute_map(this->components[ind_com][i_ver]).weight ; } } else { total_weight[1] += vertex_attribute_map(this->components[ind_com][i_ver]).weight; for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { kernels[1][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] * vertex_attribute_map(this->components[ind_com][i_ver]).weight; } } } if ((total_weight[0] == 0)||(total_weight[1] == 0)) { break; } for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { kernels[0][i_dim] = kernels[0][i_dim] / total_weight[0]; kernels[1][i_dim] = kernels[1][i_dim] / total_weight[1]; } } //----compute the associated energy ------ current_energy = 0; #pragma omp parallel for if (nb_comp < omp_get_num_threads()) shared(potential_label) schedule(static) for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { for(uint32_t i_dim=0; i_dim < dim_spat; i_dim++) { if (potential_label[i_ver]) { current_energy += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] - kernels[0][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight; } else { current_energy += pow(vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] - kernels[1][i_dim],2) * vertex_attribute_map(this->components[ind_com][i_ver]).weight; } } } if (current_energy < best_energy) { best_energy = current_energy; for (uint32_t i_ver = 0; i_ver < comp_size; i_ver++) { binary_label[vertex_index_map(this->components[ind_com][i_ver])] = potential_label[i_ver]; } } } } } //============================================================================================= //============================= COMPUTE_CENTERS_L2 ========================================== //============================================================================================= inline void compute_centers(VectorOfCentroids<T> & centers, const uint32_t & nb_comp , const std::vector<bool> & binary_label) { //compute for each component the 
values of h_1 and h_2 #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic) for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++) { if (this->saturated_components[ind_com]) { continue; } compute_center(centers.centroids[ind_com], ind_com, binary_label); } return; } //============================================================================================= //============================= COMPUTE_CENTER_L2 ========================================== //============================================================================================= inline void compute_center( std::vector< std::vector<T> > & center, const uint32_t & ind_com , const std::vector<bool> & binary_label) { //compute for each component the values of the centroids corresponding to the optimal binary partition VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); VertexIndexMap<T> vertex_index_map = boost::get(boost::vertex_index, this->main_graph); T total_weight[2]; total_weight[0] = 0.; total_weight[1] = 0.; //#pragma omp parallel for if (this->parameter.parallel) for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++) { if (vertex_attribute_map(this->components[ind_com][i_ver]).weight==0) { continue; } if (binary_label[vertex_index_map(this->components[ind_com][i_ver])]) { total_weight[0] += vertex_attribute_map(this->components[ind_com][i_ver]).weight; for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { center[0][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] * vertex_attribute_map(this->components[ind_com][i_ver]).weight ; } } else { total_weight[1] += vertex_attribute_map(this->components[ind_com][i_ver]).weight; for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { center[1][i_dim] += vertex_attribute_map(this->components[ind_com][i_ver]).observation[i_dim] * vertex_attribute_map(this->components[ind_com][i_ver]).weight; } } } if ((total_weight[0] == 0)||(total_weight[1] == 0)) { //the component is saturated //this->saturateComponent(ind_com, false); for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { center[0][i_dim] = vertex_attribute_map(this->components[ind_com][0]).value[i_dim]; center[1][i_dim] = vertex_attribute_map(this->components[ind_com][0]).value[i_dim]; } } else { for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { center[0][i_dim] = center[0][i_dim] / total_weight[0]; center[1][i_dim] = center[1][i_dim] / total_weight[1]; } } return; } //============================================================================================= //============================= SET_CAPACITIES ========================================== //============================================================================================= inline void set_capacities(const VectorOfCentroids<T> & centers, T unary_weight) { VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); EdgeAttributeMap<T> edge_attribute_map = boost::get(boost::edge_bundle, this->main_graph); //----first compute the capacity in sink/node edges------------------------------------ //#pragma omp parallel for if (this->parameter.parallel) schedule(dynamic) uint32_t nb_comp = this->components.size(); #pragma omp parallel for if (nb_comp >= omp_get_num_threads()) schedule(dynamic) for (uint32_t ind_com = 0; ind_com < nb_comp; ind_com++) { VertexDescriptor<T> desc_v; EdgeDescriptor desc_source2v, desc_v2sink, desc_v2source; T cost_B, cost_notB; //the cost of being in B or not B, local for each 
component if (this->saturated_components[ind_com]) { continue; } for (uint32_t i_ver = 0; i_ver < this->components[ind_com].size(); i_ver++) { desc_v = this->components[ind_com][i_ver]; // because of the adjacency structure NEVER access edge (source,v) directly! desc_v2source = boost::edge(desc_v, this->source,this->main_graph).first; desc_source2v = edge_attribute_map(desc_v2source).edge_reverse; //use edge_reverse instead desc_v2sink = boost::edge(desc_v, this->sink,this->main_graph).first; cost_B = 0; cost_notB = 0; if (vertex_attribute_map(desc_v).weight==0) { //no observation - no cut edge_attribute_map(desc_source2v).capacity = 0; edge_attribute_map(desc_v2sink).capacity = 0; continue; } for(uint32_t i_dim=0; i_dim < this->dim; i_dim++) { cost_B += 0.5*vertex_attribute_map(desc_v).weight * (pow(centers.centroids[ind_com][0][i_dim],2) - 2 * (centers.centroids[ind_com][0][i_dim] * vertex_attribute_map(desc_v).observation[i_dim])); cost_notB += 0.5*vertex_attribute_map(desc_v).weight * (pow(centers.centroids[ind_com][1][i_dim],2) - 2 * (centers.centroids[ind_com][1][i_dim] * vertex_attribute_map(desc_v).observation[i_dim])); } if (cost_B>cost_notB) { edge_attribute_map(desc_source2v).capacity = (cost_B - cost_notB) ; edge_attribute_map(desc_v2sink).capacity = 0.; } else { edge_attribute_map(desc_source2v).capacity = 0.; edge_attribute_map(desc_v2sink).capacity = (cost_notB - cost_B) ; } } } //----then set the vertex to vertex edges --------------------------------------------- EdgeIterator<T> i_edg, i_edg_end; for (boost::tie(i_edg, i_edg_end) = boost::edges(this->main_graph); i_edg != i_edg_end; ++i_edg) { if (!edge_attribute_map(*i_edg).realEdge) { continue; } if (!edge_attribute_map(*i_edg).isActive) { edge_attribute_map(*i_edg).capacity = edge_attribute_map(*i_edg).weight * this->parameter.reg_strenth / unary_weight; } else { edge_attribute_map(*i_edg).capacity = 0; } } } //============================================================================================= //================================= COMPUTE_VALUE ========================================= //============================================================================================= virtual std::pair<std::vector<T>, T> compute_value(const uint32_t & ind_com) override { VertexAttributeMap<T> vertex_attribute_map = boost::get(boost::vertex_bundle, this->main_graph); T total_weight = 0; std::vector<T> compValue(this->dim); std::fill((compValue.begin()),(compValue.end()),0); //kept serial: total_weight and compValue are shared accumulators, so a parallel for here would be a data race //#pragma omp parallel for if (this->parameter.parallel) schedule(static) for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver) { total_weight += vertex_attribute_map(this->components[ind_com][ind_ver]).weight; for(uint32_t i_dim=0; i_dim<this->dim; i_dim++) { compValue[i_dim] += vertex_attribute_map(this->components[ind_com][ind_ver]).observation[i_dim] * vertex_attribute_map(this->components[ind_com][ind_ver]).weight; } vertex_attribute_map(this->components[ind_com][ind_ver]).in_component = ind_com; } for(uint32_t i_dim=0; i_dim<this->dim; i_dim++) { compValue[i_dim] = compValue[i_dim] / total_weight; } for (uint32_t ind_ver = 0; ind_ver < this->components[ind_com].size(); ++ind_ver) { for(uint32_t i_dim=0; i_dim<this->dim; i_dim++) { vertex_attribute_map(this->components[ind_com][ind_ver]).value[i_dim] = compValue[i_dim]; } } return std::pair<std::vector<T>, T>(compValue, total_weight); } //============================================================================================= //=================================
COMPUTE_MERGE_GAIN ========================================= //============================================================================================= virtual std::pair<std::vector<T>, T> compute_merge_gain(const VertexDescriptor<T> & comp1 , const VertexDescriptor<T> & comp2) override { VertexAttributeMap<T> reduced_vertex_attribute_map = boost::get(boost::vertex_bundle, this->reduced_graph); std::vector<T> merge_value(this->dim); T gain = 0; // compute the value obtained by merging the two connected components for(uint32_t i_dim=0; i_dim<this->dim; i_dim++) { merge_value[i_dim] = (reduced_vertex_attribute_map(comp1).weight * reduced_vertex_attribute_map(comp1).value[i_dim] +reduced_vertex_attribute_map(comp2).weight * reduced_vertex_attribute_map(comp2).value[i_dim]) /(reduced_vertex_attribute_map(comp1).weight +reduced_vertex_attribute_map(comp2).weight); gain += 0.5 * (pow(merge_value[i_dim],2) * (reduced_vertex_attribute_map(comp1).weight +reduced_vertex_attribute_map(comp2).weight) - pow(reduced_vertex_attribute_map(comp1).value[i_dim],2) * reduced_vertex_attribute_map(comp1).weight - pow(reduced_vertex_attribute_map(comp2).value[i_dim],2) * reduced_vertex_attribute_map(comp2).weight); } return std::pair<std::vector<T>, T>(merge_value, gain); } }; }
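// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original file): the k-means loop above is
// a weighted 2-means split. The standalone version below shows the same
// assign / re-estimate / energy steps on plain points; Point and two_means
// are illustrative names, and the surrounding class then refines the
// resulting labeling with the source/sink flow problem built in
// set_capacities above.
#include <cstddef>
#include <cstdint>
#include <vector>

struct Point { std::vector<double> obs; double weight; };

// Runs n_ite passes of weighted 2-means. 'kernels' holds the two centroids,
// seeded by the caller (above: the observations of two chosen vertices).
// label[i] == true means point i is assigned to kernel 0. Returns the final
// energy: the weighted squared distance of every point to its kernel.
static double two_means(const std::vector<Point> &pts,
                        std::vector<std::vector<double>> &kernels,
                        std::vector<bool> &label, uint32_t n_ite) {
  const std::size_t dim = kernels[0].size();
  for (uint32_t ite = 0; ite < n_ite; ++ite) {
    // assignment step: each point goes to the nearer kernel
    for (std::size_t i = 0; i < pts.size(); ++i) {
      double d0 = 0., d1 = 0.;
      for (std::size_t d = 0; d < dim; ++d) {
        d0 += (pts[i].obs[d] - kernels[0][d]) * (pts[i].obs[d] - kernels[0][d]);
        d1 += (pts[i].obs[d] - kernels[1][d]) * (pts[i].obs[d] - kernels[1][d]);
      }
      label[i] = (d0 <= d1);
    }
    // update step: each kernel becomes the weighted mean of its points
    double w[2] = {0., 0.};
    std::vector<std::vector<double>> sum(2, std::vector<double>(dim, 0.));
    for (std::size_t i = 0; i < pts.size(); ++i) {
      const int c = label[i] ? 0 : 1;
      w[c] += pts[i].weight;
      for (std::size_t d = 0; d < dim; ++d)
        sum[c][d] += pts[i].obs[d] * pts[i].weight;
    }
    if (w[0] == 0. || w[1] == 0.) break;  // degenerate split, same exit as above
    for (int c = 0; c < 2; ++c)
      for (std::size_t d = 0; d < dim; ++d) kernels[c][d] = sum[c][d] / w[c];
  }
  // energy of the final partition
  double energy = 0.;
  for (std::size_t i = 0; i < pts.size(); ++i) {
    const int c = label[i] ? 0 : 1;
    for (std::size_t d = 0; d < dim; ++d) {
      const double diff = pts[i].obs[d] - kernels[c][d];
      energy += diff * diff * pts[i].weight;
    }
  }
  return energy;
}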
GB_unop__identity_uint16_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB (_unop_apply__identity_uint16_fp64) // op(A') function: GB (_unop_tran__identity_uint16_fp64) // C type: uint16_t // A type: double // cast: uint16_t cij = GB_cast_to_uint16_t ((double) (aij)) // unaryop: cij = aij #define GB_ATYPE \ double #define GB_CTYPE \ uint16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; \ Cx [pC] = z ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_UINT16 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_apply__identity_uint16_fp64) ( uint16_t *Cx, // Cx and Ax may be aliased const double *Ax, const int8_t *restrict Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; // TODO: if OP is ONE and uniform-valued matrices are exploited, then // do this in O(1) time if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; Cx [p] = z ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; double aij = Ax [p] ; uint16_t z = GB_cast_to_uint16_t ((double) (aij)) ; Cx [p] = z ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB (_unop_tran__identity_uint16_fp64) ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
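// ---------------------------------------------------------------------------
// Editor's sketch (an assumption, not the GraphBLAS source): GB_cast_to_uint16_t
// has to guard the double -> uint16_t conversion, because a plain C cast is
// undefined for NaN and for values outside [0, 65535]. A plausible stand-in,
// together with the same OpenMP apply pattern as the non-bitmap branch above;
// cast_to_uint16 and apply_identity_uint16_fp64 are illustrative names.
#include <cmath>
#include <cstdint>

static inline uint16_t cast_to_uint16 (double x)
{
    if (std::isnan (x) || x <= 0) return (0) ;   // NaN and negatives clamp to 0
    if (x >= 65535) return (65535) ;             // clamp to UINT16_MAX
    return ((uint16_t) x) ;                      // in range: truncating cast
}

void apply_identity_uint16_fp64 (uint16_t *Cx, const double *Ax,
    int64_t anz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        Cx [p] = cast_to_uint16 (Ax [p]) ;
    }
}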
GB_unaryop__abs_int16_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__abs_int16_uint32 // op(A') function: GB_tran__abs_int16_uint32 // C type: int16_t // A type: uint32_t // cast: int16_t cij = (int16_t) aij // unaryop: cij = GB_IABS (aij) #define GB_ATYPE \ uint32_t #define GB_CTYPE \ int16_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IABS (x) ; // casting #define GB_CASTING(z, x) \ int16_t z = (int16_t) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ABS || GxB_NO_INT16 || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__abs_int16_uint32 ( int16_t *restrict Cx, const uint32_t *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__abs_int16_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *restrict *Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
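// ---------------------------------------------------------------------------
// Editor's note (illustrative, derived from the macro definitions above): in
// this kernel the loop body GB_CAST_OP (p, p) expands to roughly
//
//      uint32_t aij = Ax [p] ;             // GB_GETA: load the input entry
//      int16_t x = (int16_t) aij ;         // GB_CASTING: typecast first
//      Cx [p] = GB_IABS (x) ;              // GB_OP: then take the abs value
//
// i.e. the uint32_t value is narrowed to int16_t before GB_IABS is applied,
// so the absolute value is taken of the already-converted signed value.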
pi.c
// Copyright © 2016 Martin Ueding <[email protected]> // Licensed under the MIT license. #include <omp.h> #include <stdint.h> #include <stdio.h> #include <stdlib.h> int main(int argc, char **argv) { uint_fast64_t trials = 1e9; uint_fast64_t accepted = 0; #pragma omp parallel reduction(+ : accepted) { unsigned int seed = omp_get_thread_num(); #pragma omp for for (uint_fast64_t i = 0; i < trials; i++) { double x = (double)rand_r(&seed) / RAND_MAX; double y = (double)rand_r(&seed) / RAND_MAX; double radius_squared = x * x + y * y; if (radius_squared < 1.0) { ++accepted; } } } printf("π = %.20g\n", 4.0 * accepted / trials); return 0; }
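// ---------------------------------------------------------------------------
// Editor's note: a build-and-run sketch for the estimator above (the exact
// compiler invocation is an assumption; any OpenMP-capable C compiler works):
//
//      cc -O2 -fopenmp pi.c -o pi && ./pi
//
// Accuracy check: accepted/trials estimates p = pi/4, so the standard error
// of 4*p_hat is sqrt(16 p (1-p) / n) = sqrt(pi (4-pi) / n), about 5.2e-5 for
// n = 1e9 trials -- roughly four correct decimal digits in expectation.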
Parser.h
//===--- Parser.h - C Language Parser ---------------------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Parser interface. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_PARSE_PARSER_H #define LLVM_CLANG_PARSE_PARSER_H #include "clang/AST/OpenMPClause.h" #include "clang/AST/Availability.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/OperatorPrecedence.h" #include "clang/Basic/Specifiers.h" #include "clang/Lex/CodeCompletionHandler.h" #include "clang/Lex/Preprocessor.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/Sema.h" #include "llvm/ADT/SmallVector.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/PrettyStackTrace.h" #include "llvm/Support/SaveAndRestore.h" #include <memory> #include <stack> namespace clang { class PragmaHandler; class Scope; class BalancedDelimiterTracker; class CorrectionCandidateCallback; class DeclGroupRef; class DiagnosticBuilder; struct LoopHint; class Parser; class ParsingDeclRAIIObject; class ParsingDeclSpec; class ParsingDeclarator; class ParsingFieldDeclarator; class ColonProtectionRAIIObject; class InMessageExpressionRAIIObject; class PoisonSEHIdentifiersRAIIObject; class OMPClause; class ObjCTypeParamList; class ObjCTypeParameter; /// Parser - This implements a parser for the C family of languages. After /// parsing units of the grammar, productions are invoked to handle whatever has /// been read. /// class Parser : public CodeCompletionHandler { friend class ColonProtectionRAIIObject; friend class ParsingOpenMPDirectiveRAII; friend class InMessageExpressionRAIIObject; friend class PoisonSEHIdentifiersRAIIObject; friend class ObjCDeclContextSwitch; friend class ParenBraceBracketBalancer; friend class BalancedDelimiterTracker; Preprocessor &PP; /// Tok - The current token we are peeking ahead. All parsing methods assume /// that this is valid. Token Tok; // PrevTokLocation - The location of the token we previously // consumed. This token is used for diagnostics where we expected to // see a token following another token (e.g., the ';' at the end of // a statement). SourceLocation PrevTokLocation; /// Tracks an expected type for the current token when parsing an expression. /// Used by code completion for ranking. PreferredTypeBuilder PreferredType; unsigned short ParenCount = 0, BracketCount = 0, BraceCount = 0; unsigned short MisplacedModuleBeginCount = 0; /// Actions - These are the callbacks we invoke as we parse various constructs /// in the file. Sema &Actions; DiagnosticsEngine &Diags; /// ScopeCache - Cache scopes to reduce malloc traffic. enum { ScopeCacheSize = 16 }; unsigned NumCachedScopes; Scope *ScopeCache[ScopeCacheSize]; /// Identifiers used for SEH handling in Borland. These are only /// allowed in particular circumstances // __except block IdentifierInfo *Ident__exception_code, *Ident___exception_code, *Ident_GetExceptionCode; // __except filter expression IdentifierInfo *Ident__exception_info, *Ident___exception_info, *Ident_GetExceptionInfo; // __finally IdentifierInfo *Ident__abnormal_termination, *Ident___abnormal_termination, *Ident_AbnormalTermination; /// Contextual keywords for Microsoft extensions. 
IdentifierInfo *Ident__except; mutable IdentifierInfo *Ident_sealed; /// Ident_super - IdentifierInfo for "super", to support fast /// comparison. IdentifierInfo *Ident_super; /// Ident_vector, Ident_bool - cached IdentifierInfos for "vector" and /// "bool" fast comparison. Only present if AltiVec or ZVector are enabled. IdentifierInfo *Ident_vector; IdentifierInfo *Ident_bool; /// Ident_pixel - cached IdentifierInfos for "pixel" fast comparison. /// Only present if AltiVec enabled. IdentifierInfo *Ident_pixel; /// Objective-C contextual keywords. IdentifierInfo *Ident_instancetype; /// Identifier for "introduced". IdentifierInfo *Ident_introduced; /// Identifier for "deprecated". IdentifierInfo *Ident_deprecated; /// Identifier for "obsoleted". IdentifierInfo *Ident_obsoleted; /// Identifier for "unavailable". IdentifierInfo *Ident_unavailable; /// Identifier for "message". IdentifierInfo *Ident_message; /// Identifier for "strict". IdentifierInfo *Ident_strict; /// Identifier for "replacement". IdentifierInfo *Ident_replacement; /// Identifiers used by the 'external_source_symbol' attribute. IdentifierInfo *Ident_language, *Ident_defined_in, *Ident_generated_declaration; /// C++11 contextual keywords. mutable IdentifierInfo *Ident_final; mutable IdentifierInfo *Ident_GNU_final; mutable IdentifierInfo *Ident_override; // C++2a contextual keywords. mutable IdentifierInfo *Ident_import; mutable IdentifierInfo *Ident_module; // C++ type trait keywords that can be reverted to identifiers and still be // used as type traits. llvm::SmallDenseMap<IdentifierInfo *, tok::TokenKind> RevertibleTypeTraits; std::unique_ptr<PragmaHandler> AlignHandler; std::unique_ptr<PragmaHandler> GCCVisibilityHandler; std::unique_ptr<PragmaHandler> OptionsHandler; std::unique_ptr<PragmaHandler> PackHandler; std::unique_ptr<PragmaHandler> MSStructHandler; std::unique_ptr<PragmaHandler> UnusedHandler; std::unique_ptr<PragmaHandler> WeakHandler; std::unique_ptr<PragmaHandler> RedefineExtnameHandler; std::unique_ptr<PragmaHandler> FPContractHandler; std::unique_ptr<PragmaHandler> OpenCLExtensionHandler; std::unique_ptr<PragmaHandler> OpenMPHandler; std::unique_ptr<PragmaHandler> PCSectionHandler; std::unique_ptr<PragmaHandler> MSCommentHandler; std::unique_ptr<PragmaHandler> MSDetectMismatchHandler; std::unique_ptr<PragmaHandler> MSPointersToMembers; std::unique_ptr<PragmaHandler> MSVtorDisp; std::unique_ptr<PragmaHandler> MSInitSeg; std::unique_ptr<PragmaHandler> MSDataSeg; std::unique_ptr<PragmaHandler> MSBSSSeg; std::unique_ptr<PragmaHandler> MSConstSeg; std::unique_ptr<PragmaHandler> MSCodeSeg; std::unique_ptr<PragmaHandler> MSSection; std::unique_ptr<PragmaHandler> MSRuntimeChecks; std::unique_ptr<PragmaHandler> MSIntrinsic; std::unique_ptr<PragmaHandler> MSOptimize; std::unique_ptr<PragmaHandler> CUDAForceHostDeviceHandler; std::unique_ptr<PragmaHandler> OptimizeHandler; std::unique_ptr<PragmaHandler> LoopHintHandler; std::unique_ptr<PragmaHandler> UnrollHintHandler; std::unique_ptr<PragmaHandler> NoUnrollHintHandler; std::unique_ptr<PragmaHandler> UnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> NoUnrollAndJamHintHandler; std::unique_ptr<PragmaHandler> FPHandler; std::unique_ptr<PragmaHandler> STDCFENVHandler; std::unique_ptr<PragmaHandler> STDCCXLIMITHandler; std::unique_ptr<PragmaHandler> STDCUnknownHandler; std::unique_ptr<PragmaHandler> AttributePragmaHandler; std::unique_ptr<CommentHandler> CommentSemaHandler; /// Whether the '>' token acts as an operator or not. 
This will be /// true except when we are parsing an expression within a C++ /// template argument list, where the '>' closes the template /// argument list. bool GreaterThanIsOperator; /// ColonIsSacred - When this is false, we aggressively try to recover from /// code like "foo : bar" as if it were a typo for "foo :: bar". This is not /// safe in case statements and a few other things. This is managed by the /// ColonProtectionRAIIObject RAII object. bool ColonIsSacred; /// Parsing OpenMP directive mode. bool OpenMPDirectiveParsing = false; /// When true, we are directly inside an Objective-C message /// send expression. /// /// This is managed by the \c InMessageExpressionRAIIObject class, and /// should not be set directly. bool InMessageExpression; /// Gets set to true after calling ProduceSignatureHelp, it is for a /// workaround to make sure ProduceSignatureHelp is only called at the deepest /// function call. bool CalledSignatureHelp = false; /// The "depth" of the template parameters currently being parsed. unsigned TemplateParameterDepth; /// RAII class that manages the template parameter depth. class TemplateParameterDepthRAII { unsigned &Depth; unsigned AddedLevels; public: explicit TemplateParameterDepthRAII(unsigned &Depth) : Depth(Depth), AddedLevels(0) {} ~TemplateParameterDepthRAII() { Depth -= AddedLevels; } void operator++() { ++Depth; ++AddedLevels; } void addDepth(unsigned D) { Depth += D; AddedLevels += D; } void setAddedDepth(unsigned D) { Depth = Depth - AddedLevels + D; AddedLevels = D; } unsigned getDepth() const { return Depth; } unsigned getOriginalDepth() const { return Depth - AddedLevels; } }; /// Factory object for creating ParsedAttr objects. AttributeFactory AttrFactory; /// Gathers and cleans up TemplateIdAnnotations when parsing of a /// top-level declaration is finished. SmallVector<TemplateIdAnnotation *, 16> TemplateIds; /// Identifiers which have been declared within a tentative parse. SmallVector<IdentifierInfo *, 8> TentativelyDeclaredIdentifiers; /// Tracker for '<' tokens that might have been intended to be treated as an /// angle bracket instead of a less-than comparison. /// /// This happens when the user intends to form a template-id, but typoes the /// template-name or forgets a 'template' keyword for a dependent template /// name. /// /// We track these locations from the point where we see a '<' with a /// name-like expression on its left until we see a '>' or '>>' that might /// match it. struct AngleBracketTracker { /// Flags used to rank candidate template names when there is more than one /// '<' in a scope. enum Priority : unsigned short { /// A non-dependent name that is a potential typo for a template name. PotentialTypo = 0x0, /// A dependent name that might instantiate to a template-name. DependentName = 0x2, /// A space appears before the '<' token. SpaceBeforeLess = 0x0, /// No space before the '<' token NoSpaceBeforeLess = 0x1, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue*/ DependentName) }; struct Loc { Expr *TemplateName; SourceLocation LessLoc; AngleBracketTracker::Priority Priority; unsigned short ParenCount, BracketCount, BraceCount; bool isActive(Parser &P) const { return P.ParenCount == ParenCount && P.BracketCount == BracketCount && P.BraceCount == BraceCount; } bool isActiveOrNested(Parser &P) const { return isActive(P) || P.ParenCount > ParenCount || P.BracketCount > BracketCount || P.BraceCount > BraceCount; } }; SmallVector<Loc, 8> Locs; /// Add an expression that might have been intended to be a template name. 
/// In the case of ambiguity, we arbitrarily select the innermost such /// expression, for example in 'foo < bar < baz', 'bar' is the current /// candidate. No attempt is made to track that 'foo' is also a candidate /// for the case where we see a second suspicious '>' token. void add(Parser &P, Expr *TemplateName, SourceLocation LessLoc, Priority Prio) { if (!Locs.empty() && Locs.back().isActive(P)) { if (Locs.back().Priority <= Prio) { Locs.back().TemplateName = TemplateName; Locs.back().LessLoc = LessLoc; Locs.back().Priority = Prio; } } else { Locs.push_back({TemplateName, LessLoc, Prio, P.ParenCount, P.BracketCount, P.BraceCount}); } } /// Mark the current potential missing template location as having been /// handled (this happens if we pass a "corresponding" '>' or '>>' token /// or leave a bracket scope). void clear(Parser &P) { while (!Locs.empty() && Locs.back().isActiveOrNested(P)) Locs.pop_back(); } /// Get the current enclosing expression that might have been intended to be /// a template name. Loc *getCurrent(Parser &P) { if (!Locs.empty() && Locs.back().isActive(P)) return &Locs.back(); return nullptr; } }; AngleBracketTracker AngleBrackets; IdentifierInfo *getSEHExceptKeyword(); /// True if we are within an Objective-C container while parsing C-like decls. /// /// This is necessary because Sema thinks we have left the container /// to parse the C-like decls, meaning Actions.getObjCDeclContext() will /// be NULL. bool ParsingInObjCContainer; /// Whether to skip parsing of function bodies. /// /// This option can be used, for example, to speed up searches for /// declarations/definitions when indexing. bool SkipFunctionBodies; /// The location of the expression statement that is being parsed right now. /// Used to determine if an expression that is being parsed is a statement or /// just a regular sub-expression. SourceLocation ExprStatementTokLoc; /// Flags describing a context in which we're parsing a statement. enum class ParsedStmtContext { /// This context permits declarations in language modes where declarations /// are not statements. AllowDeclarationsInC = 0x1, /// This context permits standalone OpenMP directives. AllowStandaloneOpenMPDirectives = 0x2, /// This context is at the top level of a GNU statement expression. InStmtExpr = 0x4, /// The context of a regular substatement. SubStmt = 0, /// The context of a compound-statement. Compound = AllowDeclarationsInC | AllowStandaloneOpenMPDirectives, LLVM_MARK_AS_BITMASK_ENUM(InStmtExpr) }; /// Act on an expression statement that might be the last statement in a /// GNU statement expression. Checks whether we are actually at the end of /// a statement expression and builds a suitable expression statement. StmtResult handleExprStmt(ExprResult E, ParsedStmtContext StmtCtx); public: Parser(Preprocessor &PP, Sema &Actions, bool SkipFunctionBodies); ~Parser() override; const LangOptions &getLangOpts() const { return PP.getLangOpts(); } const TargetInfo &getTargetInfo() const { return PP.getTargetInfo(); } Preprocessor &getPreprocessor() const { return PP; } Sema &getActions() const { return Actions; } AttributeFactory &getAttrFactory() { return AttrFactory; } const Token &getCurToken() const { return Tok; } Scope *getCurScope() const { return Actions.getCurScope(); } void incrementMSManglingNumber() const { return Actions.incrementMSManglingNumber(); } Decl *getObjCDeclContext() const { return Actions.getObjCDeclContext(); } // Type forwarding.
All of these are statically 'void*', but they may all be // different actual classes based on the actions in place. typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef SmallVector<TemplateParameterList *, 4> TemplateParameterLists; typedef Sema::FullExprArg FullExprArg; // Parsing methods. /// Initialize - Warm up the parser. /// void Initialize(); /// Parse the first top-level declaration in a translation unit. bool ParseFirstTopLevelDecl(DeclGroupPtrTy &Result); /// ParseTopLevelDecl - Parse one top-level declaration. Returns true if /// the EOF was encountered. bool ParseTopLevelDecl(DeclGroupPtrTy &Result, bool IsFirstDecl = false); bool ParseTopLevelDecl() { DeclGroupPtrTy Result; return ParseTopLevelDecl(Result); } /// ConsumeToken - Consume the current 'peek token' and lex the next one. /// This does not work with special tokens: string literals, code completion, /// annotation tokens and balanced tokens must be handled using the specific /// consume methods. /// Returns the location of the consumed token. SourceLocation ConsumeToken() { assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } bool TryConsumeToken(tok::TokenKind Expected) { if (Tok.isNot(Expected)) return false; assert(!isTokenSpecial() && "Should consume special tokens with Consume*Token"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return true; } bool TryConsumeToken(tok::TokenKind Expected, SourceLocation &Loc) { if (!TryConsumeToken(Expected)) return false; Loc = PrevTokLocation; return true; } /// ConsumeAnyToken - Dispatch to the right Consume* method based on the /// current token type. This should only be used in cases where the type of /// the token really isn't known, e.g. in error recovery. SourceLocation ConsumeAnyToken(bool ConsumeCodeCompletionTok = false) { if (isTokenParen()) return ConsumeParen(); if (isTokenBracket()) return ConsumeBracket(); if (isTokenBrace()) return ConsumeBrace(); if (isTokenStringLiteral()) return ConsumeStringToken(); if (Tok.is(tok::code_completion)) return ConsumeCodeCompletionTok ? ConsumeCodeCompletionToken() : handleUnexpectedCodeCompletionToken(); if (Tok.isAnnotation()) return ConsumeAnnotationToken(); return ConsumeToken(); } SourceLocation getEndOfPreviousToken() { return PP.getLocForEndOfToken(PrevTokLocation); } /// Retrieve the underscored keyword (_Nonnull, _Nullable) that corresponds /// to the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability) { return Actions.getNullabilityKeyword(nullability); } private: //===--------------------------------------------------------------------===// // Low-Level token peeking and consumption methods. // /// isTokenParen - Return true if the cur token is '(' or ')'. bool isTokenParen() const { return Tok.isOneOf(tok::l_paren, tok::r_paren); } /// isTokenBracket - Return true if the cur token is '[' or ']'. bool isTokenBracket() const { return Tok.isOneOf(tok::l_square, tok::r_square); } /// isTokenBrace - Return true if the cur token is '{' or '}'. bool isTokenBrace() const { return Tok.isOneOf(tok::l_brace, tok::r_brace); } /// isTokenStringLiteral - True if this token is a string-literal. bool isTokenStringLiteral() const { return tok::isStringLiteral(Tok.getKind()); } /// isTokenSpecial - True if this token requires special consumption methods. 
bool isTokenSpecial() const { return isTokenStringLiteral() || isTokenParen() || isTokenBracket() || isTokenBrace() || Tok.is(tok::code_completion) || Tok.isAnnotation(); } /// Returns true if the current token is '=' or is a type of '='. /// For typos, give a fixit to '=' bool isTokenEqualOrEqualTypo(); /// Return the current token to the token stream and make the given /// token the current token. void UnconsumeToken(Token &Consumed) { Token Next = Tok; PP.EnterToken(Consumed, /*IsReinject*/true); PP.Lex(Tok); PP.EnterToken(Next, /*IsReinject*/true); } SourceLocation ConsumeAnnotationToken() { assert(Tok.isAnnotation() && "wrong consume method"); SourceLocation Loc = Tok.getLocation(); PrevTokLocation = Tok.getAnnotationEndLoc(); PP.Lex(Tok); return Loc; } /// ConsumeParen - This consume method keeps the paren count up-to-date. /// SourceLocation ConsumeParen() { assert(isTokenParen() && "wrong consume method"); if (Tok.getKind() == tok::l_paren) ++ParenCount; else if (ParenCount) { AngleBrackets.clear(*this); --ParenCount; // Don't let unbalanced )'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBracket - This consume method keeps the bracket count up-to-date. /// SourceLocation ConsumeBracket() { assert(isTokenBracket() && "wrong consume method"); if (Tok.getKind() == tok::l_square) ++BracketCount; else if (BracketCount) { AngleBrackets.clear(*this); --BracketCount; // Don't let unbalanced ]'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeBrace - This consume method keeps the brace count up-to-date. /// SourceLocation ConsumeBrace() { assert(isTokenBrace() && "wrong consume method"); if (Tok.getKind() == tok::l_brace) ++BraceCount; else if (BraceCount) { AngleBrackets.clear(*this); --BraceCount; // Don't let unbalanced }'s drive the count negative. } PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// ConsumeStringToken - Consume the current 'peek token', lexing a new one /// and returning the token kind. This method is specific to strings, as it /// handles string literal concatenation, as per C99 5.1.1.2, translation /// phase #6. SourceLocation ConsumeStringToken() { assert(isTokenStringLiteral() && "Should only consume string literals with this method"); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// Consume the current code-completion token. /// /// This routine can be called to consume the code-completion token and /// continue processing in special cases where \c cutOffParsing() isn't /// desired, such as token caching or completion with lookahead. SourceLocation ConsumeCodeCompletionToken() { assert(Tok.is(tok::code_completion)); PrevTokLocation = Tok.getLocation(); PP.Lex(Tok); return PrevTokLocation; } /// \brief When we are consuming a code-completion token without having /// matched a specific position in the grammar, provide code-completion results /// based on context. /// /// \returns the source location of the code-completion token. SourceLocation handleUnexpectedCodeCompletionToken(); /// Abruptly cut off parsing; mainly used when we have reached the /// code-completion point. void cutOffParsing() { if (PP.isCodeCompletionEnabled()) PP.setCodeCompletionReached(); // Cut off parsing by acting as if we reached the end-of-file. Tok.setKind(tok::eof); } /// Determine if we're at the end of the file or at a transition /// between modules.
bool isEofOrEom() { tok::TokenKind Kind = Tok.getKind(); return Kind == tok::eof || Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include; } /// Checks if the \p Level is valid for use in a fold expression. bool isFoldOperator(prec::Level Level) const; /// Checks if the \p Kind is a valid operator for fold expressions. bool isFoldOperator(tok::TokenKind Kind) const; /// Initialize all pragma handlers. void initializePragmaHandlers(); /// Destroy and reset all pragma handlers. void resetPragmaHandlers(); /// Handle the annotation token produced for #pragma unused(...) void HandlePragmaUnused(); /// Handle the annotation token produced for /// #pragma GCC visibility... void HandlePragmaVisibility(); /// Handle the annotation token produced for /// #pragma pack... void HandlePragmaPack(); /// Handle the annotation token produced for /// #pragma ms_struct... void HandlePragmaMSStruct(); /// Handle the annotation token produced for /// #pragma comment... void HandlePragmaMSComment(); void HandlePragmaMSPointersToMembers(); void HandlePragmaMSVtorDisp(); void HandlePragmaMSPragma(); bool HandlePragmaMSSection(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSSegment(StringRef PragmaName, SourceLocation PragmaLocation); bool HandlePragmaMSInitSeg(StringRef PragmaName, SourceLocation PragmaLocation); /// Handle the annotation token produced for /// #pragma align... void HandlePragmaAlign(); /// Handle the annotation token produced for /// #pragma clang __debug dump... void HandlePragmaDump(); /// Handle the annotation token produced for /// #pragma weak id... void HandlePragmaWeak(); /// Handle the annotation token produced for /// #pragma weak id = id... void HandlePragmaWeakAlias(); /// Handle the annotation token produced for /// #pragma redefine_extname... void HandlePragmaRedefineExtname(); /// Handle the annotation token produced for /// #pragma STDC FP_CONTRACT... void HandlePragmaFPContract(); /// Handle the annotation token produced for /// #pragma STDC FENV_ACCESS... void HandlePragmaFEnvAccess(); /// \brief Handle the annotation token produced for /// #pragma clang fp ... void HandlePragmaFP(); /// Handle the annotation token produced for /// #pragma OPENCL EXTENSION... void HandlePragmaOpenCLExtension(); /// Handle the annotation token produced for /// #pragma clang __debug captured StmtResult HandlePragmaCaptured(); /// Handle the annotation token produced for /// #pragma clang loop and #pragma unroll. bool HandlePragmaLoopHint(LoopHint &Hint); bool ParsePragmaAttributeSubjectMatchRuleSet( attr::ParsedSubjectMatchRuleSet &SubjectMatchRules, SourceLocation &AnyLoc, SourceLocation &LastMatchRuleEndLoc); void HandlePragmaAttribute(); /// GetLookAheadToken - This peeks ahead N tokens and returns that token /// without consuming any tokens. LookAhead(0) returns 'Tok', LookAhead(1) /// returns the token after Tok, etc. /// /// Note that this differs from the Preprocessor's LookAhead method, because /// the Parser always has one token lexed that the preprocessor doesn't. /// const Token &GetLookAheadToken(unsigned N) { if (N == 0 || Tok.is(tok::eof)) return Tok; return PP.LookAhead(N-1); } public: /// NextToken - This peeks ahead one token and returns it without /// consuming it. const Token &NextToken() { return PP.LookAhead(0); } /// getTypeAnnotation - Read a parsed type out of an annotation token. 
static ParsedType getTypeAnnotation(const Token &Tok) { return ParsedType::getFromOpaquePtr(Tok.getAnnotationValue()); } private: static void setTypeAnnotation(Token &Tok, ParsedType T) { Tok.setAnnotationValue(T.getAsOpaquePtr()); } static NamedDecl *getNonTypeAnnotation(const Token &Tok) { return static_cast<NamedDecl*>(Tok.getAnnotationValue()); } static void setNonTypeAnnotation(Token &Tok, NamedDecl *ND) { Tok.setAnnotationValue(ND); } static IdentifierInfo *getIdentifierAnnotation(const Token &Tok) { return static_cast<IdentifierInfo*>(Tok.getAnnotationValue()); } static void setIdentifierAnnotation(Token &Tok, IdentifierInfo *ND) { Tok.setAnnotationValue(ND); } /// Read an already-translated primary expression out of an annotation /// token. static ExprResult getExprAnnotation(const Token &Tok) { return ExprResult::getFromOpaquePointer(Tok.getAnnotationValue()); } /// Set the primary expression corresponding to the given annotation /// token. static void setExprAnnotation(Token &Tok, ExprResult ER) { Tok.setAnnotationValue(ER.getAsOpaquePointer()); } public: // If NeedType is true, then TryAnnotateTypeOrScopeToken will try harder to // find a type name by attempting typo correction. bool TryAnnotateTypeOrScopeToken(); bool TryAnnotateTypeOrScopeTokenAfterScopeSpec(CXXScopeSpec &SS, bool IsNewScope); bool TryAnnotateCXXScopeToken(bool EnteringContext = false); private: enum AnnotatedNameKind { /// Annotation has failed and emitted an error. ANK_Error, /// The identifier is a tentatively-declared name. ANK_TentativeDecl, /// The identifier is a template name. FIXME: Add an annotation for that. ANK_TemplateName, /// The identifier can't be resolved. ANK_Unresolved, /// Annotation was successful. ANK_Success }; AnnotatedNameKind TryAnnotateName(CorrectionCandidateCallback *CCC = nullptr); /// Push a tok::annot_cxxscope token onto the token stream. void AnnotateScopeToken(CXXScopeSpec &SS, bool IsNewAnnotation); /// TryAltiVecToken - Check for context-sensitive AltiVec identifier tokens, /// replacing them with the non-context-sensitive keywords. This returns /// true if the token was replaced. bool TryAltiVecToken(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid) { if (!getLangOpts().AltiVec && !getLangOpts().ZVector) return false; if (Tok.getIdentifierInfo() != Ident_vector && Tok.getIdentifierInfo() != Ident_bool && (!getLangOpts().AltiVec || Tok.getIdentifierInfo() != Ident_pixel)) return false; return TryAltiVecTokenOutOfLine(DS, Loc, PrevSpec, DiagID, isInvalid); } /// TryAltiVecVectorToken - Check for context-sensitive AltiVec vector /// identifier token, replacing it with the non-context-sensitive __vector. /// This returns true if the token was replaced. bool TryAltiVecVectorToken() { if ((!getLangOpts().AltiVec && !getLangOpts().ZVector) || Tok.getIdentifierInfo() != Ident_vector) return false; return TryAltiVecVectorTokenOutOfLine(); } bool TryAltiVecVectorTokenOutOfLine(); bool TryAltiVecTokenOutOfLine(DeclSpec &DS, SourceLocation Loc, const char *&PrevSpec, unsigned &DiagID, bool &isInvalid); /// Returns true if the current token is the identifier 'instancetype'. /// /// Should only be used in Objective-C language modes. 
bool isObjCInstancetype() { assert(getLangOpts().ObjC); if (Tok.isAnnotation()) return false; if (!Ident_instancetype) Ident_instancetype = PP.getIdentifierInfo("instancetype"); return Tok.getIdentifierInfo() == Ident_instancetype; } /// TryKeywordIdentFallback - For compatibility with system headers using /// keywords as identifiers, attempt to convert the current token to an /// identifier and optionally disable the keyword for the remainder of the /// translation unit. This returns false if the token was not replaced, /// otherwise emits a diagnostic and returns true. bool TryKeywordIdentFallback(bool DisableKeyword); /// Get the TemplateIdAnnotation from the token. TemplateIdAnnotation *takeTemplateIdAnnotation(const Token &tok); /// TentativeParsingAction - An object that is used as a kind of "tentative /// parsing transaction". It gets instantiated to mark the token position and /// after the token consumption is done, Commit() or Revert() is called to /// either "commit the consumed tokens" or revert to the previously marked /// token position. Example: /// /// TentativeParsingAction TPA(*this); /// ConsumeToken(); /// .... /// TPA.Revert(); /// class TentativeParsingAction { Parser &P; PreferredTypeBuilder PrevPreferredType; Token PrevTok; size_t PrevTentativelyDeclaredIdentifierCount; unsigned short PrevParenCount, PrevBracketCount, PrevBraceCount; bool isActive; public: explicit TentativeParsingAction(Parser& p) : P(p) { PrevPreferredType = P.PreferredType; PrevTok = P.Tok; PrevTentativelyDeclaredIdentifierCount = P.TentativelyDeclaredIdentifiers.size(); PrevParenCount = P.ParenCount; PrevBracketCount = P.BracketCount; PrevBraceCount = P.BraceCount; P.PP.EnableBacktrackAtThisPos(); isActive = true; } void Commit() { assert(isActive && "Parsing action was finished!"); P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.PP.CommitBacktrackedTokens(); isActive = false; } void Revert() { assert(isActive && "Parsing action was finished!"); P.PP.Backtrack(); P.PreferredType = PrevPreferredType; P.Tok = PrevTok; P.TentativelyDeclaredIdentifiers.resize( PrevTentativelyDeclaredIdentifierCount); P.ParenCount = PrevParenCount; P.BracketCount = PrevBracketCount; P.BraceCount = PrevBraceCount; isActive = false; } ~TentativeParsingAction() { assert(!isActive && "Forgot to call Commit or Revert!"); } }; /// A TentativeParsingAction that automatically reverts in its destructor. /// Useful for disambiguation parses that will always be reverted. class RevertingTentativeParsingAction : private Parser::TentativeParsingAction { public: RevertingTentativeParsingAction(Parser &P) : Parser::TentativeParsingAction(P) {} ~RevertingTentativeParsingAction() { Revert(); } }; class UnannotatedTentativeParsingAction; /// ObjCDeclContextSwitch - An object used to switch context from /// an objective-c decl context to its enclosing decl context and /// back. class ObjCDeclContextSwitch { Parser &P; Decl *DC; SaveAndRestore<bool> WithinObjCContainer; public: explicit ObjCDeclContextSwitch(Parser &p) : P(p), DC(p.getObjCDeclContext()), WithinObjCContainer(P.ParsingInObjCContainer, DC != nullptr) { if (DC) P.Actions.ActOnObjCTemporaryExitContainerContext(cast<DeclContext>(DC)); } ~ObjCDeclContextSwitch() { if (DC) P.Actions.ActOnObjCReenterContainerContext(cast<DeclContext>(DC)); } }; /// ExpectAndConsume - The parser expects that 'ExpectedTok' is next in the /// input. If so, it is consumed and false is returned. 
/// /// If a trivial punctuator misspelling is encountered, a FixIt error /// diagnostic is issued and false is returned after recovery. /// /// If the input is malformed, this emits the specified diagnostic and true is /// returned. bool ExpectAndConsume(tok::TokenKind ExpectedTok, unsigned Diag = diag::err_expected, StringRef DiagMsg = ""); /// The parser expects a semicolon and, if present, will consume it. /// /// If the next token is not a semicolon, this emits the specified diagnostic, /// or, if there's just some closing-delimiter noise (e.g., ')' or ']') prior /// to the semicolon, consumes that extra token. bool ExpectAndConsumeSemi(unsigned DiagID); /// The kind of extra semi diagnostic to emit. enum ExtraSemiKind { OutsideFunction = 0, InsideStruct = 1, InstanceVariableList = 2, AfterMemberFunctionDefinition = 3 }; /// Consume any extra semi-colons until the end of the line. void ConsumeExtraSemi(ExtraSemiKind Kind, DeclSpec::TST T = TST_unspecified); /// Return false if the next token is an identifier. An 'expected identifier' /// error is emitted otherwise. /// /// The parser tries to recover from the error by checking if the next token /// is a C++ keyword when parsing Objective-C++. Return false if the recovery /// was successful. bool expectIdentifier(); public: //===--------------------------------------------------------------------===// // Scope manipulation /// ParseScope - Introduces a new scope for parsing. The kind of /// scope is determined by ScopeFlags. Objects of this type should /// be created on the stack to coincide with the position where the /// parser enters the new scope, and this object's constructor will /// create that new scope. Similarly, once the object is destroyed /// the parser will exit the scope. class ParseScope { Parser *Self; ParseScope(const ParseScope &) = delete; void operator=(const ParseScope &) = delete; public: // ParseScope - Construct a new object to manage a scope in the // parser Self where the new Scope is created with the flags // ScopeFlags, but only when we aren't about to enter a compound statement. ParseScope(Parser *Self, unsigned ScopeFlags, bool EnteredScope = true, bool BeforeCompoundStmt = false) : Self(Self) { if (EnteredScope && !BeforeCompoundStmt) Self->EnterScope(ScopeFlags); else { if (BeforeCompoundStmt) Self->incrementMSManglingNumber(); this->Self = nullptr; } } // Exit - Exit the scope associated with this object now, rather // than waiting until the object is destroyed. void Exit() { if (Self) { Self->ExitScope(); Self = nullptr; } } ~ParseScope() { Exit(); } }; /// EnterScope - Start a new scope. void EnterScope(unsigned ScopeFlags); /// ExitScope - Pop a scope off the scope stack. void ExitScope(); private: /// RAII object used to modify the scope flags for the current scope. class ParseScopeFlags { Scope *CurScope; unsigned OldFlags; ParseScopeFlags(const ParseScopeFlags &) = delete; void operator=(const ParseScopeFlags &) = delete; public: ParseScopeFlags(Parser *Self, unsigned ScopeFlags, bool ManageFlags = true); ~ParseScopeFlags(); }; //===--------------------------------------------------------------------===// // Diagnostic Emission and Error recovery. 
public: DiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID); DiagnosticBuilder Diag(const Token &Tok, unsigned DiagID); DiagnosticBuilder Diag(unsigned DiagID) { return Diag(Tok, DiagID); } private: void SuggestParentheses(SourceLocation Loc, unsigned DK, SourceRange ParenRange); void CheckNestedObjCContexts(SourceLocation AtLoc); public: /// Control flags for SkipUntil functions. enum SkipUntilFlags { StopAtSemi = 1 << 0, ///< Stop skipping at semicolon /// Stop skipping at specified token, but don't skip the token itself StopBeforeMatch = 1 << 1, StopAtCodeCompletion = 1 << 2 ///< Stop at code completion }; friend constexpr SkipUntilFlags operator|(SkipUntilFlags L, SkipUntilFlags R) { return static_cast<SkipUntilFlags>(static_cast<unsigned>(L) | static_cast<unsigned>(R)); } /// SkipUntil - Read tokens until we get to the specified token, then consume /// it (unless StopBeforeMatch is specified). Because we cannot guarantee /// that the token will ever occur, this skips to the next token, or to some /// likely good stopping point. If Flags has StopAtSemi flag, skipping will /// stop at a ';' character. /// /// If SkipUntil finds the specified token, it returns true, otherwise it /// returns false. bool SkipUntil(tok::TokenKind T, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { return SkipUntil(llvm::makeArrayRef(T), Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2}; return SkipUntil(TokArray, Flags); } bool SkipUntil(tok::TokenKind T1, tok::TokenKind T2, tok::TokenKind T3, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)) { tok::TokenKind TokArray[] = {T1, T2, T3}; return SkipUntil(TokArray, Flags); } bool SkipUntil(ArrayRef<tok::TokenKind> Toks, SkipUntilFlags Flags = static_cast<SkipUntilFlags>(0)); /// SkipMalformedDecl - Read tokens until we get to some likely good stopping /// point for skipping past a simple-declaration. void SkipMalformedDecl(); /// The location of the first statement inside an else that might /// have a misleading indentation. If there is no /// MisleadingIndentationChecker active on an else, this location is invalid. SourceLocation MisleadingIndentationElseLoc; private: //===--------------------------------------------------------------------===// // Lexing and parsing of C++ inline methods. struct ParsingClass; /// [class.mem]p1: "... the class is regarded as complete within /// - function bodies /// - default arguments /// - exception-specifications (TODO: C++0x) /// - and brace-or-equal-initializers for non-static data members /// (including such things in nested classes)." /// LateParsedDeclarations build the tree of those elements so they can /// be parsed after parsing the top-level class. class LateParsedDeclaration { public: virtual ~LateParsedDeclaration(); virtual void ParseLexedMethodDeclarations(); virtual void ParseLexedMemberInitializers(); virtual void ParseLexedMethodDefs(); virtual void ParseLexedAttributes(); }; /// Inner node of the LateParsedDeclaration tree that parses /// all its members recursively.
class LateParsedClass : public LateParsedDeclaration { public: LateParsedClass(Parser *P, ParsingClass *C); ~LateParsedClass() override; void ParseLexedMethodDeclarations() override; void ParseLexedMemberInitializers() override; void ParseLexedMethodDefs() override; void ParseLexedAttributes() override; private: Parser *Self; ParsingClass *Class; }; /// Contains the lexed tokens of an attribute with arguments that /// may reference member variables and so need to be parsed at the /// end of the class declaration after parsing all other member /// declarations. /// FIXME: Perhaps we should change the name of LateParsedDeclaration to /// LateParsedTokens. struct LateParsedAttribute : public LateParsedDeclaration { Parser *Self; CachedTokens Toks; IdentifierInfo &AttrName; IdentifierInfo *MacroII = nullptr; SourceLocation AttrNameLoc; SmallVector<Decl*, 2> Decls; explicit LateParsedAttribute(Parser *P, IdentifierInfo &Name, SourceLocation Loc) : Self(P), AttrName(Name), AttrNameLoc(Loc) {} void ParseLexedAttributes() override; void addDecl(Decl *D) { Decls.push_back(D); } }; // A list of late-parsed attributes. Used by ParseGNUAttributes. class LateParsedAttrList: public SmallVector<LateParsedAttribute *, 2> { public: LateParsedAttrList(bool PSoon = false) : ParseSoon(PSoon) { } bool parseSoon() { return ParseSoon; } private: bool ParseSoon; // Are we planning to parse these shortly after creation? }; /// Contains the lexed tokens of a member function definition /// which needs to be parsed at the end of the class declaration /// after parsing all other member declarations. struct LexedMethod : public LateParsedDeclaration { Parser *Self; Decl *D; CachedTokens Toks; /// Whether this member function had an associated template /// scope. When true, D is a template declaration; /// otherwise, it is a member function declaration. bool TemplateScope; explicit LexedMethod(Parser* P, Decl *MD) : Self(P), D(MD), TemplateScope(false) {} void ParseLexedMethodDefs() override; }; /// LateParsedDefaultArgument - Keeps track of a parameter that may /// have a default argument that cannot be parsed yet because it /// occurs within a member function declaration inside the class /// (C++ [class.mem]p2). struct LateParsedDefaultArgument { explicit LateParsedDefaultArgument(Decl *P, std::unique_ptr<CachedTokens> Toks = nullptr) : Param(P), Toks(std::move(Toks)) { } /// Param - The parameter declaration for this parameter. Decl *Param; /// Toks - The sequence of tokens that comprises the default /// argument expression, not including the '=' or the terminating /// ')' or ','. This will be NULL for parameters that have no /// default argument. std::unique_ptr<CachedTokens> Toks; }; /// LateParsedMethodDeclaration - A method declaration inside a class that /// contains at least one entity whose parsing needs to be delayed /// until the class itself is completely-defined, such as a default /// argument (C++ [class.mem]p2). struct LateParsedMethodDeclaration : public LateParsedDeclaration { explicit LateParsedMethodDeclaration(Parser *P, Decl *M) : Self(P), Method(M), TemplateScope(false), ExceptionSpecTokens(nullptr) {} void ParseLexedMethodDeclarations() override; Parser* Self; /// Method - The method declaration. Decl *Method; /// Whether this member function had an associated template /// scope. When true, D is a template declaration; /// otherwise, it is a member function declaration. bool TemplateScope; /// DefaultArgs - Contains the parameters of the function and /// their default arguments.
At least one of the parameters will /// have a default argument, but all of the parameters of the /// method will be stored so that they can be reintroduced into /// scope at the appropriate times. SmallVector<LateParsedDefaultArgument, 8> DefaultArgs; /// The set of tokens that make up an exception-specification that /// has not yet been parsed. CachedTokens *ExceptionSpecTokens; }; /// LateParsedMemberInitializer - An initializer for a non-static class data /// member whose parsing must be delayed until the class is completely /// defined (C++11 [class.mem]p2). struct LateParsedMemberInitializer : public LateParsedDeclaration { LateParsedMemberInitializer(Parser *P, Decl *FD) : Self(P), Field(FD) { } void ParseLexedMemberInitializers() override; Parser *Self; /// Field - The field declaration. Decl *Field; /// CachedTokens - The sequence of tokens that comprises the initializer, /// including any leading '='. CachedTokens Toks; }; /// LateParsedDeclarationsContainer - During parsing of a top (non-nested) /// C++ class, its method declarations that contain parts that won't be /// parsed until after the definition is completed (C++ [class.mem]p2), /// the method declarations and possibly attached inline definitions /// will be stored here with the tokens that will be parsed to create those /// entities. typedef SmallVector<LateParsedDeclaration*,2> LateParsedDeclarationsContainer; /// Representation of a class that has been parsed, including /// any member function declarations or definitions that need to be /// parsed after the corresponding top-level class is complete. struct ParsingClass { ParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : TopLevelClass(TopLevelClass), TemplateScope(false), IsInterface(IsInterface), TagOrTemplate(TagOrTemplate) { } /// Whether this is a "top-level" class, meaning that it is /// not nested within another class. bool TopLevelClass : 1; /// Whether this class had an associated template /// scope. When true, TagOrTemplate is a template declaration; /// otherwise, it is a tag declaration. bool TemplateScope : 1; /// Whether this class is an __interface. bool IsInterface : 1; /// The class or class template whose definition we are parsing. Decl *TagOrTemplate; /// LateParsedDeclarations - Method declarations, inline definitions and /// nested classes that contain pieces whose parsing will be delayed until /// the top-level class is fully defined. LateParsedDeclarationsContainer LateParsedDeclarations; }; /// The stack of classes that is currently being /// parsed. Nested and local classes will be pushed onto this stack /// when they are parsed, and removed afterward. std::stack<ParsingClass *> ClassStack; ParsingClass &getCurrentClass() { assert(!ClassStack.empty() && "No lexed method stacks!"); return *ClassStack.top(); } /// RAII object used to manage the parsing of a class definition. class ParsingClassDefinition { Parser &P; bool Popped; Sema::ParsingClassState State; public: ParsingClassDefinition(Parser &P, Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface) : P(P), Popped(false), State(P.PushParsingClass(TagOrTemplate, TopLevelClass, IsInterface)) { } /// Pop this class off the stack. void Pop() { assert(!Popped && "Nested class has already been popped"); Popped = true; P.PopParsingClass(State); } ~ParsingClassDefinition() { if (!Popped) P.PopParsingClass(State); } }; /// Contains information about any template-specific /// information that has been parsed prior to parsing declaration /// specifiers.
struct ParsedTemplateInfo { ParsedTemplateInfo() : Kind(NonTemplate), TemplateParams(nullptr), TemplateLoc() { } ParsedTemplateInfo(TemplateParameterLists *TemplateParams, bool isSpecialization, bool lastParameterListWasEmpty = false) : Kind(isSpecialization? ExplicitSpecialization : Template), TemplateParams(TemplateParams), LastParameterListWasEmpty(lastParameterListWasEmpty) { } explicit ParsedTemplateInfo(SourceLocation ExternLoc, SourceLocation TemplateLoc) : Kind(ExplicitInstantiation), TemplateParams(nullptr), ExternLoc(ExternLoc), TemplateLoc(TemplateLoc), LastParameterListWasEmpty(false){ } /// The kind of template we are parsing. enum { /// We are not parsing a template at all. NonTemplate = 0, /// We are parsing a template declaration. Template, /// We are parsing an explicit specialization. ExplicitSpecialization, /// We are parsing an explicit instantiation. ExplicitInstantiation } Kind; /// The template parameter lists, for template declarations /// and explicit specializations. TemplateParameterLists *TemplateParams; /// The location of the 'extern' keyword, if any, for an explicit /// instantiation SourceLocation ExternLoc; /// The location of the 'template' keyword, for an explicit /// instantiation. SourceLocation TemplateLoc; /// Whether the last template parameter list was empty. bool LastParameterListWasEmpty; SourceRange getSourceRange() const LLVM_READONLY; }; void LexTemplateFunctionForLateParsing(CachedTokens &Toks); void ParseLateTemplatedFuncDef(LateParsedTemplate &LPT); static void LateTemplateParserCallback(void *P, LateParsedTemplate &LPT); static void LateTemplateParserCleanupCallback(void *P); Sema::ParsingClassState PushParsingClass(Decl *TagOrTemplate, bool TopLevelClass, bool IsInterface); void DeallocateParsedClasses(ParsingClass *Class); void PopParsingClass(Sema::ParsingClassState); enum CachedInitKind { CIK_DefaultArgument, CIK_DefaultInitializer }; NamedDecl *ParseCXXInlineMethodDef(AccessSpecifier AS, ParsedAttributes &AccessAttrs, ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo, const VirtSpecifiers &VS, SourceLocation PureSpecLoc); void ParseCXXNonStaticMemberInitializer(Decl *VarD); void ParseLexedAttributes(ParsingClass &Class); void ParseLexedAttributeList(LateParsedAttrList &LAs, Decl *D, bool EnterScope, bool OnDefinition); void ParseLexedAttribute(LateParsedAttribute &LA, bool EnterScope, bool OnDefinition); void ParseLexedMethodDeclarations(ParsingClass &Class); void ParseLexedMethodDeclaration(LateParsedMethodDeclaration &LM); void ParseLexedMethodDefs(ParsingClass &Class); void ParseLexedMethodDef(LexedMethod &LM); void ParseLexedMemberInitializers(ParsingClass &Class); void ParseLexedMemberInitializer(LateParsedMemberInitializer &MI); void ParseLexedObjCMethodDefs(LexedMethod &LM, bool parseMethod); bool ConsumeAndStoreFunctionPrologue(CachedTokens &Toks); bool ConsumeAndStoreInitializer(CachedTokens &Toks, CachedInitKind CIK); bool ConsumeAndStoreConditional(CachedTokens &Toks); bool ConsumeAndStoreUntil(tok::TokenKind T1, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true) { return ConsumeAndStoreUntil(T1, T1, Toks, StopAtSemi, ConsumeFinalToken); } bool ConsumeAndStoreUntil(tok::TokenKind T1, tok::TokenKind T2, CachedTokens &Toks, bool StopAtSemi = true, bool ConsumeFinalToken = true); //===--------------------------------------------------------------------===// // C99 6.9: External Definitions. 
struct ParsedAttributesWithRange : ParsedAttributes { ParsedAttributesWithRange(AttributeFactory &factory) : ParsedAttributes(factory) {} void clear() { ParsedAttributes::clear(); Range = SourceRange(); } SourceRange Range; }; struct ParsedAttributesViewWithRange : ParsedAttributesView { ParsedAttributesViewWithRange() : ParsedAttributesView() {} void clearListOnly() { ParsedAttributesView::clearListOnly(); Range = SourceRange(); } SourceRange Range; }; DeclGroupPtrTy ParseExternalDeclaration(ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr); bool isDeclarationAfterDeclarator(); bool isStartOfFunctionDefinition(const ParsingDeclarator &Declarator); DeclGroupPtrTy ParseDeclarationOrFunctionDefinition( ParsedAttributesWithRange &attrs, ParsingDeclSpec *DS = nullptr, AccessSpecifier AS = AS_none); DeclGroupPtrTy ParseDeclOrFunctionDefInternal(ParsedAttributesWithRange &attrs, ParsingDeclSpec &DS, AccessSpecifier AS); void SkipFunctionBody(); Decl *ParseFunctionDefinition(ParsingDeclarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), LateParsedAttrList *LateParsedAttrs = nullptr); void ParseKNRParamDeclarations(Declarator &D); // EndLoc, if non-NULL, is filled with the location of the last token of // the simple-asm. ExprResult ParseSimpleAsm(SourceLocation *EndLoc = nullptr); ExprResult ParseAsmStringLiteral(); // Objective-C External Declarations void MaybeSkipAttributes(tok::ObjCKeywordKind Kind); DeclGroupPtrTy ParseObjCAtDirectives(ParsedAttributesWithRange &Attrs); DeclGroupPtrTy ParseObjCAtClassDeclaration(SourceLocation atLoc); Decl *ParseObjCAtInterfaceDeclaration(SourceLocation AtLoc, ParsedAttributes &prefixAttrs); class ObjCTypeParamListScope; ObjCTypeParamList *parseObjCTypeParamList(); ObjCTypeParamList *parseObjCTypeParamListOrProtocolRefs( ObjCTypeParamListScope &Scope, SourceLocation &lAngleLoc, SmallVectorImpl<IdentifierLocPair> &protocolIdents, SourceLocation &rAngleLoc, bool mayBeProtocolList = true); void HelperActionsForIvarDeclarations(Decl *interfaceDecl, SourceLocation atLoc, BalancedDelimiterTracker &T, SmallVectorImpl<Decl *> &AllIvarDecls, bool RBraceMissing); void ParseObjCClassInstanceVariables(Decl *interfaceDecl, tok::ObjCKeywordKind visibility, SourceLocation atLoc); bool ParseObjCProtocolReferences(SmallVectorImpl<Decl *> &P, SmallVectorImpl<SourceLocation> &PLocs, bool WarnOnDeclarations, bool ForObjCContainer, SourceLocation &LAngleLoc, SourceLocation &EndProtoLoc, bool consumeLastToken); /// Parse the first angle-bracket-delimited clause for an /// Objective-C object or object pointer type, which may be either /// type arguments or protocol qualifiers. void parseObjCTypeArgsOrProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken, bool warnOnIncompleteProtocols); /// Parse either Objective-C type arguments or protocol qualifiers; if the /// former, also parse protocol qualifiers afterward. 
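/// For example (illustrative Objective-C source, not from the original
/// header), the angle-bracketed clause in each of these is handled here:
/// \code
///   NSArray<NSString *> *names;     // type arguments
///   id<NSCopying, NSCoding> value;  // protocol qualifiers
/// \endcode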
void parseObjCTypeArgsAndProtocolQualifiers( ParsedType baseType, SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs, SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc, SmallVectorImpl<Decl *> &protocols, SmallVectorImpl<SourceLocation> &protocolLocs, SourceLocation &protocolRAngleLoc, bool consumeLastToken); /// Parse a protocol qualifier type such as '<NSCopying>', which is /// an anachronistic way of writing 'id<NSCopying>'. TypeResult parseObjCProtocolQualifierType(SourceLocation &rAngleLoc); /// Parse Objective-C type arguments and protocol qualifiers, extending the /// current type with the parsed result. TypeResult parseObjCTypeArgsAndProtocolQualifiers(SourceLocation loc, ParsedType type, bool consumeLastToken, SourceLocation &endLoc); void ParseObjCInterfaceDeclList(tok::ObjCKeywordKind contextKey, Decl *CDecl); DeclGroupPtrTy ParseObjCAtProtocolDeclaration(SourceLocation atLoc, ParsedAttributes &prefixAttrs); struct ObjCImplParsingDataRAII { Parser &P; Decl *Dcl; bool HasCFunction; typedef SmallVector<LexedMethod*, 8> LateParsedObjCMethodContainer; LateParsedObjCMethodContainer LateParsedObjCMethods; ObjCImplParsingDataRAII(Parser &parser, Decl *D) : P(parser), Dcl(D), HasCFunction(false) { P.CurParsedObjCImpl = this; Finished = false; } ~ObjCImplParsingDataRAII(); void finish(SourceRange AtEnd); bool isFinished() const { return Finished; } private: bool Finished; }; ObjCImplParsingDataRAII *CurParsedObjCImpl; void StashAwayMethodOrFunctionBodyTokens(Decl *MDecl); DeclGroupPtrTy ParseObjCAtImplementationDeclaration(SourceLocation AtLoc, ParsedAttributes &Attrs); DeclGroupPtrTy ParseObjCAtEndDeclaration(SourceRange atEnd); Decl *ParseObjCAtAliasDeclaration(SourceLocation atLoc); Decl *ParseObjCPropertySynthesize(SourceLocation atLoc); Decl *ParseObjCPropertyDynamic(SourceLocation atLoc); IdentifierInfo *ParseObjCSelectorPiece(SourceLocation &MethodLocation); // Definitions for Objective-c context sensitive keywords recognition. enum ObjCTypeQual { objc_in=0, objc_out, objc_inout, objc_oneway, objc_bycopy, objc_byref, objc_nonnull, objc_nullable, objc_null_unspecified, objc_NumQuals }; IdentifierInfo *ObjCTypeQuals[objc_NumQuals]; bool isTokIdentifier_in() const; ParsedType ParseObjCTypeName(ObjCDeclSpec &DS, DeclaratorContext Ctx, ParsedAttributes *ParamAttrs); void ParseObjCMethodRequirement(); Decl *ParseObjCMethodPrototype( tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition = true); Decl *ParseObjCMethodDecl(SourceLocation mLoc, tok::TokenKind mType, tok::ObjCKeywordKind MethodImplKind = tok::objc_not_keyword, bool MethodDefinition=true); void ParseObjCPropertyAttribute(ObjCDeclSpec &DS); Decl *ParseObjCMethodDefinition(); public: //===--------------------------------------------------------------------===// // C99 6.5: Expressions. /// TypeCastState - State whether an expression is or may be a type cast. enum TypeCastState { NotTypeCast = 0, MaybeTypeCast, IsTypeCast }; ExprResult ParseExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpressionInExprEvalContext( TypeCastState isTypeCast = NotTypeCast); ExprResult ParseConstantExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseCaseExpression(SourceLocation CaseLoc); ExprResult ParseConstraintExpression(); // Expr that doesn't include commas. 
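// Illustrative note (added, not from the original header): in a call such
// as 'f(a, (b, c))', each argument is an assignment-expression, so the
// comma operator in the second argument needs the extra parentheses.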
ExprResult ParseAssignmentExpression(TypeCastState isTypeCast = NotTypeCast); ExprResult ParseMSAsmIdentifier(llvm::SmallVectorImpl<Token> &LineToks, unsigned &NumLineToksConsumed, bool IsUnevaluated); private: ExprResult ParseExpressionWithLeadingAt(SourceLocation AtLoc); ExprResult ParseExpressionWithLeadingExtension(SourceLocation ExtLoc); ExprResult ParseRHSOfBinaryExpression(ExprResult LHS, prec::Level MinPrec); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand, bool &NotCastExpr, TypeCastState isTypeCast, bool isVectorLiteral = false); ExprResult ParseCastExpression(bool isUnaryExpression, bool isAddressOfOperand = false, TypeCastState isTypeCast = NotTypeCast, bool isVectorLiteral = false); /// Returns true if the next token cannot start an expression. bool isNotExpressionStart(); /// Returns true if the next token would start a postfix-expression /// suffix. bool isPostfixExpressionSuffixStart() { tok::TokenKind K = Tok.getKind(); return (K == tok::l_square || K == tok::l_paren || K == tok::period || K == tok::arrow || K == tok::plusplus || K == tok::minusminus); } bool diagnoseUnknownTemplateId(ExprResult TemplateName, SourceLocation Less); void checkPotentialAngleBracket(ExprResult &PotentialTemplateName); bool checkPotentialAngleBracketDelimiter(const AngleBracketTracker::Loc &, const Token &OpToken); bool checkPotentialAngleBracketDelimiter(const Token &OpToken) { if (auto *Info = AngleBrackets.getCurrent(*this)) return checkPotentialAngleBracketDelimiter(*Info, OpToken); return false; } ExprResult ParsePostfixExpressionSuffix(ExprResult LHS); ExprResult ParseUnaryExprOrTypeTraitExpression(); ExprResult ParseBuiltinPrimaryExpression(); ExprResult ParseExprAfterUnaryExprOrTypeTrait(const Token &OpTok, bool &isCastExpr, ParsedType &CastTy, SourceRange &CastRange); typedef SmallVector<Expr*, 20> ExprListTy; typedef SmallVector<SourceLocation, 20> CommaLocsTy; /// ParseExpressionList - Used for C/C++ (argument-)expression-list. bool ParseExpressionList(SmallVectorImpl<Expr *> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs, llvm::function_ref<void()> ExpressionStarts = llvm::function_ref<void()>()); /// ParseSimpleExpressionList - A simple comma-separated list of expressions, /// used for misc language extensions. bool ParseSimpleExpressionList(SmallVectorImpl<Expr*> &Exprs, SmallVectorImpl<SourceLocation> &CommaLocs); /// ParenParseOption - Control what ParseParenExpression will parse. enum ParenParseOption { SimpleExpr, // Only parse '(' expression ')' FoldExpr, // Also allow fold-expression <anything> CompoundStmt, // Also allow '(' compound-statement ')' CompoundLiteral, // Also allow '(' type-name ')' '{' ... 
'}' CastExpr // Also allow '(' type-name ')' <anything> }; ExprResult ParseParenExpression(ParenParseOption &ExprType, bool stopIfCastExpr, bool isTypeCast, ParsedType &CastTy, SourceLocation &RParenLoc); ExprResult ParseCXXAmbiguousParenExpression( ParenParseOption &ExprType, ParsedType &CastTy, BalancedDelimiterTracker &Tracker, ColonProtectionRAIIObject &ColonProt); ExprResult ParseCompoundLiteralExpression(ParsedType Ty, SourceLocation LParenLoc, SourceLocation RParenLoc); ExprResult ParseStringLiteralExpression(bool AllowUserDefinedLiteral = false); ExprResult ParseGenericSelectionExpression(); ExprResult ParseObjCBoolLiteral(); ExprResult ParseFoldExpression(ExprResult LHS, BalancedDelimiterTracker &T); //===--------------------------------------------------------------------===// // C++ Expressions ExprResult tryParseCXXIdExpression(CXXScopeSpec &SS, bool isAddressOfOperand, Token &Replacement); ExprResult ParseCXXIdExpression(bool isAddressOfOperand = false); bool areTokensAdjacent(const Token &A, const Token &B); void CheckForTemplateAndDigraph(Token &Next, ParsedType ObjectTypePtr, bool EnteringContext, IdentifierInfo &II, CXXScopeSpec &SS); bool ParseOptionalCXXScopeSpecifier(CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext, bool *MayBePseudoDestructor = nullptr, bool IsTypename = false, IdentifierInfo **LastII = nullptr, bool OnlyNamespace = false, bool InUsingDeclaration = false); //===--------------------------------------------------------------------===// // C++11 5.1.2: Lambda expressions /// Result of tentatively parsing a lambda-introducer. enum class LambdaIntroducerTentativeParse { /// This appears to be a lambda-introducer, which has been fully parsed. Success, /// This is a lambda-introducer, but has not been fully parsed, and this /// function needs to be called again to parse it. Incomplete, /// This is definitely an Objective-C message send expression, rather than /// a lambda-introducer, attribute-specifier, or array designator. MessageSend, /// This is not a lambda-introducer. Invalid, }; // [...] () -> type {...} ExprResult ParseLambdaExpression(); ExprResult TryParseLambdaExpression(); bool ParseLambdaIntroducer(LambdaIntroducer &Intro, LambdaIntroducerTentativeParse *Tentative = nullptr); ExprResult ParseLambdaExpressionAfterIntroducer(LambdaIntroducer &Intro); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Casts ExprResult ParseCXXCasts(); /// Parse a __builtin_bit_cast(T, E), used to implement C++2a std::bit_cast. 
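/// An illustrative use of the builtin (added example, not from the original
/// header); the source and destination types must have the same size:
/// \code
///   double d = __builtin_bit_cast(double, 0x3ff0000000000000ULL);
/// \endcode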
ExprResult ParseBuiltinBitCast(); //===--------------------------------------------------------------------===// // C++ 5.2p1: C++ Type Identification ExprResult ParseCXXTypeid(); //===--------------------------------------------------------------------===// // C++ : Microsoft __uuidof Expression ExprResult ParseCXXUuidof(); //===--------------------------------------------------------------------===// // C++ 5.2.4: C++ Pseudo-Destructor Expressions ExprResult ParseCXXPseudoDestructor(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, ParsedType ObjectType); //===--------------------------------------------------------------------===// // C++ 9.3.2: C++ 'this' pointer ExprResult ParseCXXThis(); //===--------------------------------------------------------------------===// // C++ 15: C++ Throw Expression ExprResult ParseThrowExpression(); ExceptionSpecificationType tryParseExceptionSpecification( bool Delayed, SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &DynamicExceptions, SmallVectorImpl<SourceRange> &DynamicExceptionRanges, ExprResult &NoexceptExpr, CachedTokens *&ExceptionSpecTokens); // EndLoc is filled with the location of the last token of the specification. ExceptionSpecificationType ParseDynamicExceptionSpecification( SourceRange &SpecificationRange, SmallVectorImpl<ParsedType> &Exceptions, SmallVectorImpl<SourceRange> &Ranges); //===--------------------------------------------------------------------===// // C++0x 8: Function declaration trailing-return-type TypeResult ParseTrailingReturnType(SourceRange &Range, bool MayBeFollowedByDirectInit); //===--------------------------------------------------------------------===// // C++ 2.13.5: C++ Boolean Literals ExprResult ParseCXXBoolLiteral(); //===--------------------------------------------------------------------===// // C++ 5.2.3: Explicit type conversion (functional notation) ExprResult ParseCXXTypeConstructExpression(const DeclSpec &DS); /// ParseCXXSimpleTypeSpecifier - [C++ 7.1.5.2] Simple type specifiers. /// This should only be called when the current token is known to be part of /// simple-type-specifier. void ParseCXXSimpleTypeSpecifier(DeclSpec &DS); bool ParseCXXTypeSpecifierSeq(DeclSpec &DS); //===--------------------------------------------------------------------===// // C++ 5.3.4 and 5.3.5: C++ new and delete bool ParseExpressionListOrTypeId(SmallVectorImpl<Expr*> &Exprs, Declarator &D); void ParseDirectNewDeclarator(Declarator &D); ExprResult ParseCXXNewExpression(bool UseGlobal, SourceLocation Start); ExprResult ParseCXXDeleteExpression(bool UseGlobal, SourceLocation Start); //===--------------------------------------------------------------------===// // C++ if/switch/while/for condition expression. struct ForRangeInfo; Sema::ConditionResult ParseCXXCondition(StmtResult *InitStmt, SourceLocation Loc, Sema::ConditionKind CK, ForRangeInfo *FRI = nullptr); //===--------------------------------------------------------------------===// // C++ Coroutines ExprResult ParseCoyieldExpression(); //===--------------------------------------------------------------------===// // C99 6.7.8: Initialization. /// ParseInitializer /// initializer: [C99 6.7.8] /// assignment-expression /// '{' ... 
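/// For example (added note, not from the original header), 'int x = 1;'
/// uses the assignment-expression form, while 'int a[2] = {1, 2};' uses
/// the brace-enclosed form handled by ParseBraceInitializer.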
ExprResult ParseInitializer() { if (Tok.isNot(tok::l_brace)) return ParseAssignmentExpression(); return ParseBraceInitializer(); } bool MayBeDesignationStart(); ExprResult ParseBraceInitializer(); ExprResult ParseInitializerWithPotentialDesignator(); //===--------------------------------------------------------------------===// // clang Expressions ExprResult ParseBlockLiteralExpression(); // ^{...} //===--------------------------------------------------------------------===// // Objective-C Expressions ExprResult ParseObjCAtExpression(SourceLocation AtLocation); ExprResult ParseObjCStringLiteral(SourceLocation AtLoc); ExprResult ParseObjCCharacterLiteral(SourceLocation AtLoc); ExprResult ParseObjCNumericLiteral(SourceLocation AtLoc); ExprResult ParseObjCBooleanLiteral(SourceLocation AtLoc, bool ArgValue); ExprResult ParseObjCArrayLiteral(SourceLocation AtLoc); ExprResult ParseObjCDictionaryLiteral(SourceLocation AtLoc); ExprResult ParseObjCBoxedExpr(SourceLocation AtLoc); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc); ExprResult ParseObjCSelectorExpression(SourceLocation AtLoc); ExprResult ParseObjCProtocolExpression(SourceLocation AtLoc); bool isSimpleObjCMessageExpression(); ExprResult ParseObjCMessageExpression(); ExprResult ParseObjCMessageExpressionBody(SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); ExprResult ParseAssignmentExprWithObjCMessageExprStart( SourceLocation LBracloc, SourceLocation SuperLoc, ParsedType ReceiverType, Expr *ReceiverExpr); bool ParseObjCXXMessageReceiver(bool &IsExpr, void *&TypeOrExpr); //===--------------------------------------------------------------------===// // C99 6.8: Statements and Blocks. /// A SmallVector of statements, with stack size 32 (as that is the only one /// used.) typedef SmallVector<Stmt*, 32> StmtVector; /// A SmallVector of expressions, with stack size 12 (the maximum used.) typedef SmallVector<Expr*, 12> ExprVector; /// A SmallVector of types. 
typedef SmallVector<ParsedType, 12> TypeVector;

StmtResult ParseStatement(SourceLocation *TrailingElseLoc = nullptr,
                          ParsedStmtContext StmtCtx =
                              ParsedStmtContext::SubStmt);
StmtResult ParseStatementOrDeclaration(
    StmtVector &Stmts, ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc = nullptr);
StmtResult ParseStatementOrDeclarationAfterAttributes(
    StmtVector &Stmts, ParsedStmtContext StmtCtx,
    SourceLocation *TrailingElseLoc, ParsedAttributesWithRange &Attrs);
StmtResult ParseExprStatement(ParsedStmtContext StmtCtx);
StmtResult ParseLabeledStatement(ParsedAttributesWithRange &attrs,
                                 ParsedStmtContext StmtCtx);
StmtResult ParseCaseStatement(ParsedStmtContext StmtCtx,
                              bool MissingCase = false,
                              ExprResult Expr = ExprResult());
StmtResult ParseDefaultStatement(ParsedStmtContext StmtCtx);
StmtResult ParseCompoundStatement(bool isStmtExpr = false);
StmtResult ParseCompoundStatement(bool isStmtExpr, unsigned ScopeFlags);
void ParseCompoundStatementLeadingPragmas();
bool ConsumeNullStmt(StmtVector &Stmts);
StmtResult ParseCompoundStatementBody(bool isStmtExpr = false);
bool ParseParenExprOrCondition(StmtResult *InitStmt,
                               Sema::ConditionResult &CondResult,
                               SourceLocation Loc, Sema::ConditionKind CK);
StmtResult ParseIfStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseSwitchStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseWhileStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseDoStatement();
StmtResult ParseForStatement(SourceLocation *TrailingElseLoc);
StmtResult ParseGotoStatement();
StmtResult ParseContinueStatement();
StmtResult ParseBreakStatement();
StmtResult ParseReturnStatement();
StmtResult ParseAsmStatement(bool &msAsm);
StmtResult ParseMicrosoftAsmStatement(SourceLocation AsmLoc);
StmtResult ParsePragmaLoopHint(StmtVector &Stmts, ParsedStmtContext StmtCtx,
                               SourceLocation *TrailingElseLoc,
                               ParsedAttributesWithRange &Attrs);

/// Describes the behavior that should be taken for an __if_exists
/// block.
enum IfExistsBehavior {
  /// Parse the block; this code is always used.
  IEB_Parse,
  /// Skip the block entirely; this code is never used.
  IEB_Skip,
  /// Parse the block as a dependent block, which may be used in
  /// some template instantiations but not others.
  IEB_Dependent
};

/// Describes the condition of a Microsoft __if_exists or
/// __if_not_exists block.
struct IfExistsCondition {
  /// The location of the initial keyword.
  SourceLocation KeywordLoc;

  /// Whether this is an __if_exists block (rather than an
  /// __if_not_exists block).
  bool IsIfExists;

  /// Nested-name-specifier preceding the name.
  CXXScopeSpec SS;

  /// The name we're looking for.
  UnqualifiedId Name;

  /// The behavior that this __if_exists or __if_not_exists block
  /// should follow.
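  /// For reference (added example of the Microsoft extension, not from the
  /// original header):
  /// \code
  ///   __if_exists(N::f) { /* tokens used only if N::f is found */ }
  /// \endcode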
  IfExistsBehavior Behavior;
};

bool ParseMicrosoftIfExistsCondition(IfExistsCondition& Result);
void ParseMicrosoftIfExistsStatement(StmtVector &Stmts);
void ParseMicrosoftIfExistsExternalDeclaration();
void ParseMicrosoftIfExistsClassDeclaration(DeclSpec::TST TagType,
                                            ParsedAttributes &AccessAttrs,
                                            AccessSpecifier &CurAS);
bool ParseMicrosoftIfExistsBraceInitializer(ExprVector &InitExprs,
                                            bool &InitExprsOk);
bool ParseAsmOperandsOpt(SmallVectorImpl<IdentifierInfo *> &Names,
                         SmallVectorImpl<Expr *> &Constraints,
                         SmallVectorImpl<Expr *> &Exprs);

//===--------------------------------------------------------------------===//
// C++ 6: Statements and Blocks

StmtResult ParseCXXTryBlock();
StmtResult ParseCXXTryBlockCommon(SourceLocation TryLoc, bool FnTry = false);
StmtResult ParseCXXCatchBlock(bool FnCatch = false);

//===--------------------------------------------------------------------===//
// MS: SEH Statements and Blocks

StmtResult ParseSEHTryBlock();
StmtResult ParseSEHExceptBlock(SourceLocation Loc);
StmtResult ParseSEHFinallyBlock(SourceLocation Loc);
StmtResult ParseSEHLeaveStatement();

//===--------------------------------------------------------------------===//
// Objective-C Statements

StmtResult ParseObjCAtStatement(SourceLocation atLoc,
                                ParsedStmtContext StmtCtx);
StmtResult ParseObjCTryStmt(SourceLocation atLoc);
StmtResult ParseObjCThrowStmt(SourceLocation atLoc);
StmtResult ParseObjCSynchronizedStmt(SourceLocation atLoc);
StmtResult ParseObjCAutoreleasePoolStmt(SourceLocation atLoc);

//===--------------------------------------------------------------------===//
// C99 6.7: Declarations.

/// A context for parsing declaration specifiers.  TODO: flesh this
/// out, there are other significant restrictions on specifiers that
/// would be best implemented in the parser.
enum class DeclSpecContext {
  DSC_normal,         // normal context
  DSC_class,          // class context, enables 'friend'
  DSC_type_specifier, // C++ type-specifier-seq or C specifier-qualifier-list
  DSC_trailing, // C++11 trailing-type-specifier in a trailing return type
  DSC_alias_declaration, // C++11 type-specifier-seq in an alias-declaration
  DSC_top_level,         // top-level/namespace declaration context
  DSC_template_param,    // template parameter context
  DSC_template_type_arg, // template type argument context
  DSC_objc_method_result, // ObjC method result context, enables
                          // 'instancetype'
  DSC_condition           // condition declaration context
};

/// Is this a context in which we are parsing just a type-specifier (or
/// trailing-type-specifier)?
static bool isTypeSpecifier(DeclSpecContext DSC) {
  switch (DSC) {
  case DeclSpecContext::DSC_normal:
  case DeclSpecContext::DSC_template_param:
  case DeclSpecContext::DSC_class:
  case DeclSpecContext::DSC_top_level:
  case DeclSpecContext::DSC_objc_method_result:
  case DeclSpecContext::DSC_condition:
    return false;

  case DeclSpecContext::DSC_template_type_arg:
  case DeclSpecContext::DSC_type_specifier:
  case DeclSpecContext::DSC_trailing:
  case DeclSpecContext::DSC_alias_declaration:
    return true;
  }
  llvm_unreachable("Missing DeclSpecContext case");
}

/// Is this a context in which we can perform class template argument
/// deduction?
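/// For example (added note, not from the original header), C++17 class
/// template argument deduction applies in such contexts:
/// \code
///   std::pair p(1, 2.0);  // deduced as std::pair<int, double>
/// \endcode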
static bool isClassTemplateDeductionContext(DeclSpecContext DSC) { switch (DSC) { case DeclSpecContext::DSC_normal: case DeclSpecContext::DSC_template_param: case DeclSpecContext::DSC_class: case DeclSpecContext::DSC_top_level: case DeclSpecContext::DSC_condition: case DeclSpecContext::DSC_type_specifier: return true; case DeclSpecContext::DSC_objc_method_result: case DeclSpecContext::DSC_template_type_arg: case DeclSpecContext::DSC_trailing: case DeclSpecContext::DSC_alias_declaration: return false; } llvm_unreachable("Missing DeclSpecContext case"); } /// Information on a C++0x for-range-initializer found while parsing a /// declaration which turns out to be a for-range-declaration. struct ForRangeInit { SourceLocation ColonLoc; ExprResult RangeExpr; bool ParsedForRangeDecl() { return !ColonLoc.isInvalid(); } }; struct ForRangeInfo : ForRangeInit { StmtResult LoopVar; }; DeclGroupPtrTy ParseDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, SourceLocation *DeclSpecStart = nullptr); DeclGroupPtrTy ParseSimpleDeclaration(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs, bool RequireSemi, ForRangeInit *FRI = nullptr, SourceLocation *DeclSpecStart = nullptr); bool MightBeDeclarator(DeclaratorContext Context); DeclGroupPtrTy ParseDeclGroup(ParsingDeclSpec &DS, DeclaratorContext Context, SourceLocation *DeclEnd = nullptr, ForRangeInit *FRI = nullptr); Decl *ParseDeclarationAfterDeclarator(Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo()); bool ParseAsmAttributesAfterDeclarator(Declarator &D); Decl *ParseDeclarationAfterDeclaratorAndAttributes( Declarator &D, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ForRangeInit *FRI = nullptr); Decl *ParseFunctionStatementBody(Decl *Decl, ParseScope &BodyScope); Decl *ParseFunctionTryBlock(Decl *Decl, ParseScope &BodyScope); /// When in code-completion, skip parsing of the function/method body /// unless the body contains the code-completion point. /// /// \returns true if the function body was skipped. 
bool trySkippingFunctionBody();

bool ParseImplicitInt(DeclSpec &DS, CXXScopeSpec *SS,
                      const ParsedTemplateInfo &TemplateInfo,
                      AccessSpecifier AS, DeclSpecContext DSC,
                      ParsedAttributesWithRange &Attrs);
DeclSpecContext
getDeclSpecContextFromDeclaratorContext(DeclaratorContext Context);
void ParseDeclarationSpecifiers(
    DeclSpec &DS,
    const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(),
    AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal,
    LateParsedAttrList *LateAttrs = nullptr);
bool DiagnoseMissingSemiAfterTagDefinition(
    DeclSpec &DS, AccessSpecifier AS, DeclSpecContext DSContext,
    LateParsedAttrList *LateAttrs = nullptr);

void ParseSpecifierQualifierList(
    DeclSpec &DS, AccessSpecifier AS = AS_none,
    DeclSpecContext DSC = DeclSpecContext::DSC_normal);

void ParseObjCTypeQualifierList(ObjCDeclSpec &DS, DeclaratorContext Context);

void ParseEnumSpecifier(SourceLocation TagLoc, DeclSpec &DS,
                        const ParsedTemplateInfo &TemplateInfo,
                        AccessSpecifier AS, DeclSpecContext DSC);
void ParseEnumBody(SourceLocation StartLoc, Decl *TagDecl);
void ParseStructUnionBody(SourceLocation StartLoc, DeclSpec::TST TagType,
                          Decl *TagDecl);
void ParseStructDeclaration(
    ParsingDeclSpec &DS,
    llvm::function_ref<void(ParsingFieldDeclarator &)> FieldsCallback);

bool isDeclarationSpecifier(bool DisambiguatingWithExpression = false);
bool isTypeSpecifierQualifier();

/// isKnownToBeTypeSpecifier - Return true if we know that the specified token
/// is definitely a type-specifier.  Return false if it isn't part of a type
/// specifier or if we're not sure.
bool isKnownToBeTypeSpecifier(const Token &Tok) const;

/// Return true if we know that we are definitely looking at a
/// decl-specifier, and that it isn't part of an expression such as a
/// function-style cast. Return false if it's not a decl-specifier, or
/// we're not sure.
bool isKnownToBeDeclarationSpecifier() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationSpecifier() == TPResult::True;
  return isDeclarationSpecifier(true);
}

/// isDeclarationStatement - Disambiguates between a declaration or an
/// expression statement, when parsing function bodies.
/// Returns true for declaration, false for expression.
bool isDeclarationStatement() {
  if (getLangOpts().CPlusPlus)
    return isCXXDeclarationStatement();
  return isDeclarationSpecifier(true);
}

/// isForInitDeclaration - Disambiguates between a declaration or an
/// expression in the context of the C 'clause-1' or the C++
/// 'for-init-statement' part of a 'for' statement.
/// Returns true for declaration, false for expression.
bool isForInitDeclaration() {
  if (getLangOpts().OpenMP)
    Actions.startOpenMPLoop();
  if (getLangOpts().CPlusPlus)
    return isCXXSimpleDeclaration(/*AllowForRangeDecl=*/true);
  return isDeclarationSpecifier(true);
}

/// Determine whether this is a C++1z for-range-identifier.
bool isForRangeIdentifier();

/// Determine whether we are currently at the start of an Objective-C
/// class message that appears to be missing the open bracket '['.
bool isStartOfObjCClassMessageMissingOpenBracket();

/// Starting with a scope specifier, identifier, or
/// template-id that refers to the current class, determine whether
/// this is a constructor declarator.
bool isConstructorDeclarator(bool Unqualified, bool DeductionGuide = false);

/// Specifies the context in which type-id/expression
/// disambiguation will occur.
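/// A classic ambiguity (added note, not from the original header): inside
/// parentheses, 'T()' can be a type-id (function of no parameters returning
/// T) or an expression (value-initialization of T), so the context decides.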
enum TentativeCXXTypeIdContext {
  TypeIdInParens,
  TypeIdUnambiguous,
  TypeIdAsTemplateArgument
};

/// isTypeIdInParens - Assumes that a '(' was parsed and now we want to know
/// whether the parens contain an expression or a type-id.  Returns true for
/// a type-id and false for an expression.
bool isTypeIdInParens(bool &isAmbiguous) {
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdInParens, isAmbiguous);
  isAmbiguous = false;
  return isTypeSpecifierQualifier();
}
bool isTypeIdInParens() {
  bool isAmbiguous;
  return isTypeIdInParens(isAmbiguous);
}

/// Checks whether the current tokens form a type-id or an expression.
/// It is similar to isTypeIdInParens but does not assume that the type-id
/// is in parentheses.
bool isTypeIdUnambiguously() {
  bool IsAmbiguous;
  if (getLangOpts().CPlusPlus)
    return isCXXTypeId(TypeIdUnambiguous, IsAmbiguous);
  return isTypeSpecifierQualifier();
}

/// isCXXDeclarationStatement - C++-specialized function that disambiguates
/// between a declaration or an expression statement, when parsing function
/// bodies. Returns true for declaration, false for expression.
bool isCXXDeclarationStatement();

/// isCXXSimpleDeclaration - C++-specialized function that disambiguates
/// between a simple-declaration or an expression-statement.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
/// Returns false if the statement is disambiguated as expression.
bool isCXXSimpleDeclaration(bool AllowForRangeDecl);

/// isCXXFunctionDeclarator - Disambiguates between a function declarator or
/// a constructor-style initializer, when parsing declaration statements.
/// Returns true for function declarator and false for constructor-style
/// initializer. Sets 'IsAmbiguous' to true to indicate that this declaration
/// might be a constructor-style initializer.
/// If during the disambiguation process a parsing error is encountered,
/// the function returns true to let the declaration parsing code handle it.
bool isCXXFunctionDeclarator(bool *IsAmbiguous = nullptr);

struct ConditionDeclarationOrInitStatementState;
enum class ConditionOrInitStatement {
  Expression,    ///< Disambiguated as an expression (either kind).
  ConditionDecl, ///< Disambiguated as the declaration form of condition.
  InitStmtDecl,  ///< Disambiguated as a simple-declaration init-statement.
  ForRangeDecl,  ///< Disambiguated as a for-range declaration.
  Error          ///< Can't be any of the above!
};

/// Disambiguates between the different kinds of things that can happen
/// after 'if (' or 'switch ('. This could be one of two different kinds of
/// declaration (depending on whether there is a ';' later) or an expression.
ConditionOrInitStatement
isCXXConditionDeclarationOrInitStatement(bool CanBeInitStmt,
                                         bool CanBeForRangeDecl);

bool isCXXTypeId(TentativeCXXTypeIdContext Context, bool &isAmbiguous);
bool isCXXTypeId(TentativeCXXTypeIdContext Context) {
  bool isAmbiguous;
  return isCXXTypeId(Context, isAmbiguous);
}

/// TPResult - Used as the result value for functions whose purpose is to
/// disambiguate C++ constructs by "tentatively parsing" them.
enum class TPResult {
  True, False, Ambiguous, Error
};

/// Based only on the given token kind, determine whether we know that
/// we're at the start of an expression or a type-specifier-seq (which may
/// be an expression, in C++).
///
/// This routine does not attempt to resolve any of the trick cases, e.g.,
/// those involving lookup of identifiers.
///
/// \returns \c TPResult::True if this token starts an expression,
/// \c TPResult::False if this token starts a type-specifier-seq, or
/// \c TPResult::Ambiguous if it cannot tell.
TPResult isExpressionOrTypeSpecifierSimple(tok::TokenKind Kind);

/// isCXXDeclarationSpecifier - Returns TPResult::True if it is a
/// declaration specifier, TPResult::False if it is not,
/// TPResult::Ambiguous if it could be either a decl-specifier or a
/// function-style cast, and TPResult::Error if a parsing error was
/// encountered. If it could be a braced C++11 function-style cast, returns
/// BracedCastResult.
/// Doesn't consume tokens.
TPResult
isCXXDeclarationSpecifier(TPResult BracedCastResult = TPResult::False,
                          bool *InvalidAsDeclSpec = nullptr);

/// Given that isCXXDeclarationSpecifier returns \c TPResult::True or
/// \c TPResult::Ambiguous, determine whether the decl-specifier would be
/// a type-specifier other than a cv-qualifier.
bool isCXXDeclarationSpecifierAType();

/// Determine whether the current token sequence might be
///   '<' template-argument-list '>'
/// rather than a less-than expression.
TPResult isTemplateArgumentList(unsigned TokensToSkip);

/// Determine whether an identifier has been tentatively declared as a
/// non-type. Such tentative declarations should not be found to name a type
/// during a tentative parse, but also should not be annotated as a non-type.
bool isTentativelyDeclared(IdentifierInfo *II);

// "Tentative parsing" functions, used for disambiguation. If a parsing error
// is encountered they will return TPResult::Error.
// Returning TPResult::True/False indicates that the ambiguity was
// resolved and tentative parsing may stop. TPResult::Ambiguous indicates
// that more tentative parsing is necessary for disambiguation.
// They all consume tokens, so backtracking should be used after calling them.
TPResult TryParseSimpleDeclaration(bool AllowForRangeDecl);
TPResult TryParseTypeofSpecifier();
TPResult TryParseProtocolQualifiers();
TPResult TryParsePtrOperatorSeq();
TPResult TryParseOperatorId();
TPResult TryParseInitDeclaratorList();
TPResult TryParseDeclarator(bool mayBeAbstract, bool mayHaveIdentifier = true,
                            bool mayHaveDirectInit = false);
TPResult
TryParseParameterDeclarationClause(bool *InvalidAsDeclaration = nullptr,
                                   bool VersusTemplateArg = false);
TPResult TryParseFunctionDeclarator();
TPResult TryParseBracketDeclarator();
TPResult TryConsumeDeclarationSpecifier();

public:
TypeResult
ParseTypeName(SourceRange *Range = nullptr,
              DeclaratorContext Context = DeclaratorContext::TypeNameContext,
              AccessSpecifier AS = AS_none, Decl **OwnedType = nullptr,
              ParsedAttributes *Attrs = nullptr);

private:
void ParseBlockId(SourceLocation CaretLoc);

/// Are [[]] attributes enabled?
bool standardAttributesAllowed() const {
  const LangOptions &LO = getLangOpts();
  return LO.DoubleSquareBracketAttributes;
}

// Check for the start of an attribute-specifier-seq in a context where an
// attribute is not allowed.
bool CheckProhibitedCXX11Attribute() {
  assert(Tok.is(tok::l_square));
  if (!standardAttributesAllowed() || NextToken().isNot(tok::l_square))
    return false;
  return DiagnoseProhibitedCXX11Attribute();
}

bool DiagnoseProhibitedCXX11Attribute();
void CheckMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                  SourceLocation CorrectLocation) {
  if (!standardAttributesAllowed())
    return;
  if ((Tok.isNot(tok::l_square) || NextToken().isNot(tok::l_square)) &&
      Tok.isNot(tok::kw_alignas))
    return;
  DiagnoseMisplacedCXX11Attribute(Attrs, CorrectLocation);
}
void DiagnoseMisplacedCXX11Attribute(ParsedAttributesWithRange &Attrs,
                                     SourceLocation CorrectLocation);

void stripTypeAttributesOffDeclSpec(ParsedAttributesWithRange &Attrs,
                                    DeclSpec &DS, Sema::TagUseKind TUK);

// FixItLoc = possible correct location for the attributes
void ProhibitAttributes(ParsedAttributesWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clear();
}

void ProhibitAttributes(ParsedAttributesViewWithRange &Attrs,
                        SourceLocation FixItLoc = SourceLocation()) {
  if (Attrs.Range.isInvalid())
    return;
  DiagnoseProhibitedAttributes(Attrs.Range, FixItLoc);
  Attrs.clearListOnly();
}

void DiagnoseProhibitedAttributes(const SourceRange &Range,
                                  SourceLocation FixItLoc);

// Forbid C++11 and C2x attributes that appear on certain syntactic locations
// which the standard permits but we don't support yet, for example,
// attributes that appertain to decl specifiers.
void ProhibitCXX11Attributes(ParsedAttributesWithRange &Attrs,
                             unsigned DiagID);

/// Skip C++11 and C2x attributes and return the end location of the
/// last one.
/// \returns SourceLocation() if there are no attributes.
SourceLocation SkipCXX11Attributes();

/// Diagnose and skip C++11 and C2x attributes that appear in syntactic
/// locations where attributes are not allowed.
void DiagnoseAndSkipCXX11Attributes();

/// Parses syntax-generic attribute arguments for attributes which are
/// known to the implementation, and adds them to the given ParsedAttributes
/// list with the given attribute syntax. Returns the number of arguments
/// parsed for the attribute.
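/// For example (added illustration, not from the original header), given
/// \code
///   __attribute__((format(printf, 1, 2)))
/// \endcode
/// the parsed arguments are 'printf', '1' and '2'.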
unsigned ParseAttributeArgsCommon(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseGNUAttributes(Declarator &D, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) { ParsedAttributes attrs(AttrFactory); SourceLocation endLoc; ParseGNUAttributes(attrs, &endLoc, LateAttrs, &D); D.takeAttributes(attrs, endLoc); } } void MaybeParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr) { if (Tok.is(tok::kw___attribute)) ParseGNUAttributes(attrs, endLoc, LateAttrs); } void ParseGNUAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr, LateParsedAttrList *LateAttrs = nullptr, Declarator *D = nullptr); void ParseGNUAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax, Declarator *D); IdentifierLoc *ParseIdentifierLoc(); unsigned ParseClangAttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void MaybeParseCXX11Attributes(Declarator &D) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrs(AttrFactory); SourceLocation endLoc; ParseCXX11Attributes(attrs, &endLoc); D.takeAttributes(attrs, endLoc); } } void MaybeParseCXX11Attributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier()) { ParsedAttributesWithRange attrsWithRange(AttrFactory); ParseCXX11Attributes(attrsWithRange, endLoc); attrs.takeAllFrom(attrsWithRange); } } void MaybeParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *endLoc = nullptr, bool OuterMightBeMessageSend = false) { if (standardAttributesAllowed() && isCXX11AttributeSpecifier(false, OuterMightBeMessageSend)) ParseCXX11Attributes(attrs, endLoc); } void ParseCXX11AttributeSpecifier(ParsedAttributes &attrs, SourceLocation *EndLoc = nullptr); void ParseCXX11Attributes(ParsedAttributesWithRange &attrs, SourceLocation *EndLoc = nullptr); /// Parses a C++11 (or C2x)-style attribute argument list. Returns true /// if this results in adding an attribute to the ParsedAttributes list. 
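/// For example (added illustration, not from the original header), this
/// parses the argument clause in
/// \code
///   [[deprecated("use replacement() instead")]]
/// \endcode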
bool ParseCXX11AttributeArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc); IdentifierInfo *TryParseCXX11AttributeIdentifier(SourceLocation &Loc); void MaybeParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr) { if (getLangOpts().MicrosoftExt && Tok.is(tok::l_square)) ParseMicrosoftAttributes(attrs, endLoc); } void ParseMicrosoftUuidAttributeArgs(ParsedAttributes &Attrs); void ParseMicrosoftAttributes(ParsedAttributes &attrs, SourceLocation *endLoc = nullptr); void MaybeParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr) { const auto &LO = getLangOpts(); if (LO.DeclSpecKeyword && Tok.is(tok::kw___declspec)) ParseMicrosoftDeclSpecs(Attrs, End); } void ParseMicrosoftDeclSpecs(ParsedAttributes &Attrs, SourceLocation *End = nullptr); bool ParseMicrosoftDeclSpecArgs(IdentifierInfo *AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs); void ParseMicrosoftTypeAttributes(ParsedAttributes &attrs); void DiagnoseAndSkipExtendedMicrosoftTypeAttributes(); SourceLocation SkipExtendedMicrosoftTypeAttributes(); void ParseMicrosoftInheritanceClassAttributes(ParsedAttributes &attrs); void ParseBorlandTypeAttributes(ParsedAttributes &attrs); void ParseOpenCLKernelAttributes(ParsedAttributes &attrs); void ParseOpenCLQualifiers(ParsedAttributes &Attrs); /// Parses opencl_unroll_hint attribute if language is OpenCL v2.0 /// or higher. /// \return false if error happens. bool MaybeParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs) { if (getLangOpts().OpenCL) return ParseOpenCLUnrollHintAttribute(Attrs); return true; } /// Parses opencl_unroll_hint attribute. /// \return false if error happens. 
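/// For example (added illustration of OpenCL source, not from the original
/// header):
/// \code
///   __attribute__((opencl_unroll_hint(4)))
///   for (int i = 0; i < 8; ++i) a[i] = 0;
/// \endcode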
bool ParseOpenCLUnrollHintAttribute(ParsedAttributes &Attrs); void ParseNullabilityTypeSpecifiers(ParsedAttributes &attrs); VersionTuple ParseVersionTuple(SourceRange &Range); void ParseAvailabilityAttribute(IdentifierInfo &Availability, SourceLocation AvailabilityLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); Optional<AvailabilitySpec> ParseAvailabilitySpec(); ExprResult ParseAvailabilityCheckExpr(SourceLocation StartLoc); void ParseExternalSourceSymbolAttribute(IdentifierInfo &ExternalSourceSymbol, SourceLocation Loc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseObjCBridgeRelatedAttribute(IdentifierInfo &ObjCBridgeRelated, SourceLocation ObjCBridgeRelatedLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeTagForDatatypeAttribute(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseSwiftNewtypeAttribute(IdentifierInfo &SwiftNewtype, SourceLocation SwiftNewtypeLoc, ParsedAttributes &attrs, SourceLocation *endLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseAttributeWithTypeArg(IdentifierInfo &AttrName, SourceLocation AttrNameLoc, ParsedAttributes &Attrs, SourceLocation *EndLoc, IdentifierInfo *ScopeName, SourceLocation ScopeLoc, ParsedAttr::Syntax Syntax); void ParseTypeofSpecifier(DeclSpec &DS); SourceLocation ParseDecltypeSpecifier(DeclSpec &DS); void AnnotateExistingDecltypeSpecifier(const DeclSpec &DS, SourceLocation StartLoc, SourceLocation EndLoc); void ParseUnderlyingTypeSpecifier(DeclSpec &DS); void ParseAtomicSpecifier(DeclSpec &DS); ExprResult ParseAlignArgument(SourceLocation Start, SourceLocation &EllipsisLoc); void ParseAlignmentSpecifier(ParsedAttributes &Attrs, SourceLocation *endLoc = nullptr); void ParsePtrauthQualifier(ParsedAttributes &Attrs); VirtSpecifiers::Specifier isCXX11VirtSpecifier(const Token &Tok) const; VirtSpecifiers::Specifier isCXX11VirtSpecifier() const { return isCXX11VirtSpecifier(Tok); } void ParseOptionalCXX11VirtSpecifierSeq(VirtSpecifiers &VS, bool IsInterface, SourceLocation FriendLoc); bool isCXX11FinalKeyword() const; /// DeclaratorScopeObj - RAII object used in Parser::ParseDirectDeclarator to /// enter a new C++ declarator scope and exit it when the function is /// finished. class DeclaratorScopeObj { Parser &P; CXXScopeSpec &SS; bool EnteredScope; bool CreatedScope; public: DeclaratorScopeObj(Parser &p, CXXScopeSpec &ss) : P(p), SS(ss), EnteredScope(false), CreatedScope(false) {} void EnterDeclaratorScope() { assert(!EnteredScope && "Already entered the scope!"); assert(SS.isSet() && "C++ scope was not set!"); CreatedScope = true; P.EnterScope(0); // Not a decl scope. if (!P.Actions.ActOnCXXEnterDeclaratorScope(P.getCurScope(), SS)) EnteredScope = true; } ~DeclaratorScopeObj() { if (EnteredScope) { assert(SS.isSet() && "C++ scope was cleared ?"); P.Actions.ActOnCXXExitDeclaratorScope(P.getCurScope(), SS); } if (CreatedScope) P.ExitScope(); } }; /// ParseDeclarator - Parse and verify a newly-initialized declarator. void ParseDeclarator(Declarator &D); /// A function that parses a variant of direct-declarator. 
typedef void (Parser::*DirectDeclParseFunction)(Declarator&); void ParseDeclaratorInternal(Declarator &D, DirectDeclParseFunction DirectDeclParser); enum AttrRequirements { AR_NoAttributesParsed = 0, ///< No attributes are diagnosed. AR_GNUAttributesParsedAndRejected = 1 << 0, ///< Diagnose GNU attributes. AR_GNUAttributesParsed = 1 << 1, AR_CXX11AttributesParsed = 1 << 2, AR_DeclspecAttributesParsed = 1 << 3, AR_AllAttributesParsed = AR_GNUAttributesParsed | AR_CXX11AttributesParsed | AR_DeclspecAttributesParsed, AR_VendorAttributesParsed = AR_GNUAttributesParsed | AR_DeclspecAttributesParsed }; void ParseTypeQualifierListOpt( DeclSpec &DS, unsigned AttrReqs = AR_AllAttributesParsed, bool AtomicAllowed = true, bool IdentifierRequired = false, Optional<llvm::function_ref<void()>> CodeCompletionHandler = None); void ParseDirectDeclarator(Declarator &D); void ParseDecompositionDeclarator(Declarator &D); void ParseParenDeclarator(Declarator &D); void ParseFunctionDeclarator(Declarator &D, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker, bool IsAmbiguous, bool RequiresArg = false); bool ParseRefQualifier(bool &RefQualifierIsLValueRef, SourceLocation &RefQualifierLoc); bool isFunctionDeclaratorIdentifierList(); void ParseFunctionDeclaratorIdentifierList( Declarator &D, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo); void ParseParameterDeclarationClause( Declarator &D, ParsedAttributes &attrs, SmallVectorImpl<DeclaratorChunk::ParamInfo> &ParamInfo, SourceLocation &EllipsisLoc); void ParseBracketDeclarator(Declarator &D); void ParseMisplacedBracketDeclarator(Declarator &D); //===--------------------------------------------------------------------===// // C++ 7: Declarations [dcl.dcl] /// The kind of attribute specifier we have found. enum CXX11AttributeKind { /// This is not an attribute specifier. CAK_NotAttributeSpecifier, /// This should be treated as an attribute-specifier. CAK_AttributeSpecifier, /// The next tokens are '[[', but this is not an attribute-specifier. This /// is ill-formed by C++11 [dcl.attr.grammar]p6. 
CAK_InvalidAttributeSpecifier }; CXX11AttributeKind isCXX11AttributeSpecifier(bool Disambiguate = false, bool OuterMightBeMessageSend = false); void DiagnoseUnexpectedNamespace(NamedDecl *Context); DeclGroupPtrTy ParseNamespace(DeclaratorContext Context, SourceLocation &DeclEnd, SourceLocation InlineLoc = SourceLocation()); struct InnerNamespaceInfo { SourceLocation NamespaceLoc; SourceLocation InlineLoc; SourceLocation IdentLoc; IdentifierInfo *Ident; }; using InnerNamespaceInfoList = llvm::SmallVector<InnerNamespaceInfo, 4>; void ParseInnerNamespace(const InnerNamespaceInfoList &InnerNSs, unsigned int index, SourceLocation &InlineLoc, ParsedAttributes &attrs, BalancedDelimiterTracker &Tracker); Decl *ParseLinkage(ParsingDeclSpec &DS, DeclaratorContext Context); Decl *ParseExportDeclaration(); DeclGroupPtrTy ParseUsingDirectiveOrDeclaration( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd, ParsedAttributesWithRange &attrs); Decl *ParseUsingDirective(DeclaratorContext Context, SourceLocation UsingLoc, SourceLocation &DeclEnd, ParsedAttributes &attrs); struct UsingDeclarator { SourceLocation TypenameLoc; CXXScopeSpec SS; UnqualifiedId Name; SourceLocation EllipsisLoc; void clear() { TypenameLoc = EllipsisLoc = SourceLocation(); SS.clear(); Name.clear(); } }; bool ParseUsingDeclarator(DeclaratorContext Context, UsingDeclarator &D); DeclGroupPtrTy ParseUsingDeclaration(DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, SourceLocation &DeclEnd, AccessSpecifier AS = AS_none); Decl *ParseAliasDeclarationAfterDeclarator( const ParsedTemplateInfo &TemplateInfo, SourceLocation UsingLoc, UsingDeclarator &D, SourceLocation &DeclEnd, AccessSpecifier AS, ParsedAttributes &Attrs, Decl **OwnedType = nullptr); Decl *ParseStaticAssertDeclaration(SourceLocation &DeclEnd); Decl *ParseNamespaceAlias(SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // C++ 9: classes [class] and C structs/unions. 
bool isValidAfterTypeSpecifier(bool CouldBeBitfield); void ParseClassSpecifier(tok::TokenKind TagTokKind, SourceLocation TagLoc, DeclSpec &DS, const ParsedTemplateInfo &TemplateInfo, AccessSpecifier AS, bool EnteringContext, DeclSpecContext DSC, ParsedAttributesWithRange &Attributes); void SkipCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, unsigned TagType, Decl *TagDecl); void ParseCXXMemberSpecification(SourceLocation StartLoc, SourceLocation AttrFixitLoc, ParsedAttributesWithRange &Attrs, unsigned TagType, Decl *TagDecl); ExprResult ParseCXXMemberInitializer(Decl *D, bool IsFunction, SourceLocation &EqualLoc); bool ParseCXXMemberDeclaratorBeforeInitializer(Declarator &DeclaratorInfo, VirtSpecifiers &VS, ExprResult &BitfieldSize, LateParsedAttrList &LateAttrs); void MaybeParseAndDiagnoseDeclSpecAfterCXX11VirtSpecifierSeq(Declarator &D, VirtSpecifiers &VS); DeclGroupPtrTy ParseCXXClassMemberDeclaration( AccessSpecifier AS, ParsedAttributes &Attr, const ParsedTemplateInfo &TemplateInfo = ParsedTemplateInfo(), ParsingDeclRAIIObject *DiagsFromTParams = nullptr); DeclGroupPtrTy ParseCXXClassMemberDeclarationWithPragmas( AccessSpecifier &AS, ParsedAttributesWithRange &AccessAttrs, DeclSpec::TST TagType, Decl *Tag); void ParseConstructorInitializer(Decl *ConstructorDecl); MemInitResult ParseMemInitializer(Decl *ConstructorDecl); void HandleMemberFunctionDeclDelays(Declarator& DeclaratorInfo, Decl *ThisDecl); //===--------------------------------------------------------------------===// // C++ 10: Derived classes [class.derived] TypeResult ParseBaseTypeSpecifier(SourceLocation &BaseLoc, SourceLocation &EndLocation); void ParseBaseClause(Decl *ClassDecl); BaseResult ParseBaseSpecifier(Decl *ClassDecl); AccessSpecifier getAccessSpecifierIfPresent() const; bool ParseUnqualifiedIdTemplateId(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, IdentifierInfo *Name, SourceLocation NameLoc, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Id, bool AssumeTemplateId); bool ParseUnqualifiedIdOperator(CXXScopeSpec &SS, bool EnteringContext, ParsedType ObjectType, UnqualifiedId &Result); //===--------------------------------------------------------------------===// // OpenMP: Directives and clauses. /// Parse clauses for '#pragma omp declare simd'. DeclGroupPtrTy ParseOMPDeclareSimdClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parses OpenMP context selectors and calls \p Callback for each /// successfully parsed context selector. bool parseOpenMPContextSelectors(SourceLocation Loc, SmallVectorImpl<Sema::OMPCtxSelectorData> &Data); /// Parse clauses for '#pragma omp declare variant'. void ParseOMPDeclareVariantClauses(DeclGroupPtrTy Ptr, CachedTokens &Toks, SourceLocation Loc); /// Parse clauses for '#pragma omp declare target'. DeclGroupPtrTy ParseOMPDeclareTargetClauses(); /// Parse '#pragma omp end declare target'. void ParseOMPEndDeclareTargetDirective(OpenMPDirectiveKind DKind, SourceLocation Loc); /// Parses declarative OpenMP directives. DeclGroupPtrTy ParseOpenMPDeclarativeDirectiveWithExtDecl( AccessSpecifier &AS, ParsedAttributesWithRange &Attrs, DeclSpec::TST TagType = DeclSpec::TST_unspecified, Decl *TagDecl = nullptr); /// Parse 'omp declare reduction' construct. DeclGroupPtrTy ParseOpenMPDeclareReductionDirective(AccessSpecifier AS); /// Parses initializer for provided omp_priv declaration inside the reduction /// initializer. 
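/// For example (added illustration of OpenMP source, not from the original
/// header), the 'initializer' clause below supplies the omp_priv
/// initializer parsed here:
/// \code
///   #pragma omp declare reduction(mymax : int : \
///       omp_out = omp_in > omp_out ? omp_in : omp_out) \
///       initializer(omp_priv = 0)
/// \endcode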
void ParseOpenMPReductionInitializerForDecl(VarDecl *OmpPrivParm); /// Parses 'omp declare mapper' directive. DeclGroupPtrTy ParseOpenMPDeclareMapperDirective(AccessSpecifier AS); /// Parses variable declaration in 'omp declare mapper' directive. TypeResult parseOpenMPDeclareMapperVarDecl(SourceRange &Range, DeclarationName &Name, AccessSpecifier AS = AS_none); /// Parses simple list of variables. /// /// \param Kind Kind of the directive. /// \param Callback Callback function to be called for the list elements. /// \param AllowScopeSpecifier true, if the variables can have fully /// qualified names. /// bool ParseOpenMPSimpleVarList( OpenMPDirectiveKind Kind, const llvm::function_ref<void(CXXScopeSpec &, DeclarationNameInfo)> & Callback, bool AllowScopeSpecifier); /// Parses declarative or executable directive. /// /// \param StmtCtx The context in which we're parsing the directive. StmtResult ParseOpenMPDeclarativeOrExecutableDirective(ParsedStmtContext StmtCtx); /// Parses clause of kind \a CKind for directive of a kind \a Kind. /// /// \param DKind Kind of current directive. /// \param CKind Kind of current clause. /// \param FirstClause true, if this is the first clause of a kind \a CKind /// in current directive. /// OMPClause *ParseOpenMPClause(OpenMPDirectiveKind DKind, OpenMPClauseKind CKind, bool FirstClause); /// Parses clause with a single expression of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses simple clause of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSimpleClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause with a single expression and an additional argument /// of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, bool ParseOnly); /// Parses clause without any additional arguments. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPClause(OpenMPClauseKind Kind, bool ParseOnly = false); /// Parses clause with the list of variables of a kind \a Kind. /// /// \param Kind Kind of current clause. /// \param ParseOnly true to skip the clause's semantic actions and return /// nullptr. /// OMPClause *ParseOpenMPVarListClause(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, bool ParseOnly); public: /// Parses simple expression in parens for single-expression clauses of OpenMP /// constructs. /// \param RLoc Returned location of right paren. ExprResult ParseOpenMPParensExpr(StringRef ClauseName, SourceLocation &RLoc, bool IsAddressOfOperand = false); /// Data used for parsing list of variables in OpenMP clauses. struct OpenMPVarListDataTy { Expr *TailExpr = nullptr; SourceLocation ColonLoc; SourceLocation RLoc; CXXScopeSpec ReductionOrMapperIdScopeSpec; DeclarationNameInfo ReductionOrMapperId; int ExtraModifier = -1; ///< Additional modifier for linear, map, depend or ///< lastprivate clause. 
SmallVector<OpenMPMapModifierKind, OMPMapClause::NumberOfModifiers> MapTypeModifiers; SmallVector<SourceLocation, OMPMapClause::NumberOfModifiers> MapTypeModifiersLoc; bool IsMapTypeImplicit = false; SourceLocation DepLinMapLastLoc; }; /// Parses clauses with list. bool ParseOpenMPVarList(OpenMPDirectiveKind DKind, OpenMPClauseKind Kind, SmallVectorImpl<Expr *> &Vars, OpenMPVarListDataTy &Data); bool ParseUnqualifiedId(CXXScopeSpec &SS, bool EnteringContext, bool AllowDestructorName, bool AllowConstructorName, bool AllowDeductionGuide, ParsedType ObjectType, SourceLocation *TemplateKWLoc, UnqualifiedId &Result); /// Parses the mapper modifier in map, to, and from clauses. bool parseMapperModifier(OpenMPVarListDataTy &Data); /// Parses map-type-modifiers in map clause. /// map([ [map-type-modifier[,] [map-type-modifier[,] ...] map-type : ] list) /// where, map-type-modifier ::= always | close | mapper(mapper-identifier) bool parseMapTypeModifiers(OpenMPVarListDataTy &Data); private: //===--------------------------------------------------------------------===// // C++ 14: Templates [temp] // C++ 14.1: Template Parameters [temp.param] Decl *ParseDeclarationStartingWithTemplate(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); Decl *ParseTemplateDeclarationOrSpecialization(DeclaratorContext Context, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS); Decl *ParseSingleDeclarationAfterTemplate( DeclaratorContext Context, const ParsedTemplateInfo &TemplateInfo, ParsingDeclRAIIObject &DiagsFromParams, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); bool ParseTemplateParameters(unsigned Depth, SmallVectorImpl<NamedDecl *> &TemplateParams, SourceLocation &LAngleLoc, SourceLocation &RAngleLoc); bool ParseTemplateParameterList(unsigned Depth, SmallVectorImpl<NamedDecl*> &TemplateParams); bool isStartOfTemplateTypeParameter(); NamedDecl *ParseTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTypeParameter(unsigned Depth, unsigned Position); NamedDecl *ParseTemplateTemplateParameter(unsigned Depth, unsigned Position); NamedDecl *ParseNonTypeTemplateParameter(unsigned Depth, unsigned Position); void DiagnoseMisplacedEllipsis(SourceLocation EllipsisLoc, SourceLocation CorrectLoc, bool AlreadyHasEllipsis, bool IdentifierHasName); void DiagnoseMisplacedEllipsisInDeclarator(SourceLocation EllipsisLoc, Declarator &D); // C++ 14.3: Template arguments [temp.arg] typedef SmallVector<ParsedTemplateArgument, 16> TemplateArgList; bool ParseGreaterThanInTemplateList(SourceLocation &RAngleLoc, bool ConsumeLastToken, bool ObjCGenericList); bool ParseTemplateIdAfterTemplateName(bool ConsumeLastToken, SourceLocation &LAngleLoc, TemplateArgList &TemplateArgs, SourceLocation &RAngleLoc); bool AnnotateTemplateIdToken(TemplateTy Template, TemplateNameKind TNK, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &TemplateName, bool AllowTypeAnnotation = true); void AnnotateTemplateIdTokenAsType(bool IsClassName = false); bool ParseTemplateArgumentList(TemplateArgList &TemplateArgs); ParsedTemplateArgument ParseTemplateTemplateArgument(); ParsedTemplateArgument ParseTemplateArgument(); Decl *ParseExplicitInstantiation(DeclaratorContext Context, SourceLocation ExternLoc, SourceLocation TemplateLoc, SourceLocation &DeclEnd, ParsedAttributes &AccessAttrs, AccessSpecifier AS = AS_none); // C++2a: Template, concept definition [temp] Decl * ParseConceptDefinition(const 
ParsedTemplateInfo &TemplateInfo, SourceLocation &DeclEnd); //===--------------------------------------------------------------------===// // Modules DeclGroupPtrTy ParseModuleDecl(bool IsFirstDecl); Decl *ParseModuleImport(SourceLocation AtLoc); bool parseMisplacedModuleImport(); bool tryParseMisplacedModuleImport() { tok::TokenKind Kind = Tok.getKind(); if (Kind == tok::annot_module_begin || Kind == tok::annot_module_end || Kind == tok::annot_module_include) return parseMisplacedModuleImport(); return false; } bool ParseModuleName( SourceLocation UseLoc, SmallVectorImpl<std::pair<IdentifierInfo *, SourceLocation>> &Path, bool IsImport); //===--------------------------------------------------------------------===// // C++11/G++: Type Traits [Type-Traits.html in the GCC manual] ExprResult ParseTypeTrait(); /// Parse the given string as a type. /// /// This is a dangerous utility function currently employed only by API notes. /// It is not a general entry-point for safely parsing types from strings. /// /// \param typeStr The string to be parsed as a type. /// \param context The name of the context in which this string is being /// parsed, which will be used in diagnostics. /// \param includeLoc The location at which this parse was triggered. TypeResult parseTypeFromString(StringRef typeStr, StringRef context, SourceLocation includeLoc); //===--------------------------------------------------------------------===// // Embarcadero: Array and Expression Traits ExprResult ParseArrayTypeTrait(); ExprResult ParseExpressionTrait(); ExprResult ParseBuiltinPtrauthTypeDiscriminator(); //===--------------------------------------------------------------------===// // Preprocessor code-completion pass-through void CodeCompleteDirective(bool InConditional) override; void CodeCompleteInConditionalExclusion() override; void CodeCompleteMacroName(bool IsDefinition) override; void CodeCompletePreprocessorExpression() override; void CodeCompleteMacroArgument(IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned ArgumentIndex) override; void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled) override; void CodeCompleteNaturalLanguage() override; }; } // end namespace clang #endif
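// Illustrative examples (added for clarity, not part of the original header):
// concrete directives matched by the map-type-modifier grammar documented at
// parseMapTypeModifiers above:
//   #pragma omp target map(always, close, tofrom: a, b[0:n])
//   #pragma omp target map(mapper(my_mapper), to: c)
// Here my_mapper stands for a hypothetical mapper-identifier introduced via
// '#pragma omp declare mapper'.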
sample_for_critical.c
/* Andre Augusto Giannotti Scota (https://sites.google.com/view/a2gs/) */

#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

#include "openmp_util.h"

int f(int a, int b, int c)
{
	return(a + b + c);
}

int main(int argc, char *argv[])
{
	int x[] = {1 , 2 , 3 , 4 , 5 , 6 , 7 , 8 , 9 , 10};
	int y[] = {10, 20, 30, 40, 50, 60, 70, 80, 90, 100};
	int z[] = {11, 22, 33, 44, 55, 66, 77, 88, 99, 111};
	int sum = 0;

	/* #pragma omp parallel num_threads(3) */
	/* omp_set_num_threads(2); */

	#pragma omp parallel for
	for(int i = 0; i < 10; ++i){
		int ctx = f(x[i], y[i], z[i]);

		/* Critical region. One thread at a time */
		/* #pragma omp atomic */
		#pragma omp critical
		{
			printf("Thread Id:[%d] - i: [%d] - Ctx: [%d]\n", omp_get_thread_num(), i, ctx);
			sum += ctx;
		}
	}

	printf("Sum: [%d]\n", sum);

	return(0);
}
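/* Added for comparison (a minimal sketch, not part of the original sample):
 * the accumulation alone can also be expressed with OpenMP's reduction
 * clause, avoiding the critical section entirely. The printf is dropped here
 * because ordered output would still need synchronization. The name
 * sumWithReduction is ours; f and the arrays match main above. */
static int sumWithReduction(const int *x, const int *y, const int *z, int n)
{
	int s = 0;

	/* Each thread accumulates a private partial sum; OpenMP combines them at the end */
	#pragma omp parallel for reduction(+:s)
	for(int i = 0; i < n; ++i)
		s += f(x[i], y[i], z[i]);

	return(s);
}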
Grid.h
#ifndef GRID_H_
#define GRID_H_

#include <cstring>
#include <cstdlib>
#include <iostream>

// ALIGNMENT is normally supplied by the build system; the fallback below (an
// assumption: one cache line) just keeps the header compilable standalone.
// posix_memalign requires a power-of-two multiple of sizeof(void*).
#ifndef ALIGNMENT
#define ALIGNMENT 64
#endif

template<typename T>
class Grid {
public:
  Grid(int X, int Y);
  ~Grid();

  /// Implements periodic boundary conditions
  inline int periodicIndex(int i, int N) {
    int pi;
    if (i >= 0) {
      pi = i % N;
    } else {
      pi = N-1 - (-i-1)%N;
    }
    return pi;
  }

  inline T& get(int x, int y) {
    return m_data[periodicIndex(y, m_Y) * m_X + periodicIndex(x, m_X)];
  }

  inline int X() const { return m_X; }
  inline int Y() const { return m_Y; }

private:
  int m_X;
  int m_Y;
  T* m_data;
};

template<typename T>
Grid<T>::Grid(int X, int Y) : m_X(X), m_Y(Y) {
  int err = posix_memalign(reinterpret_cast<void**>(&m_data), ALIGNMENT, X*Y*sizeof(T));
  if (err) {
    std::cerr << "Failed to allocate " << X*Y*sizeof(T) << " bytes in " << __FILE__ << std::endl;
    exit(EXIT_FAILURE);
  }

  #pragma omp parallel for collapse(2)
  for (int y = 0; y < m_Y; ++y) {
    for (int x = 0; x < m_X; ++x) {
      T& data = get(x, y);
      memset(&data, 0, sizeof(T));
    }
  }
}

template<typename T>
Grid<T>::~Grid() {
  free(m_data);
}

#endif // GRID_H_
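// Example usage (illustrative only, not part of the header): periodicIndex
// wraps out-of-range indices, so on a 4x4 grid the accesses get(-1, 5) and
// get(3, 1) refer to the same element:
//
//   Grid<double> g(4, 4);
//   g.get(-1, 5) = 1.0;        // wraps x: -1 -> 3, y: 5 -> 1
//   // &g.get(-1, 5) == &g.get(3, 1)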
ocp_nlp_sqp.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.; */ #include "acados/ocp_nlp/ocp_nlp_sqp.h" // external #include <assert.h> #include <math.h> #include <stdio.h> #include <string.h> #include <stdlib.h> #if defined(ACADOS_WITH_OPENMP) #include <omp.h> #endif // blasfeo #include "blasfeo/include/blasfeo_d_aux.h" #include "blasfeo/include/blasfeo_d_aux_ext_dep.h" #include "blasfeo/include/blasfeo_d_blas.h" // acados #include "acados/ocp_nlp/ocp_nlp_common.h" #include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h" #include "acados/ocp_nlp/ocp_nlp_reg_common.h" #include "acados/ocp_qp/ocp_qp_common.h" #include "acados/utils/mem.h" #include "acados/utils/print.h" #include "acados/utils/timing.h" #include "acados/utils/types.h" #include "acados_c/ocp_qp_interface.h" /************************************************ * options ************************************************/ acados_size_t ocp_nlp_sqp_opts_calculate_size(void *config_, void *dims_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; acados_size_t size = 0; size += sizeof(ocp_nlp_sqp_opts); size += ocp_nlp_opts_calculate_size(config, dims); return size; } void *ocp_nlp_sqp_opts_assign(void *config_, void *dims_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; char *c_ptr = (char *) raw_memory; ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_opts); opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr); c_ptr += ocp_nlp_opts_calculate_size(config, dims); assert((char *) raw_memory + ocp_nlp_sqp_opts_calculate_size(config, dims) >= c_ptr); return opts; } void ocp_nlp_sqp_opts_initialize_default(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; // int ii; // this first !!! 
ocp_nlp_opts_initialize_default(config, dims, nlp_opts); // SQP opts opts->max_iter = 20; opts->tol_stat = 1e-8; opts->tol_eq = 1e-8; opts->tol_ineq = 1e-8; opts->tol_comp = 1e-8; opts->ext_qp_res = 0; opts->qp_warm_start = 0; opts->warm_start_first_qp = false; opts->rti_phase = 0; opts->initialize_t_slacks = 0; // overwrite default submodules opts // qp tolerance qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", &opts->tol_stat); qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", &opts->tol_eq); qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", &opts->tol_ineq); qp_solver->opts_set(qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", &opts->tol_comp); return; } void ocp_nlp_sqp_opts_update(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_opts_update(config, dims, nlp_opts); return; } void ocp_nlp_sqp_opts_set(void *config_, void *opts_, const char *field, void* value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; int ii; char module[MAX_STR_LEN]; char *ptr_module = NULL; int module_length = 0; // extract module name char *char_ = strchr(field, '_'); if (char_!=NULL) { module_length = char_-field; for (ii=0; ii<module_length; ii++) module[ii] = field[ii]; module[module_length] = '\0'; // add end of string ptr_module = module; } // pass options to QP module if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) ) { ocp_nlp_opts_set(config, nlp_opts, field, value); if (!strcmp(field, "qp_warm_start")) { int* i_ptr = (int *) value; opts->qp_warm_start = *i_ptr; } } else // nlp opts { if (!strcmp(field, "max_iter")) { int* max_iter = (int *) value; opts->max_iter = *max_iter; } else if (!strcmp(field, "tol_stat")) { double* tol_stat = (double *) value; opts->tol_stat = *tol_stat; // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified. config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_stat", value); } else if (!strcmp(field, "tol_eq")) { double* tol_eq = (double *) value; opts->tol_eq = *tol_eq; // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified. config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_eq", value); } else if (!strcmp(field, "tol_ineq")) { double* tol_ineq = (double *) value; opts->tol_ineq = *tol_ineq; // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified. config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_ineq", value); } else if (!strcmp(field, "tol_comp")) { double* tol_comp = (double *) value; opts->tol_comp = *tol_comp; // TODO: set accuracy of the qp_solver to the minimum of current QP accuracy and the one specified. 
config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "tol_comp", value); } else if (!strcmp(field, "ext_qp_res")) { int* ext_qp_res = (int *) value; opts->ext_qp_res = *ext_qp_res; } else if (!strcmp(field, "warm_start_first_qp")) { bool* warm_start_first_qp = (bool *) value; opts->warm_start_first_qp = *warm_start_first_qp; } else if (!strcmp(field, "rti_phase")) { int* rti_phase = (int *) value; if (*rti_phase < 0 || *rti_phase > 0) { printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field."); printf("possible values are: 0\n"); exit(1); } else opts->rti_phase = *rti_phase; } else if (!strcmp(field, "initialize_t_slacks")) { int* initialize_t_slacks = (int *) value; if (*initialize_t_slacks != 0 && *initialize_t_slacks != 1) { printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for initialize_t_slacks field, need int 0 or 1, got %d.", *initialize_t_slacks); exit(1); } opts->initialize_t_slacks = *initialize_t_slacks; } else { ocp_nlp_opts_set(config, nlp_opts, field, value); } } return; } void ocp_nlp_sqp_opts_set_at_stage(void *config_, void *opts_, size_t stage, const char *field, void* value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = (ocp_nlp_sqp_opts *) opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value); return; } /************************************************ * memory ************************************************/ acados_size_t ocp_nlp_sqp_memory_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; // int N = dims->N; // int *nx = dims->nx; // int *nu = dims->nu; // int *nz = dims->nz; acados_size_t size = 0; size += sizeof(ocp_nlp_sqp_memory); // nlp mem size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts); // stat int stat_m = opts->max_iter+1; int stat_n = 7; if (opts->ext_qp_res) stat_n += 4; size += stat_n*stat_m*sizeof(double); size += 3*8; // align make_int_multiple_of(8, &size); return size; } void *ocp_nlp_sqp_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; // ocp_nlp_dynamics_config **dynamics = config->dynamics; // ocp_nlp_cost_config **cost = config->cost; // ocp_nlp_constraints_config **constraints = config->constraints; char *c_ptr = (char *) raw_memory; // int N = dims->N; // int *nx = dims->nx; // int *nu = dims->nu; // int *nz = dims->nz; // initial align align_char_to(8, &c_ptr); ocp_nlp_sqp_memory *mem = (ocp_nlp_sqp_memory *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_memory); align_char_to(8, &c_ptr); // nlp mem mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr); c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts); // stat mem->stat = (double *) c_ptr; mem->stat_m = opts->max_iter+1; mem->stat_n = 7; if (opts->ext_qp_res) mem->stat_n += 4; c_ptr += mem->stat_m*mem->stat_n*sizeof(double); mem->status = ACADOS_READY; align_char_to(8, &c_ptr); assert((char *) raw_memory + ocp_nlp_sqp_memory_calculate_size(config, dims, opts) >= c_ptr); return mem; } /************************************************ * workspace ************************************************/ acados_size_t ocp_nlp_sqp_workspace_calculate_size(void *config_, void *dims_, void *opts_) { 
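// Accounts for the same blocks that ocp_nlp_sqp_cast_workspace lays out below: the SQP workspace struct itself, the NLP workspace, two temporary QPs (used by ocp_nlp_sqp_eval_param_sens), and, only when ext_qp_res is set, the QP residual and residual-workspace buffers.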
ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; acados_size_t size = 0; // sqp size += sizeof(ocp_nlp_sqp_workspace); // nlp size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts); // tmp qp in size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims); // tmp qp out size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims); if (opts->ext_qp_res) { // qp res size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims); // qp res ws size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims); } return size; } static void ocp_nlp_sqp_cast_workspace(ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_sqp_opts *opts, ocp_nlp_sqp_memory *mem, ocp_nlp_sqp_workspace *work) { ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_memory *nlp_mem = mem->nlp_mem; // sqp char *c_ptr = (char *) work; c_ptr += sizeof(ocp_nlp_sqp_workspace); // nlp work->nlp_work = ocp_nlp_workspace_assign(config, dims, nlp_opts, nlp_mem, c_ptr); c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts); // tmp qp in work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims); // tmp qp out work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims); if (opts->ext_qp_res) { // qp res work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims); // qp res ws work->qp_res_ws = ocp_qp_res_workspace_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims); } assert((char *) work + ocp_nlp_sqp_workspace_calculate_size(config, dims, opts) >= c_ptr); return; } /************************************************ * functions ************************************************/ int ocp_nlp_sqp(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { acados_timer timer0, timer1; acados_tic(&timer0); ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_sqp_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_res *nlp_res = nlp_mem->nlp_res; ocp_nlp_sqp_workspace *work = work_; ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; ocp_qp_in *qp_in = nlp_mem->qp_in; ocp_qp_out *qp_out = nlp_mem->qp_out; // zero timers double tmp_time; mem->time_qp_sol = 0.0; mem->time_qp_solver_call = 0.0; mem->time_qp_xcond = 0.0; mem->time_lin = 0.0; mem->time_reg = 0.0; mem->time_glob = 0.0; mem->time_sim = 0.0; mem->time_sim_la = 0.0; mem->time_sim_ad = 0.0; int N = dims->N; int ii; int qp_status = 0; int qp_iter = 0; double alpha = 0.0; // qp_status and alpha initialized: the residual-based exit below can print them before the first QP solve #if defined(ACADOS_WITH_OPENMP) // backup number of threads int num_threads_bkp = omp_get_num_threads(); // set number of threads omp_set_num_threads(opts->nlp_opts->num_threads); #endif ocp_nlp_alias_memory_to_submodules(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // if (opts->initialize_t_slacks > 0) ocp_nlp_initialize_t_slacks(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // initialize QP ocp_nlp_initialize_submodules(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // main sqp loop
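// Each pass of the loop below: (1) linearize the NLP around the current iterate to form a QP (time_lin), (2) compute the NLP residuals and check the convergence tolerances, (3) regularize the Hessian (time_reg), (4) solve the QP (time_qp_sol), (5) globalize via line search, optionally preceded by a second-order correction (time_glob), (6) update the primal and dual variables with step size alpha.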
int sqp_iter = 0; nlp_mem->sqp_iter = &sqp_iter; for (; sqp_iter < opts->max_iter; sqp_iter++) { // linearize NLP and update QP matrices acados_tic(&timer1); ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); mem->time_lin += acados_toc(&timer1); #ifdef MEASURE_TIMINGS // get timings from integrator for (ii=0; ii<N; ii++) { config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], "time_sim", &tmp_time); mem->time_sim += tmp_time; config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], "time_sim_la", &tmp_time); mem->time_sim_la += tmp_time; config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], "time_sim_ad", &tmp_time); mem->time_sim_ad += tmp_time; } #endif // MEASURE_TIMINGS // update QP rhs for SQP (step prim var, abs dual var) ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // compute nlp residuals ocp_nlp_res_compute(dims, nlp_in, nlp_out, nlp_res, nlp_mem); ocp_nlp_res_get_inf_norm(nlp_res, &nlp_out->inf_norm_res); if (nlp_opts->print_level > sqp_iter + 1) { printf("\n\nSQP: ocp_qp_in at iteration %d\n", sqp_iter); print_ocp_qp_in(qp_in); } // save statistics if (sqp_iter < mem->stat_m) { mem->stat[mem->stat_n*sqp_iter+0] = nlp_res->inf_norm_res_stat; mem->stat[mem->stat_n*sqp_iter+1] = nlp_res->inf_norm_res_eq; mem->stat[mem->stat_n*sqp_iter+2] = nlp_res->inf_norm_res_ineq; mem->stat[mem->stat_n*sqp_iter+3] = nlp_res->inf_norm_res_comp; } // exit conditions on residuals if ((nlp_res->inf_norm_res_stat < opts->tol_stat) & (nlp_res->inf_norm_res_eq < opts->tol_eq) & (nlp_res->inf_norm_res_ineq < opts->tol_ineq) & (nlp_res->inf_norm_res_comp < opts->tol_comp)) { #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif mem->status = ACADOS_SUCCESS; mem->sqp_iter = sqp_iter; mem->time_tot = acados_toc(&timer0); if (nlp_opts->print_level > 0) { printf("%i\t%e\t%e\t%e\t%e\t%d\t%d\t%e\n", sqp_iter, nlp_res->inf_norm_res_stat, nlp_res->inf_norm_res_eq, nlp_res->inf_norm_res_ineq, nlp_res->inf_norm_res_comp, qp_status, qp_iter, alpha); printf("\n\n"); } return mem->status; } // check for nans else if (isnan(nlp_res->inf_norm_res_stat) || isnan(nlp_res->inf_norm_res_eq) || isnan(nlp_res->inf_norm_res_ineq) || isnan(nlp_res->inf_norm_res_comp)) { #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif mem->status = ACADOS_FAILURE; mem->sqp_iter = sqp_iter; mem->time_tot = acados_toc(&timer0); return mem->status; } // regularize Hessian acados_tic(&timer1); config->regularize->regularize_hessian(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem); mem->time_reg += acados_toc(&timer1); // (typically) no warm start at first iteration if (sqp_iter == 0 && !opts->warm_start_first_qp) { int tmp_int = 0; config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int); } if (0) // DEBUG printing { char filename[100]; sprintf(filename, "qp_prints/qp_in_%d.txt", sqp_iter); FILE *out_file = fopen(filename, "w"); print_ocp_qp_in_to_file(out_file, qp_in); fclose(out_file); } // solve qp acados_tic(&timer1); qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, qp_in, qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work); mem->time_qp_sol += acados_toc(&timer1);
qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time); mem->time_qp_solver_call += tmp_time; qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time); mem->time_qp_xcond += tmp_time; // compute correct dual solution in case of Hessian regularization acados_tic(&timer1); config->regularize->correct_dual_sol(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem); mem->time_reg += acados_toc(&timer1); // restore default warm start if (sqp_iter==0) { config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &opts->qp_warm_start); } if (nlp_opts->print_level > sqp_iter + 1) { printf("\n\nSQP: ocp_qp_out at iteration %d\n", sqp_iter); print_ocp_qp_out(qp_out); } if (0) // DEBUG printing { char filename[100]; sprintf(filename, "qp_prints/qp_out_%d.txt", sqp_iter); FILE *out_file = fopen(filename, "w"); print_ocp_qp_out_to_file(out_file, qp_out); fclose(out_file); } // TODO move into QP solver memory ??? qp_info *qp_info_; ocp_qp_out_get(qp_out, "qp_info", &qp_info_); qp_iter = qp_info_->num_iter; // save statistics of last qp solver call if (sqp_iter+1 < mem->stat_m) { mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status; mem->stat[mem->stat_n*(sqp_iter+1)+5] = qp_iter; } // compute external QP residuals (for debugging) if (opts->ext_qp_res) { ocp_qp_res_compute(qp_in, qp_out, work->qp_res, work->qp_res_ws); if (sqp_iter+1 < mem->stat_m) ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+7)); } // exit conditions on QP status if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER)) { if (nlp_opts->print_level > 0) { printf("%i\t%e\t%e\t%e\t%e.\n", sqp_iter, nlp_res->inf_norm_res_stat, nlp_res->inf_norm_res_eq, nlp_res->inf_norm_res_ineq, nlp_res->inf_norm_res_comp ); printf("\n\n"); } // increment sqp_iter to return full statistics and improve output below. sqp_iter++; #ifndef ACADOS_SILENT printf("\nQP solver returned error status %d in SQP iteration %d, QP iteration %d.\n", qp_status, sqp_iter, qp_iter); #endif #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif if (nlp_opts->print_level > 1) { printf("\n Failed to solve the following QP:\n"); if (nlp_opts->print_level) print_ocp_qp_in(qp_in); } mem->status = ACADOS_QP_FAILURE; mem->sqp_iter = sqp_iter; mem->time_tot = acados_toc(&timer0); return mem->status; } /* globalization */ // NOTE on timings: currently everything within globalization is accounted for in time_glob. // QP solver times could also be attributed there instead. Cleanest would be to save them separately. acados_tic(&timer1); bool do_line_search = true; if (opts->nlp_opts->globalization_use_SOC && opts->nlp_opts->globalization == MERIT_BACKTRACKING) { // NOTE: following Waechter2006: // Do SOC // 1. if "the first trial step size alpha_k,0 has been rejected and // 2. if the infeasibility would have increased when accepting the previous step" // NOTE: the "and" is interpreted as an "or" in the current implementation // preliminary line search alpha = ocp_nlp_line_search(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work, 1); if (alpha < 1.0) { // Second Order Correction (SOC): following Nocedal2006: p.557, eq. (18.51) -- (18.56) // Paragraph: APPROACH III: S l1 QP (SEQUENTIAL l1 QUADRATIC PROGRAMMING), // Section 18.8 TRUST-REGION SQP METHODS // - just no trust region radius here.
if (nlp_opts->print_level > 0) printf("ocp_nlp_sqp: performing SOC, since alpha = %e in prelim. line search\n\n", alpha); int *nb = qp_in->dim->nb; int *ng = qp_in->dim->ng; int *nx = dims->nx; int *nu = dims->nu; int *ns = dims->ns; // int *nv = dims->nv; // int *ni = dims->ni; /* evaluate constraints & dynamics at new step */ // The following (setting up ux + p in tmp_nlp_out and evaluation of constraints + dynamics) // is not needed anymore because it is done in the prelim. line search with early termination. // NOTE: similar to ocp_nlp_evaluate_merit_fun // set up new linearization point in work->tmp_nlp_out // for (ii = 0; ii < N; ii++) // blasfeo_dveccp(nx[ii+1], nlp_out->pi+ii, 0, work->nlp_work->tmp_nlp_out->pi+ii, 0); // for (ii = 0; ii <= N; ii++) // blasfeo_dveccp(2*ni[ii], nlp_out->lam+ii, 0, work->nlp_work->tmp_nlp_out->lam+ii, 0); // // tmp_nlp_out = iterate + step // for (ii = 0; ii <= N; ii++) // blasfeo_daxpy(nv[ii], 1.0, qp_out->ux+ii, 0, nlp_out->ux+ii, 0, work->nlp_work->tmp_nlp_out->ux+ii, 0); // // evaluate // #if defined(ACADOS_WITH_OPENMP) // #pragma omp parallel for // #endif // for (ii=0; ii<N; ii++) // { // config->dynamics[ii]->compute_fun(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], // nlp_opts->dynamics[ii], nlp_mem->dynamics[ii], work->nlp_work->dynamics[ii]); // } // #if defined(ACADOS_WITH_OPENMP) // #pragma omp parallel for // #endif // for (ii=0; ii<=N; ii++) // { // config->constraints[ii]->compute_fun(config->constraints[ii], dims->constraints[ii], // nlp_in->constraints[ii], nlp_opts->constraints[ii], // nlp_mem->constraints[ii], work->nlp_work->constraints[ii]); // } // #if defined(ACADOS_WITH_OPENMP) // #pragma omp parallel for // #endif // update QP rhs // d_i = c_i(x_k + p_k) - \nabla c_i(x_k)^T * p_k struct blasfeo_dvec *tmp_fun_vec; for (ii = 0; ii <= N; ii++) { if (ii < N) { // b -- dynamics tmp_fun_vec = config->dynamics[ii]->memory_get_fun_ptr(nlp_mem->dynamics[ii]); // add - \nabla c_i(x_k)^T * p_k // c_i = f(x_k, u_k) - x_{k+1} (see dynamics module) blasfeo_dgemv_t(nx[ii]+nu[ii], nx[ii+1], -1.0, qp_in->BAbt+ii, 0, 0, qp_out->ux+ii, 0, -1.0, tmp_fun_vec, 0, qp_in->b+ii, 0); // NOTE: not sure why it is - tmp_fun_vec here! blasfeo_dvecad(nx[ii+1], 1.0, qp_out->ux+ii+1, nu[ii+1], qp_in->b+ii, 0); } /* INEQUALITIES */ // d -- constraints tmp_fun_vec = config->constraints[ii]->memory_get_fun_ptr(nlp_mem->constraints[ii]); /* SOC for bounds can be skipped (because linear) */ // NOTE: SOC can also be skipped for truly linear constraints, i.e. ng of nlp, now using ng of QP = (nh+ng) // upper & lower blasfeo_dveccp(ng[ii], tmp_fun_vec, nb[ii], qp_in->d+ii, nb[ii]); // lg blasfeo_dveccp(ng[ii], tmp_fun_vec, 2*nb[ii]+ng[ii], qp_in->d+ii, 2*nb[ii]+ng[ii]); // ug // general linear / linearized!
// tmp_ni = D * u + C * x blasfeo_dgemv_t(nu[ii]+nx[ii], ng[ii], 1.0, qp_in->DCt+ii, 0, 0, qp_out->ux+ii, 0, 0.0, &work->nlp_work->tmp_ni, 0, &work->nlp_work->tmp_ni, 0); // d[nb:nb+ng] += tmp_ni (lower) blasfeo_dvecad(ng[ii], 1.0, &work->nlp_work->tmp_ni, 0, qp_in->d+ii, nb[ii]); // d[nb:nb+ng] -= tmp_ni blasfeo_dvecad(ng[ii], -1.0, &work->nlp_work->tmp_ni, 0, qp_in->d+ii, 2*nb[ii]+ng[ii]); // add slack contributions // d[nb:nb+ng] += slack[idx] // qp_in->idxs_rev for (int j = 0; j < nb[ii]+ng[ii]; j++) { int slack_index = qp_in->idxs_rev[ii][j]; if (slack_index >= 0) { // add slack contribution for lower and upper constraint // lower BLASFEO_DVECEL(qp_in->d+ii, j) -= BLASFEO_DVECEL(qp_out->ux+ii, slack_index+nx[ii]+nu[ii]); // upper BLASFEO_DVECEL(qp_in->d+ii, j+nb[ii]+ng[ii]) -= BLASFEO_DVECEL(qp_out->ux+ii, slack_index+nx[ii]+nu[ii]+ns[ii]); } } // NOTE: bounds on slacks can be skipped, since they are linear. // blasfeo_daxpy(2*ns[ii], -1.0, qp_out->ux+ii, nx[ii]+nu[ii], qp_in->d+ii, 2*nb[ii]+2*ng[ii], qp_in->d+ii, 2*nb[ii]+2*ng[ii]); // printf("SOC: qp_in->d final value\n"); // blasfeo_print_exp_dvec(2*nb[ii]+2*ng[ii], qp_in->d+ii, 0); } if (nlp_opts->print_level > sqp_iter + 1) { printf("\n\nSQP: SOC ocp_qp_in at iteration %d\n", sqp_iter); print_ocp_qp_in(qp_in); } if (0) // DEBUG printing { char filename[100]; sprintf(filename, "qp_prints/qp_in_%d_SOC.txt", sqp_iter); FILE *out_file = fopen(filename, "w"); print_ocp_qp_in_to_file(out_file, qp_in); fclose(out_file); } // solve QP // acados_tic(&timer1); qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, qp_in, qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work); // tmp_time = acados_toc(&timer1); // mem->time_qp_sol += tmp_time; // qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time); // mem->time_qp_solver_call += tmp_time; // qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time); // mem->time_qp_xcond += tmp_time; // compute correct dual solution in case of Hessian regularization // acados_tic(&timer1); config->regularize->correct_dual_sol(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem); // mem->time_reg += acados_toc(&timer1); ocp_qp_out_get(qp_out, "qp_info", &qp_info_); qp_iter = qp_info_->num_iter; // save statistics of last qp solver call // TODO: SOC QP solver call should be warm / hot started! 
if (sqp_iter+1 < mem->stat_m) { // mem->stat[mem->stat_n*(sqp_iter+1)+4] = qp_status; // add qp_iter; should maybe be in a separate statistic mem->stat[mem->stat_n*(sqp_iter+1)+5] += qp_iter; } // compute external QP residuals (for debugging) if (opts->ext_qp_res) { ocp_qp_res_compute(qp_in, qp_out, work->qp_res, work->qp_res_ws); if (sqp_iter+1 < mem->stat_m) ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*(sqp_iter+1)+7)); } if (nlp_opts->print_level > sqp_iter + 1) { printf("\n\nSQP: SOC ocp_qp_out at iteration %d\n", sqp_iter); print_ocp_qp_out(qp_out); } if (0) // DEBUG printing { char filename[100]; sprintf(filename, "qp_prints/qp_out_%d_SOC.txt", sqp_iter); FILE *out_file = fopen(filename, "w"); print_ocp_qp_out_to_file(out_file, qp_out); fclose(out_file); } // exit conditions on QP status if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER)) { #ifndef ACADOS_SILENT printf("\nQP solver returned error status %d in SQP iteration %d for SOC QP in QP iteration %d.\n", qp_status, sqp_iter, qp_iter); #endif #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif if (nlp_opts->print_level > 1) { printf("\nFailed to solve the following QP:\n"); if (nlp_opts->print_level > sqp_iter + 1) print_ocp_qp_in(qp_in); } mem->status = ACADOS_QP_FAILURE; mem->sqp_iter = sqp_iter; mem->time_tot = acados_toc(&timer0); return mem->status; } } // if alpha prelim. line search < 1.0 else { do_line_search = false; } } if (do_line_search) { alpha = ocp_nlp_line_search(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work, 0); } mem->time_glob += acados_toc(&timer1); mem->stat[mem->stat_n*(sqp_iter+1)+6] = alpha; // update variables ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work, alpha); if (nlp_opts->print_level > 0) { if (sqp_iter%10 == 0) { printf("# it\tstat\t\teq\t\tineq\t\tcomp\t\tqp_stat\tqp_iter\talpha\n"); } printf("%i\t%e\t%e\t%e\t%e\t%d\t%d\t%e\n", sqp_iter, nlp_res->inf_norm_res_stat, nlp_res->inf_norm_res_eq, nlp_res->inf_norm_res_ineq, nlp_res->inf_norm_res_comp, qp_status, qp_iter, alpha); } } // end SQP loop if (nlp_opts->print_level > 0) printf("\n\n"); // ocp_nlp_out_print(dims, nlp_out); // maximum number of iterations reached #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif mem->status = ACADOS_MAXITER; mem->sqp_iter = sqp_iter; mem->time_tot = acados_toc(&timer0); #ifndef ACADOS_SILENT printf("\n ocp_nlp_sqp: maximum iterations reached\n"); #endif return mem->status; } void ocp_nlp_sqp_memory_reset_qp_solver(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_sqp_memory *mem = mem_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_nlp_dims *dims = dims_; ocp_nlp_sqp_workspace *work = work_; ocp_nlp_workspace *nlp_work = work->nlp_work; // printf("in ocp_nlp_sqp_memory_reset_qp_solver\n\n"); config->qp_solver->memory_reset(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work); } int ocp_nlp_sqp_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_sqp_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; //
ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_nlp_sqp_workspace *work = work_; ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; int N = dims->N; int status = ACADOS_SUCCESS; int ii; // TODO(all) add flag to enable/disable checks for (ii = 0; ii <= N; ii++) { int module_val; config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val); if (dims->ns[ii] != module_val) { printf("ocp_nlp_sqp_precompute: inconsistent dimension ns for stage %d with constraint module, got %d, module: %d.", ii, dims->ns[ii], module_val); exit(1); } } // precompute for (ii = 0; ii < N; ii++) { // set T config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii); // dynamics precompute status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii], nlp_mem->dynamics[ii], nlp_work->dynamics[ii]); if (status != ACADOS_SUCCESS) return status; } return status; } void ocp_nlp_sqp_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_, char *field, int stage, int index, void *sens_nlp_out_) { acados_timer timer0; acados_tic(&timer0); ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; ocp_nlp_sqp_memory *mem = mem_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_nlp_out *sens_nlp_out = sens_nlp_out_; ocp_nlp_sqp_workspace *work = work_; ocp_nlp_sqp_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in); d_ocp_qp_set_rhs_zero(work->tmp_qp_in); double one = 1.0; if ((!strcmp("ex", field)) & (stage==0)) { d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in); d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in); // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in); config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work); // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out); // exit(1); /* copy tmp_qp_out into sens_nlp_out */ int i; int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; // int *nz = dims->nz; for (i = 0; i <= N; i++) { blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0); if (i < N) blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0); blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0); blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0); } } else { printf("\nerror: field %s at stage %d not available in ocp_nlp_sqp_eval_param_sens\n", field, stage); exit(1); } mem->time_solution_sensitivities = acados_toc(&timer0); return; } // TODO rename memory_get ??? 
void ocp_nlp_sqp_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_) { ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; ocp_nlp_sqp_memory *mem = mem_; if (!strcmp("sqp_iter", field)) { int *value = return_value_; *value = mem->sqp_iter; } else if (!strcmp("status", field)) { int *value = return_value_; *value = mem->status; } else if (!strcmp("time_tot", field) || !strcmp("tot_time", field)) { double *value = return_value_; *value = mem->time_tot; } else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field)) { double *value = return_value_; *value = mem->time_qp_sol; } else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field)) { double *value = return_value_; *value = mem->time_qp_solver_call; } else if (!strcmp("time_qp_xcond", field)) { double *value = return_value_; *value = mem->time_qp_xcond; } else if (!strcmp("time_lin", field)) { double *value = return_value_; *value = mem->time_lin; } else if (!strcmp("time_reg", field)) { double *value = return_value_; *value = mem->time_reg; } else if (!strcmp("time_glob", field)) { double *value = return_value_; *value = mem->time_glob; } else if (!strcmp("time_solution_sensitivities", field)) { double *value = return_value_; *value = mem->time_solution_sensitivities; } else if (!strcmp("time_sim", field)) { double *value = return_value_; *value = mem->time_sim; } else if (!strcmp("time_sim_la", field)) { double *value = return_value_; *value = mem->time_sim_la; } else if (!strcmp("time_sim_ad", field)) { double *value = return_value_; *value = mem->time_sim_ad; } else if (!strcmp("stat", field)) { double **value = return_value_; *value = mem->stat; } else if (!strcmp("statistics", field)) { int n_row = mem->stat_m<mem->sqp_iter+1 ? 
mem->stat_m : mem->sqp_iter+1; double *value = return_value_; for (int ii=0; ii<n_row; ii++) { value[ii+0] = ii; for (int jj=0; jj<mem->stat_n; jj++) value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n]; } } else if (!strcmp("stat_m", field)) { int *value = return_value_; *value = mem->stat_m; } else if (!strcmp("stat_n", field)) { int *value = return_value_; *value = mem->stat_n; } else if (!strcmp("nlp_mem", field)) { void **value = return_value_; *value = mem->nlp_mem; } else if (!strcmp("qp_xcond_dims", field)) { void **value = return_value_; *value = dims->qp_solver->xcond_dims; } else if (!strcmp("nlp_res", field)) { ocp_nlp_res **value = return_value_; *value = mem->nlp_mem->nlp_res; } else if (!strcmp("qp_xcond_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in; } else if (!strcmp("qp_xcond_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out; } else if (!strcmp("qp_in", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_in; } else if (!strcmp("qp_out", field)) { void **value = return_value_; *value = mem->nlp_mem->qp_out; } else if (!strcmp("qp_iter", field)) { config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem, "iter", return_value_); } else if (!strcmp("qp_status", field)) { config->qp_solver->memory_get(config->qp_solver, mem->nlp_mem->qp_solver_mem, "status", return_value_); } else if (!strcmp("res_stat", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_stat; } else if (!strcmp("res_eq", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_eq; } else if (!strcmp("res_ineq", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_ineq; } else if (!strcmp("res_comp", field)) { double *value = return_value_; *value = mem->nlp_mem->nlp_res->inf_norm_res_comp; } else if (!strcmp("cost_value", field)) { double *value = return_value_; *value = mem->nlp_mem->cost_value; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_get\n", field); exit(1); } } void ocp_nlp_sqp_opts_get(void *config_, void *dims_, void *opts_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_opts *opts = opts_; if (!strcmp("nlp_opts", field)) { void **value = return_value_; *value = opts->nlp_opts; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_opts_get\n", field); exit(1); } } void ocp_nlp_sqp_work_get(void *config_, void *dims_, void *work_, const char *field, void *return_value_) { // ocp_nlp_config *config = config_; ocp_nlp_sqp_workspace *work = work_; if (!strcmp("nlp_work", field)) { void **value = return_value_; *value = work->nlp_work; } else { printf("\nerror: field %s not available in ocp_nlp_sqp_work_get\n", field); exit(1); } } void ocp_nlp_sqp_config_initialize_default(void *config_) { ocp_nlp_config *config = (ocp_nlp_config *) config_; config->opts_calculate_size = &ocp_nlp_sqp_opts_calculate_size; config->opts_assign = &ocp_nlp_sqp_opts_assign; config->opts_initialize_default = &ocp_nlp_sqp_opts_initialize_default; config->opts_update = &ocp_nlp_sqp_opts_update; config->opts_set = &ocp_nlp_sqp_opts_set; config->opts_set_at_stage = &ocp_nlp_sqp_opts_set_at_stage; config->memory_calculate_size = &ocp_nlp_sqp_memory_calculate_size; config->memory_assign = &ocp_nlp_sqp_memory_assign; config->workspace_calculate_size = &ocp_nlp_sqp_workspace_calculate_size; config->evaluate = &ocp_nlp_sqp; 
config->memory_reset_qp_solver = &ocp_nlp_sqp_memory_reset_qp_solver; config->eval_param_sens = &ocp_nlp_sqp_eval_param_sens; config->config_initialize_default = &ocp_nlp_sqp_config_initialize_default; config->precompute = &ocp_nlp_sqp_precompute; config->get = &ocp_nlp_sqp_get; config->opts_get = &ocp_nlp_sqp_opts_get; config->work_get = &ocp_nlp_sqp_work_get; return; } // ??? @rien // for (int_t i = 0; i < N; i++) // { // ocp_nlp_dynamics_opts *dynamics_opts = opts->dynamics[i]; // sim_opts *opts = dynamics_opts->sim_solver; // if (opts->scheme == NULL) // continue; // opts->sens_adj = (opts->scheme->type != exact); // if (nlp_in->freezeSens) { // // freeze inexact sensitivities after first SQP iteration !! // opts->scheme->freeze = true; // } // }
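/* Usage sketch (an illustration built from the function pointers assigned
 * above, not acados interface documentation -- applications normally go
 * through the higher-level acados_c interface): with a config prepared by
 * ocp_nlp_sqp_config_initialize_default, a caller would do roughly
 *
 *   ocp_nlp_sqp_config_initialize_default(config);
 *   void *opts = config->opts_assign(config, dims,
 *                    malloc(config->opts_calculate_size(config, dims)));
 *   config->opts_initialize_default(config, dims, opts);
 *   // ... assign memory and workspace the same way ...
 *   int status = config->evaluate(config, dims, nlp_in, nlp_out, opts, mem, work);
 */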
main_seqval.c
/* Copyright (C) 2010 The Trustees of Indiana University. */ /* */ /* Use, modification and distribution is subject to the Boost Software */ /* License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at */ /* http://www.boost.org/LICENSE_1_0.txt) */ /* */ /* Authors: Jeremiah Willcock */ /* Andrew Lumsdaine */ /* These need to be before any possible inclusions of stdint.h or inttypes.h. * */ #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif #include "../generator/make_graph.h" #include "../generator/utils.h" #include "common.h" #include <math.h> #include <mpi.h> #include <assert.h> #include <string.h> #include <stdlib.h> #include <stddef.h> #include <stdio.h> #include <limits.h> #include <stdint.h> #include <inttypes.h> #ifdef SHOWCPUAFF #include <sys/types.h> #include <unistd.h> #endif static int compare_doubles(const void* a, const void* b) { double aa = *(const double*)a; double bb = *(const double*)b; return (aa < bb) ? -1 : (aa == bb) ? 0 : 1; } enum {s_minimum, s_firstquartile, s_median, s_thirdquartile, s_maximum, s_mean, s_std, s_LAST}; static void get_statistics(const double x[], int n, double r[s_LAST]) { double temp; int i; /* Compute mean. */ temp = 0; for (i = 0; i < n; ++i) temp += x[i]; temp /= n; r[s_mean] = temp; /* Compute std. dev. */ temp = 0; for (i = 0; i < n; ++i) temp += (x[i] - r[s_mean]) * (x[i] - r[s_mean]); temp /= n - 1; r[s_std] = sqrt(temp); /* Sort x. */ double* xx = (double*)xmalloc(n * sizeof(double)); memcpy(xx, x, n * sizeof(double)); qsort(xx, n, sizeof(double), compare_doubles); /* Get order statistics. */ r[s_minimum] = xx[0]; r[s_firstquartile] = (xx[(n - 1) / 4] + xx[n / 4]) * .5; r[s_median] = (xx[(n - 1) / 2] + xx[n / 2]) * .5; r[s_thirdquartile] = (xx[n - 1 - (n - 1) / 4] + xx[n - 1 - n / 4]) * .5; r[s_maximum] = xx[n - 1]; /* Clean up. */ free(xx); } static inline int64_t get_pred_from_pred_entry(int64_t val) { return (val << 16) >> 16; } /* Returns true if all values are in range. */ //static int check_value_ranges(const int64_t nglobalverts, const size_t nlocalverts, const int64_t* const pred) { int any_range_errors = 0; { size_t ii; for (ii = 0; ii < nlocalverts; ii += CHUNKSIZE) { ptrdiff_t i_start = ii; ptrdiff_t i_end = ptrdiff_min(ii + CHUNKSIZE, nlocalverts); ptrdiff_t i; assert (i_start >= 0 && i_start <= (ptrdiff_t)nlocalverts); assert (i_end >= 0 && i_end <= (ptrdiff_t)nlocalverts); #pragma omp parallel for reduction(||:any_range_errors) for (i = i_start; i < i_end; ++i) { int64_t p = get_pred_from_pred_entry(pred[i]); if (p < -1 || p >= nglobalverts) { fprintf(stdout, "%d: Validation error: parent of vertex %" PRId64 " is out-of-range value %" PRId64 ".\n", rank, vertex_to_global_for_pred(rank, i), p); any_range_errors = 1; } } } } MPI_Allreduce(MPI_IN_PLACE, &any_range_errors, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD); return !any_range_errors; } /* Returns true if result is valid. Also, updates high 16 bits of each element * of pred to contain the BFS level number (or -1 if not visited) of each * vertex; this is based on the predecessor map if the user didn't provide it. 
* */ int validate_bfs_result_seq(const tuple_graph* const tg, const int64_t nglobalverts, const size_t nlocalverts, const int64_t root, int64_t* const pred, int64_t* const edge_visit_count_ptr, int64_t const max_used_vertex) { assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges); assert (pred); *edge_visit_count_ptr = 0; /* Ensure it is a valid pointer */ int ranges_ok = check_value_ranges(nglobalverts, nlocalverts, pred); if (root < 0 || root >= nglobalverts) { fprintf(stderr, "%d: Validation error: root vertex %" PRId64 " is invalid.\n", rank, root); ranges_ok = 0; } if (!ranges_ok) return 0; /* Fail */ int validation_passed = 1; int root_owner; size_t root_local; get_vertex_distribution_for_pred(1, &root, &root_owner, &root_local); int root_is_mine = (root_owner == rank); /* Get maximum values so loop counts are consistent across ranks. */ uint64_t maxlocalverts_ui = nlocalverts; MPI_Allreduce(MPI_IN_PLACE, &maxlocalverts_ui, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD); size_t maxlocalverts = (size_t)maxlocalverts_ui; ptrdiff_t max_bufsize = tuple_graph_max_bufsize(tg); ptrdiff_t edge_chunk_size = ptrdiff_min(HALF_CHUNKSIZE, max_bufsize); assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges); assert (pred); /* combine results from all processes */ int64_t* restrict pred_vtx = NULL; { int irank; uint64_t i; int64_t nlocalvertsMax=nlocalverts; MPI_Allreduce(MPI_IN_PLACE, &nlocalvertsMax, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD); if(rank==0) { pred_vtx = (int64_t*)xmalloc(nglobalverts * sizeof(int64_t)); int64_t* pred_tmp; int64_t nlocalvertsRemote; pred_tmp=pred; nlocalvertsRemote=nlocalverts; for(irank=0;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); if(irank!=0) { MPI_Recv(&nlocalvertsRemote, 1, MPI_UINT64_T, irank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Recv(pred_tmp, nlocalvertsRemote, MPI_UINT64_T, irank, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); //printf("%d %" PRId64 " \n",rank,nlocalvertsRemote); } for(i=0;i<nlocalvertsRemote ;i++) { pred_vtx[vertex_to_global_for_pred(irank,i)]=get_pred_from_pred_entry(pred_tmp[i]); } if(irank==0) pred_tmp = (int64_t*)xmalloc(nlocalvertsMax * sizeof(int64_t)); } xfree(pred_tmp); } else { for(irank=0;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); if(rank==irank) { MPI_Send(&nlocalverts, 1, MPI_UINT64_T, 0, 0, MPI_COMM_WORLD); MPI_Send(pred, nlocalverts, MPI_UINT64_T, 0, 1, MPI_COMM_WORLD); } } } { int irank; uint64_t i; for(irank=0;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); //if(rank==irank) // for(i=0;i<nlocalverts ;i++) // fprintf(stderr, "%d %" PRId64 " %" PRId64 " %" PRId64 "\n", rank,i,get_pred_from_pred_entry(pred[i]),vertex_to_global_for_pred(rank,i)); } } } int64_t nedge_traversed=0; if(rank==0) { uint64_t i, max_bfsvtx=0; /*for(i=0;i<tg->edgememory_size ;i++) { if(tg->edgememory[i].v0>max_bfsvtx) max_bfsvtx=tg->edgememory[i].v0; if(tg->edgememory[i].v1>max_bfsvtx) max_bfsvtx=tg->edgememory[i].v1; }*/ /*int64_t* restrict pred_vtx = (int64_t*)xmalloc((max_used_vertex+1) * sizeof(int64_t)); for(i=0;i<=max_used_vertex ;i++) { pred_vtx[i]=get_pred_from_pred_entry(pred[i]); }*/ nedge_traversed=verify_bfs_tree (pred_vtx, max_used_vertex, root, tg->edgememory, tg->nglobaledges); fprintf(stderr, "verify_bfs_tree nedge_traversed %" PRId64 ".\n", nedge_traversed); if(nedge_traversed<0) { fprintf(stderr, "Validation error: code %" PRId64 ".\n", nedge_traversed); validation_passed=0; 
} } if(rank==0) { xfree(pred_vtx); } MPI_Allreduce(MPI_IN_PLACE, &nedge_traversed, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD); *edge_visit_count_ptr=nedge_traversed; /* Collect the global validation result. */ MPI_Allreduce(MPI_IN_PLACE, &validation_passed, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD); return validation_passed; } int validate_bfs_result_seq2(const tuple_graph* const tg, const int64_t nglobalverts, const size_t nlocalverts, const int64_t root, int64_t* const pred, int64_t* const edge_visit_count_ptr, int64_t const max_used_vertex) { assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges); assert (pred); *edge_visit_count_ptr = 0; /* Ensure it is a valid pointer */ int ranges_ok = check_value_ranges(nglobalverts, nlocalverts, pred); if (root < 0 || root >= nglobalverts) { fprintf(stdout, "%d: Validation error: root vertex %" PRId64 " is invalid.\n", rank, root); ranges_ok = 0; } if (!ranges_ok) return 0; /* Fail */ int validation_passed = 1; int root_owner; size_t root_local; get_vertex_distribution_for_pred(1, &root, &root_owner, &root_local); int root_is_mine = (root_owner == rank); /* Get maximum values so loop counts are consistent across ranks. */ uint64_t maxlocalverts_ui = nlocalverts; MPI_Allreduce(MPI_IN_PLACE, &maxlocalverts_ui, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD); size_t maxlocalverts = (size_t)maxlocalverts_ui; ptrdiff_t max_bufsize = tuple_graph_max_bufsize(tg); ptrdiff_t edge_chunk_size = ptrdiff_min(HALF_CHUNKSIZE, max_bufsize); assert (tg->edgememory_size >= 0 && tg->max_edgememory_size >= tg->edgememory_size && tg->max_edgememory_size <= tg->nglobaledges); assert (pred); fprintf(stdout, "%d: validate_bfs_result_seq1\n", rank); /* combine results from all processes */ int64_t* restrict pred_vtx = NULL; { int irank; uint64_t i; int64_t nlocalvertsMax=nlocalverts; MPI_Allreduce(MPI_IN_PLACE, &nlocalvertsMax, 1, MPI_UINT64_T, MPI_MAX, MPI_COMM_WORLD); printf("rank,nlocalverts %d %" PRId64 " max %" PRId64 " \n",rank,nlocalverts,nlocalvertsMax); fprintf(stdout, "%d: validate_bfs_result_seq2\n", rank); if(rank==0) { fprintf(stdout, "%d: validate_bfs_result_seq2.1\n", rank); pred_vtx = (int64_t*)xmalloc(nglobalverts * sizeof(int64_t)); for(i=0;i<nlocalverts ;i++) { pred_vtx[vertex_to_global_for_pred(0,i)]=get_pred_from_pred_entry(pred[i]); /* rank 0's own vertices */ } fprintf(stdout, "%d: validate_bfs_result_seq2.2\n", rank); int64_t* pred_tmp = (int64_t*)xmalloc(nlocalvertsMax * sizeof(int64_t)); int64_t nlocalvertsRemote; fprintf(stdout, "%d: validate_bfs_result_seq2.3\n", rank); MPI_Barrier(MPI_COMM_WORLD); fprintf(stdout, "%d: validate_bfs_result_seq2.4\n", rank); for(irank=1;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); MPI_Recv(&nlocalvertsRemote, 1, MPI_UINT64_T, irank, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE); MPI_Recv(pred_tmp, nlocalvertsRemote, MPI_UINT64_T, irank, 1, MPI_COMM_WORLD, MPI_STATUS_IGNORE); printf("rank,nlocalvertsRemote %d %" PRId64 " \n",rank,nlocalvertsRemote); for(i=0;i<nlocalvertsRemote ;i++) { pred_vtx[vertex_to_global_for_pred(irank,i)]=get_pred_from_pred_entry(pred_tmp[i]); } } xfree(pred_tmp); fprintf(stdout, "%d: validate_bfs_result_seq2.5\n", rank); } else { MPI_Barrier(MPI_COMM_WORLD); for(irank=1;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); if(rank==irank) { MPI_Send(&nlocalverts, 1, MPI_UINT64_T, 0, 0, MPI_COMM_WORLD); printf("Srank,nlocalvertsRemote %d %" PRId64 " \n",rank,nlocalverts); MPI_Send(pred, nlocalverts, MPI_UINT64_T, 0, 1, MPI_COMM_WORLD); } } } fprintf(stdout,
"%d: validate_bfs_result_seq3\n", rank); /*{ int irank; uint64_t i; for(irank=0;irank<size;irank++) { MPI_Barrier(MPI_COMM_WORLD); //if(rank==irank) // for(i=0;i<nlocalverts ;i++) // fprintf(stdout, "%d %" PRId64 " %" PRId64 " %" PRId64 "\n", rank,i,get_pred_from_pred_entry(pred[i]),vertex_to_global_for_pred(rank,i)); } }*/ } MPI_Barrier(MPI_COMM_WORLD); int64_t nedge_traversed; if(rank==0) { uint64_t i, max_bfsvtx=0; /*for(i=0;i<tg->edgememory_size ;i++) { if(tg->edgememory[i].v0>max_bfsvtx) max_bfsvtx=tg->edgememory[i].v0; if(tg->edgememory[i].v1>max_bfsvtx) max_bfsvtx=tg->edgememory[i].v1; }*/ /*int64_t* restrict pred_vtx = (int64_t*)xmalloc((max_used_vertex+1) * sizeof(int64_t)); for(i=0;i<=max_used_vertex ;i++) { pred_vtx[i]=get_pred_from_pred_entry(pred[i]); }*/ nedge_traversed=verify_bfs_tree (pred_vtx, max_used_vertex, root, tg->edgememory, tg->nglobaledges); if(nedge_traversed<0) { fprintf(stdout, "Validation error: code %" PRId64 ".\n", nedge_traversed); validation_passed=0; } } if(rank==0) { xfree(pred_vtx); } fprintf(stdout, "%d: validate_bfs_result_seqF\n", rank); MPI_Barrier(MPI_COMM_WORLD); MPI_Allreduce(MPI_IN_PLACE, &nedge_traversed, 1, MPI_INT, MPI_MAX, MPI_COMM_WORLD); *edge_visit_count_ptr=nedge_traversed; /* Collect the global validation result. */ MPI_Allreduce(MPI_IN_PLACE, &validation_passed, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD); MPI_Barrier(MPI_COMM_WORLD); return validation_passed; } int main(int argc, char** argv) { MPI_Init(&argc, &argv); setup_globals(); /* Parse arguments. */ int SCALE = 16; int edgefactor = 16; /* nedges / nvertices, i.e., 2*avg. degree */ int num_bfs_roots = 64; int bDoOneNodePureOpenMP=1; int bRunPerf=1; int bRunVal=1; int bUsePerfForTEPS=1; float timeForPerf=300.0; int numberOfCyclesForPerf=300; //uint8_t refMD5[16]; int64_t* refEdgeCounts = NULL; int64_t* refBFS_Roots = NULL; if ( !(argc == 2 || argc == 3)){ if (rank == 0) fprintf(stdout, "Usage: %s input_file [number of threads]\n", argv[0]); //fprintf(stdout, "Usage: %s SCALE edgefactor\n SCALE = log_2(# vertices) [integer, required]\n edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]); MPI_Abort(MPI_COMM_WORLD, 1); } if ( argc == 3){ int threads=atoi(argv[2]); #ifdef _OPENMP omp_set_num_threads(threads); #else if(threads!=1) fprintf(stdout, "ERROR: %s compiled without OpenMP\n", argv[0]); #endif } if (rank == 0) fprintf(stdout, "Graph500 version: 2.1.4 replicated csc\n"); { int iRead=0; int i; FILE *input_file; char cbuf[256]; if (rank == 0) fprintf(stdout, "Reading input from %s\n",argv[1]); input_file=fopen(argv[1],"r"); if(input_file==NULL){ if (rank == 0) fprintf(stdout, "Error : can no open %s file\n",argv[1]); MPI_Barrier(MPI_COMM_WORLD); MPI_Abort(MPI_COMM_WORLD, 1); } fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&SCALE); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&edgefactor); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&num_bfs_roots); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&bDoOneNodePureOpenMP); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&bRunPerf); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&bUsePerfForTEPS); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&bRunVal); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%f",&timeForPerf); fgets(cbuf,256,input_file);iRead+=sscanf(cbuf,"%d",&numberOfCyclesForPerf); if(numberOfCyclesForPerf==0) bRunPerf=0; if(timeForPerf==0.0) bRunPerf=0; if(bRunPerf==0) 
bUsePerfForTEPS=0; //fgets(cbuf,256,input_file); //for (i = 0; i < 16; i++) // iRead+=sscanf(cbuf+i*2,"%2x",&refMD5[i]); //refMD5[i]=cbuf[i]; refEdgeCounts = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t)); refBFS_Roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t)); for (i = 0; i < num_bfs_roots; i++){ fgets(cbuf,256,input_file); iRead+=sscanf(cbuf,"%" SCNd64 " %" SCNd64 " ",refBFS_Roots+i,refEdgeCounts+i); } //printf("%d %d\n",rank,iRead); //printf("%d %d\n",rank,SCALE); //printf("%d %d\n",rank,edgefactor); fprintf(stdout, "rank %d/%d\n",rank,size); if (rank == 0){ fprintf(stdout, "\tScale: %d\n",SCALE); fprintf(stdout, "\tEdgefactor: %d\n",edgefactor); fprintf(stdout, "\tNumber of BFS roots: %d\n",num_bfs_roots); fprintf(stdout, "\tUse pure OpenMP Implementation for single node: %d\n",bDoOneNodePureOpenMP); fprintf(stdout, "\tRun performance section: %d\n",bRunPerf); fprintf(stdout, "\tRun validation: %d\n",bRunVal); fprintf(stdout, "\tTime for performance section in seconds: %f\n",timeForPerf); fprintf(stdout, "\tUse only performance section for TEPS calculation: %d\n",bUsePerfForTEPS); fprintf(stdout, "\tMax number of cycles: %d\n",numberOfCyclesForPerf); fprintf(stdout, "\tNumber of MPI processes: %d\n",size); #ifdef _OPENMP fprintf(stdout, "\tMax number of threads per MPI process: %d\n",omp_get_max_threads()); #else fprintf(stdout, "\tMax number of threads per MPI process: compiled without OpenMP\n"); #endif //fprintf(stdout, "\tReference md5 on initial edge list: "); //for (i = 0; i < 16; i++) // fprintf(stdout, "%2.2x", refMD5[i]); //fprintf(stdout, "\n"); } fclose(input_file); #ifdef SHOWCPUAFF pid_t pid=getpid(); for (i = 0; i < size; i++){ if(i==rank){ fprintf(stdout, "MPI Process %d:\n",rank); sprintf(cbuf,"grep -i cpus_allowed /proc/%d/status",pid); system(cbuf); fprintf(stdout, "\n"); } MPI_Barrier(MPI_COMM_WORLD); } #endif MPI_Barrier(MPI_COMM_WORLD); //MPI_Barrier(MPI_COMM_WORLD); //MPI_Abort(MPI_COMM_WORLD, 1); } // int SCALE = 16; // int edgefactor = 16; /* nedges / nvertices, i.e., 2*avg. degree */ // if (argc >= 2) SCALE = atoi(argv[1]); // if (argc >= 3) edgefactor = atoi(argv[2]); // if (argc <= 1 || argc >= 4 || SCALE == 0 || edgefactor == 0) { // if (rank == 0) { // fprintf(stdout, "Usage: %s SCALE edgefactor\n SCALE = log_2(# vertices) [integer, required]\n edgefactor = (# edges) / (# vertices) = .5 * (average vertex degree) [integer, defaults to 16]\n(Random number seed and Kronecker initiator are in main.c)\n", argv[0]); // } // MPI_Abort(MPI_COMM_WORLD, 1); // } uint64_t seed1 = 2, seed2 = 3; const char* filename = getenv("TMPFILE"); /* If filename is NULL, store data in memory */ tuple_graph tg; tg.nglobaledges = (int64_t)(edgefactor) << SCALE; int64_t nglobalverts = (int64_t)(1) << SCALE; tg.data_in_file = (filename != NULL); if (tg.data_in_file) { MPI_File_set_errhandler(MPI_FILE_NULL, MPI_ERRORS_ARE_FATAL); MPI_File_open(MPI_COMM_WORLD, (char*)filename, MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL | MPI_MODE_DELETE_ON_CLOSE | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &tg.edgefile); MPI_File_set_size(tg.edgefile, tg.nglobaledges * sizeof(packed_edge)); MPI_File_set_view(tg.edgefile, 0, packed_edge_mpi_type, packed_edge_mpi_type, "native", MPI_INFO_NULL); MPI_File_set_atomicity(tg.edgefile, 0); } /* Make the raw graph edges. */ /* Get roots for BFS runs, plus maximum vertex with non-zero degree (used by * validator). 
*/ //int num_bfs_roots = 64; int64_t* bfs_roots = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t)); int64_t max_used_vertex = 0; MPI_Barrier(MPI_COMM_WORLD); double make_graph_start = MPI_Wtime(); { /* Spread the two 64-bit numbers into five nonzero values in the correct * range. */ uint_fast32_t seed[5]; make_mrg_seed(seed1, seed2, seed); /* As the graph is being generated, also keep a bitmap of vertices with * incident edges. We keep a grid of processes, each row of which has a * separate copy of the bitmap (distributed among the processes in the * row), and then do an allreduce at the end. This scheme is used to avoid * non-local communication and reading the file separately just to find BFS * roots. */ MPI_Offset nchunks_in_file = (tg.nglobaledges + FILE_CHUNKSIZE - 1) / FILE_CHUNKSIZE; int64_t bitmap_size_in_bytes = int64_min(BITMAPSIZE, (nglobalverts + CHAR_BIT - 1) / CHAR_BIT); if (bitmap_size_in_bytes * size * CHAR_BIT < nglobalverts) { bitmap_size_in_bytes = (nglobalverts + size * CHAR_BIT - 1) / (size * CHAR_BIT); } int ranks_per_row = ((nglobalverts + CHAR_BIT - 1) / CHAR_BIT + bitmap_size_in_bytes - 1) / bitmap_size_in_bytes; int nrows = size / ranks_per_row; int my_row = -1, my_col = -1; unsigned char* restrict has_edge = NULL; MPI_Comm cart_comm; { int dims[2] = {size / ranks_per_row, ranks_per_row}; int periods[2] = {0, 0}; MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1, &cart_comm); } int in_generating_rectangle = 0; if (cart_comm != MPI_COMM_NULL) { in_generating_rectangle = 1; { int dims[2], periods[2], coords[2]; MPI_Cart_get(cart_comm, 2, dims, periods, coords); my_row = coords[0]; my_col = coords[1]; } MPI_Comm this_col; MPI_Comm_split(cart_comm, my_col, my_row, &this_col); MPI_Comm_free(&cart_comm); has_edge = (unsigned char*)xMPI_Alloc_mem(bitmap_size_in_bytes); memset(has_edge, 0, bitmap_size_in_bytes); /* Every rank in a given row creates the same vertices (for updating the * bitmap); only one writes them to the file (or final memory buffer). */ packed_edge* buf = (packed_edge*)xmalloc(FILE_CHUNKSIZE * sizeof(packed_edge)); MPI_Offset block_limit = (nchunks_in_file + nrows - 1) / nrows; /* fprintf(stdout, "%d: nchunks_in_file = %" PRId64 ", block_limit = %" PRId64 " in grid of %d rows, %d cols\n", rank, (int64_t)nchunks_in_file, (int64_t)block_limit, nrows, ranks_per_row); */ if (tg.data_in_file) { tg.edgememory_size = 0; tg.edgememory = NULL; } else { int my_pos = my_row + my_col * nrows; int last_pos = (tg.nglobaledges % ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row) != 0) ? (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row) : -1; int64_t edges_left = tg.nglobaledges % FILE_CHUNKSIZE; int64_t nedges = FILE_CHUNKSIZE * (tg.nglobaledges / ((int64_t)FILE_CHUNKSIZE * nrows * ranks_per_row)) + FILE_CHUNKSIZE * (my_pos < (tg.nglobaledges / FILE_CHUNKSIZE) % (nrows * ranks_per_row)) + (my_pos == last_pos ? 
edges_left : 0); /* fprintf(stdout, "%d: nedges = %" PRId64 " of %" PRId64 "\n", rank, (int64_t)nedges, (int64_t)tg.nglobaledges); */ tg.edgememory_size = nedges; tg.edgememory = (packed_edge*)xmalloc(nedges * sizeof(packed_edge)); } MPI_Offset block_idx; for (block_idx = 0; block_idx < block_limit; ++block_idx) { /* fprintf(stdout, "%d: On block %d of %d\n", rank, (int)block_idx, (int)block_limit); */ MPI_Offset start_edge_index = int64_min(FILE_CHUNKSIZE * (block_idx * nrows + my_row), tg.nglobaledges); MPI_Offset edge_count = int64_min(tg.nglobaledges - start_edge_index, FILE_CHUNKSIZE); packed_edge* actual_buf = (!tg.data_in_file && block_idx % ranks_per_row == my_col) ? tg.edgememory + FILE_CHUNKSIZE * (block_idx / ranks_per_row) : buf; /* fprintf(stdout, "%d: My range is [%" PRId64 ", %" PRId64 ") %swriting into index %" PRId64 "\n", rank, (int64_t)start_edge_index, (int64_t)(start_edge_index + edge_count), (my_col == (block_idx % ranks_per_row)) ? "" : "not ", (int64_t)(FILE_CHUNKSIZE * (block_idx / ranks_per_row))); */ if (!tg.data_in_file && block_idx % ranks_per_row == my_col) { assert (FILE_CHUNKSIZE * (block_idx / ranks_per_row) + edge_count <= tg.edgememory_size); } generate_kronecker_range(seed, SCALE, start_edge_index, start_edge_index + edge_count, actual_buf); if (tg.data_in_file && my_col == (block_idx % ranks_per_row)) { /* Try to spread writes among ranks */ MPI_File_write_at(tg.edgefile, start_edge_index, actual_buf, edge_count, packed_edge_mpi_type, MPI_STATUS_IGNORE); } ptrdiff_t i; #ifdef _OPENMP #pragma omp parallel for #endif for (i = 0; i < edge_count; ++i) { int64_t src = get_v0_from_edge(&actual_buf[i]); int64_t tgt = get_v1_from_edge(&actual_buf[i]); if (src == tgt) continue; if (src / bitmap_size_in_bytes / CHAR_BIT == my_col) { #ifdef _OPENMP #pragma omp atomic #endif has_edge[(src / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (src % CHAR_BIT)); } if (tgt / bitmap_size_in_bytes / CHAR_BIT == my_col) { #ifdef _OPENMP #pragma omp atomic #endif has_edge[(tgt / CHAR_BIT) % bitmap_size_in_bytes] |= (1 << (tgt % CHAR_BIT)); } } } free(buf); #if 0 /* The allreduce for each root acts like we did this: */ MPI_Allreduce(MPI_IN_PLACE, has_edge, bitmap_size_in_bytes, MPI_UNSIGNED_CHAR, MPI_BOR, this_col); #endif MPI_Comm_free(&this_col); } else { tg.edgememory = NULL; tg.edgememory_size = 0; } MPI_Allreduce(&tg.edgememory_size, &tg.max_edgememory_size, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD); /* Find roots and max used vertex */ { uint64_t counter = 0; int bfs_root_idx; for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) { int64_t root; while (1) { double d[2]; make_random_numbers(2, seed1, seed2, counter, d); root = (int64_t)((d[0] + d[1]) * nglobalverts) % nglobalverts; counter += 2; if (counter > 2 * nglobalverts) break; int is_duplicate = 0; int i; for (i = 0; i < bfs_root_idx; ++i) { if (root == bfs_roots[i]) { is_duplicate = 1; break; } } if (is_duplicate) continue; /* Everyone takes the same path here */ int root_ok = 0; if (in_generating_rectangle && (root / CHAR_BIT / bitmap_size_in_bytes) == my_col) { root_ok = (has_edge[(root / CHAR_BIT) % bitmap_size_in_bytes] & (1 << (root % CHAR_BIT))) != 0; } MPI_Allreduce(MPI_IN_PLACE, &root_ok, 1, MPI_INT, MPI_LOR, MPI_COMM_WORLD); if (root_ok) break; } bfs_roots[bfs_root_idx] = root; if((refBFS_Roots!=NULL) && (rank==0)){ if(refBFS_Roots[bfs_root_idx] != bfs_roots[bfs_root_idx]) fprintf(stdout,"ERROR: BFS roots do not match reference (Ref: %" PRId64 " Here: %" PRId64 ")\n",refBFS_Roots[bfs_root_idx], 
bfs_roots[bfs_root_idx]); } } num_bfs_roots = bfs_root_idx; /* Find maximum non-zero-degree vertex. */ { int64_t i; max_used_vertex = 0; if (in_generating_rectangle) { for (i = bitmap_size_in_bytes * CHAR_BIT; i > 0; --i) { if (i > nglobalverts) continue; if (has_edge[(i - 1) / CHAR_BIT] & (1 << ((i - 1) % CHAR_BIT))) { max_used_vertex = (i - 1) + my_col * CHAR_BIT * bitmap_size_in_bytes; break; } } } MPI_Allreduce(MPI_IN_PLACE, &max_used_vertex, 1, MPI_INT64_T, MPI_MAX, MPI_COMM_WORLD); } } if (in_generating_rectangle) { MPI_Free_mem(has_edge); } if (tg.data_in_file) { MPI_File_sync(tg.edgefile); } } MPI_Barrier(MPI_COMM_WORLD); double make_graph_stop = MPI_Wtime(); double make_graph_time = make_graph_stop - make_graph_start; if (rank == 0) { /* Not an official part of the results */ fprintf(stdout, "graph_generation: %f s\n", make_graph_time); } /* Make user's graph data structure. */ MPI_Barrier(MPI_COMM_WORLD); double data_struct_start = MPI_Wtime(); #ifdef DOONENODEOMPPURE if((size==1)&&(bDoOneNodePureOpenMP==1)){ create_graph_from_edgelist (tg.edgememory, tg.nglobaledges); } else make_graph_data_structure(&tg); #else make_graph_data_structure(&tg); #endif MPI_Barrier(MPI_COMM_WORLD); double data_struct_stop = MPI_Wtime(); double data_struct_time = data_struct_stop - data_struct_start; if (rank == 0) { /* Not an official part of the results */ fprintf(stdout, "construction_time: %f s\n", data_struct_time); } /* Number of edges visited in each BFS; a double so get_statistics can be * used directly. */ double* edge_counts = (double*)xmalloc(num_bfs_roots * sizeof(double)); int64_t* edge_counts_ul = (int64_t*)xmalloc(num_bfs_roots * sizeof(int64_t)); /* Run BFS. */ int validation_passed = 1; double* bfs_times = (double*)xmalloc(num_bfs_roots * sizeof(double)); double* validate_times = (double*)xmalloc(num_bfs_roots * sizeof(double)); uint64_t nlocalverts = get_nlocalverts_for_pred(); int64_t* pred = NULL; /* allocated exactly once below, after the final nlocalverts is known */ #ifdef DOONENODEOMPPURE if((size==1)&&(bDoOneNodePureOpenMP==1)){ nlocalverts = tg.nglobaledges; pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t)); } else{ nlocalverts = get_nlocalverts_for_pred(); pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t)); } #else nlocalverts = get_nlocalverts_for_pred(); pred = (int64_t*)xMPI_Alloc_mem(nlocalverts * sizeof(int64_t)); #endif int bfs_root_idx; int CyclesPassed=0; int ValidationStep=0; if(bRunPerf==0) { ValidationStep=1; numberOfCyclesForPerf=1; } for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) bfs_times[bfs_root_idx]=0.0; double performance_start = MPI_Wtime(); int count=0; while(1){ MPI_Barrier(MPI_COMM_WORLD); if (rank == 0) fprintf(stdout, "Starting cycle %d.\n", CyclesPassed); for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) { int64_t root = bfs_roots[bfs_root_idx]; if ((rank == 0)&&(ValidationStep)) fprintf(stdout, "Running BFS %d\n", bfs_root_idx); /* Clear the pred array. */ memset(pred, 0, nlocalverts * sizeof(int64_t)); /* Do the actual BFS. 
*/ MPI_Barrier(MPI_COMM_WORLD); double bfs_start = MPI_Wtime(); #ifdef DOONENODEOMPPURE int64_t max_bfsvtx; if((size==1)&&(bDoOneNodePureOpenMP==1)){ make_bfs_tree (&pred[0], &max_bfsvtx, root); } else run_bfs(root, &pred[0]); #else run_bfs(root, &pred[0]); #endif MPI_Barrier(MPI_COMM_WORLD); double bfs_stop = MPI_Wtime(); if( (!ValidationStep) || (bUsePerfForTEPS==0)){ bfs_times[bfs_root_idx] += bfs_stop - bfs_start; count+=1; } //&&(ValidationStep) if ((rank == 0)) fprintf(stdout, "Time for BFS %d is %f\n", bfs_root_idx, bfs_stop - bfs_start); /* Validate result. */ //if (!getenv("SKIP_VALIDATION")) { if (ValidationStep) { if (rank == 0) fprintf(stdout, "Validating BFS %d\n", bfs_root_idx); MPI_Barrier(MPI_COMM_WORLD); double validate_start = MPI_Wtime(); int64_t edge_visit_count; int validation_passed_one; #ifdef DOONENODEOMPPURE if((size==1)&&(bDoOneNodePureOpenMP==1)){ int64_t result; result=verify_bfs_tree (&pred[0], max_bfsvtx, root, tg.edgememory, tg.nglobaledges); if (result < 0){ fprintf(stdout, "Validation error: code %" PRId64 ".\n", result); validation_passed_one=0; edge_visit_count=1; } else { edge_visit_count=result; validation_passed_one=1; } } else{ if(bRunVal==1){ //fprintf(stdout, "validate_bfs_result_seq\n"); validation_passed_one = validate_bfs_result_seq(&tg, nglobalverts, nlocalverts, root, pred, &edge_visit_count,max_used_vertex); } else{ //fprintf(stdout, "validate_bfs_result\n"); validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count); } } #else if(bRunVal==1) validation_passed_one = validate_bfs_result_seq(&tg, nglobalverts, nlocalverts, root, pred, &edge_visit_count,max_used_vertex); else validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count); #endif //int validation_passed_one = validate_bfs_result(&tg, max_used_vertex + 1, nlocalverts, root, pred, &edge_visit_count); MPI_Barrier(MPI_COMM_WORLD); double validate_stop = MPI_Wtime(); validate_times[bfs_root_idx] = validate_stop - validate_start; if (rank == 0) fprintf(stdout, "Validate time for BFS %d is %f\n", bfs_root_idx, validate_times[bfs_root_idx]); edge_counts[bfs_root_idx] = (double)edge_visit_count; edge_counts_ul[bfs_root_idx] = edge_visit_count; if (rank == 0) fprintf(stdout, "TEPS for BFS %d is %g\n", bfs_root_idx, edge_visit_count / (bfs_stop - bfs_start)); if((refEdgeCounts!=NULL) && (rank==0)){ if(refEdgeCounts[bfs_root_idx]!=edge_counts_ul[bfs_root_idx]) fprintf(stdout,"ERROR: Edge counts do not match reference (Ref: %" PRId64 " Here: %" PRId64 ")\n",refEdgeCounts[bfs_root_idx], edge_counts_ul[bfs_root_idx]); } if (!validation_passed_one) { validation_passed = 0; if (rank == 0) fprintf(stdout, "Validation failed for this BFS root; skipping rest.\n"); break; } } } CyclesPassed++; if((MPI_Wtime()-performance_start>=timeForPerf)||(CyclesPassed>=numberOfCyclesForPerf)){ if(bRunVal){ if(ValidationStep==0) ValidationStep=1; else break; } else break; } if (validation_passed==0) break; } if (rank == 0) fprintf(stdout,"Completed %d cycles\n", CyclesPassed); for (bfs_root_idx = 0; bfs_root_idx < num_bfs_roots; ++bfs_root_idx) { bfs_times[bfs_root_idx]/=(count/num_bfs_roots); } MPI_Barrier(MPI_COMM_WORLD); /* Print results. 
*/ if (rank == 0) { int i; for (i = 0; i < num_bfs_roots; ++i) fprintf(stdout, "%lu %lu # [%2d] bfs_roots edge_visit_count\n",bfs_roots[i],edge_counts_ul[i],i); if (!validation_passed) { fprintf(stdout, "No results printed for invalid run.\n"); } else { int i; fprintf(stdout, "SCALE: %d\n", SCALE); fprintf(stdout, "edgefactor: %d\n", edgefactor); fprintf(stdout, "NBFS: %d\n", num_bfs_roots); fprintf(stdout, "graph_generation: %g\n", make_graph_time); fprintf(stdout, "num_mpi_processes: %d\n", size); fprintf(stdout, "construction_time: %g\n", data_struct_time); double stats[s_LAST]; get_statistics(bfs_times, num_bfs_roots, stats); fprintf(stdout, "min_time: %g\n", stats[s_minimum]); fprintf(stdout, "firstquartile_time: %g\n", stats[s_firstquartile]); fprintf(stdout, "median_time: %g\n", stats[s_median]); fprintf(stdout, "thirdquartile_time: %g\n", stats[s_thirdquartile]); fprintf(stdout, "max_time: %g\n", stats[s_maximum]); fprintf(stdout, "mean_time: %g\n", stats[s_mean]); fprintf(stdout, "stddev_time: %g\n", stats[s_std]); get_statistics(edge_counts, num_bfs_roots, stats); fprintf(stdout, "min_nedge: %.11g\n", stats[s_minimum]); fprintf(stdout, "firstquartile_nedge: %.11g\n", stats[s_firstquartile]); fprintf(stdout, "median_nedge: %.11g\n", stats[s_median]); fprintf(stdout, "thirdquartile_nedge: %.11g\n", stats[s_thirdquartile]); fprintf(stdout, "max_nedge: %.11g\n", stats[s_maximum]); fprintf(stdout, "mean_nedge: %.11g\n", stats[s_mean]); fprintf(stdout, "stddev_nedge: %.11g\n", stats[s_std]); double* secs_per_edge = (double*)xmalloc(num_bfs_roots * sizeof(double)); for (i = 0; i < num_bfs_roots; ++i) secs_per_edge[i] = bfs_times[i] / edge_counts[i]; get_statistics(secs_per_edge, num_bfs_roots, stats); fprintf(stdout, "min_TEPS: %g\n", 1. / stats[s_maximum]); fprintf(stdout, "firstquartile_TEPS: %g\n", 1. / stats[s_thirdquartile]); fprintf(stdout, "median_TEPS: %g\n", 1. / stats[s_median]); fprintf(stdout, "thirdquartile_TEPS: %g\n", 1. / stats[s_firstquartile]); fprintf(stdout, "max_TEPS: %g\n", 1. / stats[s_minimum]); fprintf(stdout, "harmonic_mean_TEPS: %g\n", 1. / stats[s_mean]); /* Formula from: * Title: The Standard Errors of the Geometric and Harmonic Means and * Their Application to Index Numbers * Author(s): Nilan Norris * Source: The Annals of Mathematical Statistics, Vol. 11, No. 4 (Dec., 1940), pp. 445-448 * Publisher(s): Institute of Mathematical Statistics * Stable URL: http://www.jstor.org/stable/2235723 * (same source as in specification). 
*/ fprintf(stdout, "harmonic_stddev_TEPS: %g\n", stats[s_std] / (stats[s_mean] * stats[s_mean] * sqrt(num_bfs_roots - 1))); free(secs_per_edge); secs_per_edge = NULL; free(edge_counts); edge_counts = NULL; get_statistics(validate_times, num_bfs_roots, stats); fprintf(stdout, "min_validate: %g\n", stats[s_minimum]); fprintf(stdout, "firstquartile_validate: %g\n", stats[s_firstquartile]); fprintf(stdout, "median_validate: %g\n", stats[s_median]); fprintf(stdout, "thirdquartile_validate: %g\n", stats[s_thirdquartile]); fprintf(stdout, "max_validate: %g\n", stats[s_maximum]); fprintf(stdout, "mean_validate: %g\n", stats[s_mean]); fprintf(stdout, "stddev_validate: %g\n", stats[s_std]); #if 0 for (i = 0; i < num_bfs_roots; ++i) { fprintf(stdout, "Run %3d: %g s, validation %g s\n", i + 1, bfs_times[i], validate_times[i]); } #endif } } #ifdef SHOWCPUAFF { int i; char cbuf[256]; MPI_Barrier(MPI_COMM_WORLD); pid_t pid=getpid(); for (i = 0; i < size; i++){ if(i==rank){ fprintf(stdout, "MPI Process %d, memory usage:\n",rank); sprintf(cbuf,"grep -i Vm /proc/%d/status",pid); system(cbuf); fprintf(stdout, "\n"); } MPI_Barrier(MPI_COMM_WORLD); } MPI_Barrier(MPI_COMM_WORLD); } #endif MPI_Free_mem(pred); free(bfs_roots); free_graph_data_structure(); if (tg.data_in_file) { MPI_File_close(&tg.edgefile); } else { free(tg.edgememory); tg.edgememory = NULL; } free(bfs_times); free(validate_times); cleanup_globals(); MPI_Finalize(); return 0; }
functor.h
/*! * Copyright (c) 2019 by Contributors * \file kernel/cpu/functor.h * \brief Functors for template on CPU */ #ifndef DGL_KERNEL_CPU_FUNCTOR_H_ #define DGL_KERNEL_CPU_FUNCTOR_H_ #include <dmlc/omp.h> #include <algorithm> #include "../binary_reduce_common.h" namespace dgl { namespace kernel { // Reducer functor specialization template <typename DType> struct ReduceSum<kDLCPU, DType> { static void Call(DType* addr, DType val) { #pragma omp atomic *addr += val; } static DType BackwardCall(DType val, DType accum) { return 1; } }; template <typename DType> struct ReduceMax<kDLCPU, DType> { static void Call(DType* addr, DType val) { #pragma omp critical *addr = std::max(*addr, val); } static DType BackwardCall(DType val, DType accum) { return static_cast<DType>(val == accum); } }; template <typename DType> struct ReduceMin<kDLCPU, DType> { static void Call(DType* addr, DType val) { #pragma omp critical *addr = std::min(*addr, val); } static DType BackwardCall(DType val, DType accum) { return static_cast<DType>(val == accum); } }; template <typename DType> struct ReduceProd<kDLCPU, DType> { static void Call(DType* addr, DType val) { #pragma omp atomic *addr *= val; } static DType BackwardCall(DType val, DType accum) { return accum / val; } }; template <typename DType> struct ReduceNone<kDLCPU, DType> { static void Call(DType* addr, DType val) { *addr = val; } static DType BackwardCall(DType val, DType accum) { return 1; } }; } // namespace kernel } // namespace dgl #endif // DGL_KERNEL_CPU_FUNCTOR_H_
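// Editor's note: an illustrative (hypothetical) caller for the reducer
// functors above; scatter_reduce is not part of the DGL kernel API. In a
// parallel scatter loop several iterations may write to the same output slot,
// which is exactly why ReduceSum/ReduceProd use `#pragma omp atomic` and
// ReduceMax/ReduceMin fall back to `#pragma omp critical` in their Call().
#include <cstddef>
#include <vector>

template <typename DType, typename Reducer>
void scatter_reduce(const std::vector<DType>& src,
                    const std::vector<std::size_t>& index,
                    std::vector<DType>& out) {
#pragma omp parallel for
  for (long i = 0; i < static_cast<long>(src.size()); ++i) {
    // index[i] values may collide across threads, so Reducer::Call must be
    // thread-safe, as the specializations above are.
    Reducer::Call(&out[index[i]], src[i]);
  }
}
// Example instantiation, assuming the header above is included:
//   scatter_reduce<float, dgl::kernel::ReduceSum<kDLCPU, float>>(src, idx, out);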
time_dgeqrf-task.c
/** * * @generated d Tue Jan 7 11:45:25 2014 * **/ #define _TYPE double #define _PREC double #define _LAMCH LAPACKE_dlamch_work #define _NAME "PLASMA_dgeqrf_Tile" /* See Lawn 41 page 120 */ #define _FMULS FMULS_GEQRF( M, N ) #define _FADDS FADDS_GEQRF( M, N ) #include "./timing.inc" static double RunTest(real_Double_t *t_, struct user_parameters* params) { double t; PLASMA_desc *descT; int64_t N = params->matrix_size; int64_t IB = params->iblocksize; int64_t NB = params->blocksize; int check = params->check; double check_res = 0; /* Allocate Data */ PLASMA_desc *descA = NULL; double *ptr = (double*)malloc(N * N * sizeof(double)); PLASMA_Desc_Create(&descA, ptr, PlasmaRealDouble, NB, NB, NB*NB, N, N, 0, 0, N, N); #pragma omp parallel #pragma omp master plasma_pdpltmg_quark(*descA, 5373 ); /* Save A for check */ double *A = NULL; if ( check ) { A = (double*)malloc(N * N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descA, (void*)A, N); } /* Allocate Workspace */ plasma_alloc_ibnb_tile(N, N, PlasmaRealDouble, &descT, IB, NB); /* Do the computations */ START_TIMING(); #pragma omp parallel #pragma omp master plasma_pdgeqrf_quark( *descA, *descT , IB); STOP_TIMING(); /* Check the solution */ if ( check ) { /* Allocate B for check */ PLASMA_desc *descB = NULL; double* ptr = (double*)malloc(N * sizeof(double)); PLASMA_Desc_Create(&descB, ptr, PlasmaRealDouble, NB, NB, NB*NB, N, 1, 0, 0, N, 1); /* Initialize and save B */ plasma_pdpltmg_seq(*descB, 2264 ); double *B = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)B, N); /* Compute the solution */ PLASMA_dgeqrs_Tile( descA, descT, descB , IB); /* Copy solution to X */ double *X = (double*)malloc(N * sizeof(double)); plasma_pdtile_to_lapack_quark(*descB, (void*)X, N); check_res = d_check_solution(N, N, 1, A, N, B, X, N); /* Free checking structures */ PASTE_CODE_FREE_MATRIX( descB ); free( A ); free( B ); free( X ); } /* Free data */ PLASMA_Dealloc_Handle_Tile(&descT); PASTE_CODE_FREE_MATRIX( descA ); return check_res; }
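/*
 * Editor's note: START_TIMING()/STOP_TIMING() are defined in timing.inc,
 * which is not shown here. A plausible minimal version of the pattern used by
 * RunTest above brackets the measured region with a wall-clock read, using the
 * local variable t declared in RunTest and reporting the elapsed seconds via
 * *t_. This sketch is an assumption, not the actual PLASMA macros.
 */
#include <omp.h>
#define START_TIMING() do { t = -omp_get_wtime(); } while (0)
#define STOP_TIMING()  do { t += omp_get_wtime(); *t_ = t; } while (0)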
piecewise_linear_model.h
// This file is part of PGM-index <https://github.com/gvinciguerra/PGM-index>. // Copyright (c) 2018 Giorgio Vinciguerra. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #pragma once #include <cmath> #include <limits> #include <vector> #include <stdexcept> #include <type_traits> #ifdef _OPENMP #include <omp.h> #else #warning Compilation with -fopenmp is recommended typedef int omp_int_t; inline omp_int_t omp_get_max_threads() { return 1; } #endif template<typename T> using LargeSigned = typename std::conditional_t<std::is_floating_point_v<T>, long double, std::conditional_t<(sizeof(T) < 8), int64_t, __int128>>; template<typename X, typename Y> class OptimalPiecewiseLinearModel { private: using SX = LargeSigned<X>; using SY = LargeSigned<Y>; struct Slope { SX dx{}; SY dy{}; bool operator<(const Slope &p) const { return dy * p.dx < dx * p.dy; } bool operator>(const Slope &p) const { return dy * p.dx > dx * p.dy; } bool operator==(const Slope &p) const { return dy * p.dx == dx * p.dy; } bool operator!=(const Slope &p) const { return dy * p.dx != dx * p.dy; } explicit operator long double() const { return dy / (long double) dx; } }; struct StoredPoint { X x; Y y; }; struct Point { X x{}; SY y{}; Slope operator-(const Point &p) const { return {SX(x) - p.x, y - p.y}; } }; template<bool Upper> struct Hull : private std::vector<StoredPoint> { const SY epsilon; explicit Hull(SY epsilon) : std::vector<StoredPoint>(), epsilon(Upper ? 
epsilon : -epsilon) {} Point operator[](size_t i) const { auto &p = std::vector<StoredPoint>::operator[](i); return {p.x, SY(p.y) + epsilon}; } void clear() { std::vector<StoredPoint>::clear(); } void resize(size_t n) { std::vector<StoredPoint>::resize(n); } void reserve(size_t n) { std::vector<StoredPoint>::reserve(n); } size_t size() const { return std::vector<StoredPoint>::size(); } void push(X x, Y y) { std::vector<StoredPoint>::emplace_back(StoredPoint{x, y}); }; }; const Y epsilon; Hull<false> lower; Hull<true> upper; X first_x = 0; X last_x = 0; size_t lower_start = 0; size_t upper_start = 0; size_t points_in_hull = 0; Point rectangle[4]; auto cross(const Point &O, const Point &A, const Point &B) const { auto OA = A - O; auto OB = B - O; return (OA.dx * OB.dy) - (OA.dy * OB.dx); } public: class CanonicalSegment; explicit OptimalPiecewiseLinearModel(Y epsilon) : epsilon(epsilon), lower(epsilon), upper(epsilon) { if (epsilon < 0) throw std::invalid_argument("epsilon cannot be negative"); upper.reserve(1u << 16); lower.reserve(1u << 16); } bool add_point(const X &x, const Y &y) { if (points_in_hull > 0 && x <= last_x) throw std::logic_error("Points must be increasing by x."); last_x = x; Point p1{x, SY(y) + epsilon}; Point p2{x, SY(y) - epsilon}; if (points_in_hull == 0) { first_x = x; rectangle[0] = p1; rectangle[1] = p2; upper.clear(); lower.clear(); upper.push(x, y); lower.push(x, y); upper_start = lower_start = 0; ++points_in_hull; return true; } if (points_in_hull == 1) { rectangle[2] = p2; rectangle[3] = p1; upper.push(x, y); lower.push(x, y); ++points_in_hull; return true; } auto slope1 = rectangle[2] - rectangle[0]; auto slope2 = rectangle[3] - rectangle[1]; bool outside_line1 = p1 - rectangle[2] < slope1; bool outside_line2 = p2 - rectangle[3] > slope2; if (outside_line1 || outside_line2) { points_in_hull = 0; return false; } if (p1 - rectangle[1] < slope2) { // Find extreme slope auto min = lower[lower_start] - p1; auto min_i = lower_start; for (auto i = lower_start + 1; i < lower.size(); i++) { auto val = (lower[i] - p1); if (val > min) break; min = val; min_i = i; } rectangle[1] = lower[min_i]; rectangle[3] = p1; lower_start = min_i; // Hull update auto end = upper.size(); for (; end >= upper_start + 2 && cross(upper[end - 2], upper[end - 1], p1) <= 0; --end) continue; upper.resize(end); upper.push(x, y); } if (p2 - rectangle[0] > slope1) { // Find extreme slope auto max = upper[upper_start] - p2; auto max_i = upper_start; for (auto i = upper_start + 1; i < upper.size(); i++) { auto val = (upper[i] - p2); if (val < max) break; max = val; max_i = i; } rectangle[0] = upper[max_i]; rectangle[2] = p2; upper_start = max_i; // Hull update auto end = lower.size(); for (; end >= lower_start + 2 && cross(lower[end - 2], lower[end - 1], p2) >= 0; --end) continue; lower.resize(end); lower.push(x, y); } ++points_in_hull; return true; } CanonicalSegment get_segment(size_t number) { if (points_in_hull == 1) return CanonicalSegment(rectangle[0], rectangle[1], first_x); return CanonicalSegment(rectangle, first_x, number); } void reset() { points_in_hull = 0; lower.clear(); upper.clear(); } }; template<typename X, typename Y> class OptimalPiecewiseLinearModel<X, Y>::CanonicalSegment { friend class OptimalPiecewiseLinearModel; Point rectangle[4]; X first; size_t number; CanonicalSegment(const Point &p0, const Point &p1, X first) : rectangle{p0, p1, p0, p1}, first(first), number(1) {}; CanonicalSegment(const Point (&rectangle)[4], X first, size_t number) : rectangle{rectangle[0], 
rectangle[1], rectangle[2], rectangle[3]}, first(first), number(number) {}; bool one_point() const { return rectangle[0].x == rectangle[2].x && rectangle[0].y == rectangle[2].y && rectangle[1].x == rectangle[3].x && rectangle[1].y == rectangle[3].y; } public: CanonicalSegment() = default; X get_first_x() const { return first; } size_t get_number() const{ return number; } std::pair<long double, long double> get_intersection() const { auto &p0 = rectangle[0]; auto &p1 = rectangle[1]; auto &p2 = rectangle[2]; auto &p3 = rectangle[3]; auto slope1 = p2 - p0; auto slope2 = p3 - p1; if (one_point() || slope1 == slope2) return {p0.x, p0.y}; auto p0p1 = p1 - p0; auto a = slope1.dx * slope2.dy - slope1.dy * slope2.dx; auto b = (p0p1.dx * slope2.dy - p0p1.dy * slope2.dx) / static_cast<long double>(a); auto i_x = p0.x + b * slope1.dx; auto i_y = p0.y + b * slope1.dy; return {i_x, i_y}; } std::pair<long double, long double> get_floating_point_segment(const X &origin) const { if (one_point()) return {0, (rectangle[0].y + rectangle[1].y) / 2}; auto[i_x, i_y] = get_intersection(); auto[min_slope, max_slope] = get_slope_range(); auto slope = (min_slope + max_slope) / 2.; auto intercept = i_y - (i_x - origin) * slope; return {slope, intercept}; } std::pair<long double, long double> get_slope_range() const { if (one_point()) return {0, 1}; auto min_slope = static_cast<long double>(rectangle[2] - rectangle[0]); auto max_slope = static_cast<long double>(rectangle[3] - rectangle[1]); return {min_slope, max_slope}; } }; template<typename Fin, typename Fout> size_t make_segmentation(size_t n, size_t epsilon, Fin in, Fout out) { if (n == 0) return 0; using X = typename std::invoke_result_t<Fin, size_t>::first_type; using Y = typename std::invoke_result_t<Fin, size_t>::second_type; size_t c = 0; size_t start = 0; auto p = in(0); OptimalPiecewiseLinearModel<X, Y> opt(epsilon); opt.add_point(p.first, p.second); for (size_t i = 1; i < n; ++i) { auto next_p = in(i); if (i != start && next_p.first == p.first) continue; p = next_p; if (!opt.add_point(p.first, p.second)) { out(opt.get_segment(i-start)); start = i; --i; ++c; } } out(opt.get_segment(n-start)); return ++c; } template<typename Fin, typename Fout> size_t make_segmentation_par(size_t n, size_t epsilon, Fin in, Fout out) { auto parallelism = std::min<size_t>(omp_get_max_threads(), 20); auto chunk_size = n / parallelism; auto c = 0ull; if (parallelism == 1 || n < 1ull << 15) return make_segmentation(n, epsilon, in, out); using X = typename std::invoke_result_t<Fin, size_t>::first_type; using Y = typename std::invoke_result_t<Fin, size_t>::second_type; using canonical_segment = typename OptimalPiecewiseLinearModel<X, Y>::CanonicalSegment; std::vector<std::vector<canonical_segment>> results(parallelism); #pragma omp parallel for reduction(+:c) num_threads(parallelism) for (auto i = 0ull; i < parallelism; ++i) { auto first = i * chunk_size; auto last = i == parallelism - 1 ? n : first + chunk_size; if (first > 0) { for (; first < last; ++first) if (in(first).first != in(first - 1).first) break; if (first == last) continue; } auto in_fun = [in, first](auto j) { return in(first + j); }; auto out_fun = [&results, i](auto cs) { results[i].emplace_back(cs); }; results[i].reserve(chunk_size / (epsilon > 0 ? 
epsilon * epsilon : 16)); c += make_segmentation(last - first, epsilon, in_fun, out_fun); } for (auto &v : results) for (auto &cs : v) out(cs); return c; } template<typename RandomIt> auto make_segmentation(RandomIt first, RandomIt last, size_t epsilon) { using key_type = typename RandomIt::value_type; using canonical_segment = typename OptimalPiecewiseLinearModel<key_type, size_t>::CanonicalSegment; using pair_type = typename std::pair<key_type, size_t>; size_t n = std::distance(first, last); std::vector<canonical_segment> out; out.reserve(epsilon > 0 ? n / (epsilon * epsilon) : n / 16); auto in_fun = [first](auto i) { return pair_type(first[i], i); }; auto out_fun = [&out](auto cs) { out.push_back(cs); }; make_segmentation(n, epsilon, in_fun, out_fun); return out; }
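// Editor's note: a small usage sketch for the routines above; demo_segmentation
// is not part of the PGM-index API. Keys must be strictly increasing (add_point
// throws otherwise); make_segmentation fits epsilon-bounded segments and each
// CanonicalSegment yields a concrete (slope, intercept) pair such that the
// predicted position of x is slope * (x - first_x) + intercept.
#include <cstddef>
#include <cstdio>
#include <vector>

inline void demo_segmentation() {
    std::vector<int> keys{2, 12, 15, 18, 23, 24, 29, 31, 34, 500};
    const std::size_t epsilon = 4;
    auto segments = make_segmentation(keys.begin(), keys.end(), epsilon);
    for (auto &cs : segments) {
        // Anchor the segment at its first key so the intercept is local.
        auto[slope, intercept] = cs.get_floating_point_segment(cs.get_first_x());
        std::printf("first_x=%d slope=%Lg intercept=%Lg\n",
                    cs.get_first_x(), slope, intercept);
    }
}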
schur_eliminator_impl.h
// Ceres Solver - A fast non-linear least squares minimizer // Copyright 2010, 2011, 2012 Google Inc. All rights reserved. // http://code.google.com/p/ceres-solver/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Google Inc. nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Author: [email protected] (Sameer Agarwal) // // TODO(sameeragarwal): row_block_counter can perhaps be replaced by // Chunk::start ? #ifndef CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_ #define CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_ #ifdef CERES_USE_OPENMP #include <omp.h> #endif // Eigen has an internal threshold switching between different matrix // multiplication algorithms. In particular for matrices larger than // EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD it uses a cache friendly // matrix matrix product algorithm that has a higher setup cost. For // matrix sizes close to this threshold, especially when the matrices // are thin and long, the default choice may not be optimal. This is // the case for us, as the default choice causes a 30% performance // regression when we moved from Eigen2 to Eigen3. 
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 10 #include <algorithm> #include <map> #include <glog/logging.h> #include "Eigen/Dense" #include "ceres/block_random_access_matrix.h" #include "ceres/block_sparse_matrix.h" #include "ceres/block_structure.h" #include "ceres/map_util.h" #include "ceres/schur_eliminator.h" #include "ceres/stl_util.h" #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" namespace ceres { namespace internal { template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>::~SchurEliminator() { STLDeleteElements(&rhs_locks_); } template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>:: Init(int num_eliminate_blocks, const CompressedRowBlockStructure* bs) { CHECK_GT(num_eliminate_blocks, 0) << "SchurComplementSolver cannot be initialized with " << "num_eliminate_blocks = 0."; num_eliminate_blocks_ = num_eliminate_blocks; const int num_col_blocks = bs->cols.size(); const int num_row_blocks = bs->rows.size(); buffer_size_ = 1; chunks_.clear(); lhs_row_layout_.clear(); int lhs_num_rows = 0; // Add a map object for each block in the reduced linear system // and build the row/column block structure of the reduced linear // system. lhs_row_layout_.resize(num_col_blocks - num_eliminate_blocks_); for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) { lhs_row_layout_[i - num_eliminate_blocks_] = lhs_num_rows; lhs_num_rows += bs->cols[i].size; } int r = 0; // Iterate over the row blocks of A, and detect the chunks. The // matrix should already have been ordered so that all rows // containing the same y block are vertically contiguous. Along // the way also compute the amount of space each chunk will need // to perform the elimination. while (r < num_row_blocks) { const int chunk_block_id = bs->rows[r].cells.front().block_id; if (chunk_block_id >= num_eliminate_blocks_) { break; } chunks_.push_back(Chunk()); Chunk& chunk = chunks_.back(); chunk.size = 0; chunk.start = r; int buffer_size = 0; const int e_block_size = bs->cols[chunk_block_id].size; // Add to the chunk until the first block in the row is // different than the one in the first row for the chunk. while (r + chunk.size < num_row_blocks) { const CompressedRow& row = bs->rows[r + chunk.size]; if (row.cells.front().block_id != chunk_block_id) { break; } // Iterate over the blocks in the row, ignoring the first // block since it is the one to be eliminated. 
for (int c = 1; c < row.cells.size(); ++c) { const Cell& cell = row.cells[c]; if (InsertIfNotPresent( &(chunk.buffer_layout), cell.block_id, buffer_size)) { buffer_size += e_block_size * bs->cols[cell.block_id].size; } } buffer_size_ = std::max(buffer_size, buffer_size_); ++chunk.size; } CHECK_GT(chunk.size, 0); r += chunk.size; } const Chunk& chunk = chunks_.back(); uneliminated_row_begins_ = chunk.start + chunk.size; if (num_threads_ > 1) { std::random_shuffle(chunks_.begin(), chunks_.end()); } buffer_.reset(new double[buffer_size_ * num_threads_]); STLDeleteElements(&rhs_locks_); rhs_locks_.resize(num_col_blocks - num_eliminate_blocks_); for (int i = 0; i < num_col_blocks - num_eliminate_blocks_; ++i) { rhs_locks_[i] = new Mutex; } VLOG(1) << "Eliminator threads: " << num_threads_; } template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>:: Eliminate(const BlockSparseMatrixBase* A, const double* b, const double* D, BlockRandomAccessMatrix* lhs, double* rhs) { if (lhs->num_rows() > 0) { lhs->SetZero(); VectorRef(rhs, lhs->num_rows()).setZero(); } const CompressedRowBlockStructure* bs = A->block_structure(); const int num_col_blocks = bs->cols.size(); // Add the diagonal to the Schur complement. if (D != NULL) { #pragma omp parallel for num_threads(num_threads_) schedule(dynamic) for (int i = num_eliminate_blocks_; i < num_col_blocks; ++i) { const int block_id = i - num_eliminate_blocks_; int r, c, row_stride, col_stride; CellInfo* cell_info = lhs->GetCell(block_id, block_id, &r, &c, &row_stride, &col_stride); if (cell_info != NULL) { const int block_size = bs->cols[i].size; typename EigenTypes<kFBlockSize>::ConstVectorRef diag(D + bs->cols[i].position, block_size); MutexLock l(&cell_info->m); MatrixRef m(cell_info->values, row_stride, col_stride); m.block(r, c, block_size, block_size).diagonal() += diag.array().square().matrix(); } } } // Eliminate y blocks one chunk at a time. For each chunk, // compute the entries of the normal equations and the gradient // vector block corresponding to the y block and then apply // Gaussian elimination to them. The matrix ete stores the normal // matrix corresponding to the block being eliminated and array // buffer_ contains the non-zero blocks in the row corresponding // to this y block in the normal equations. This computation is // done in ChunkDiagonalBlockAndGradient. UpdateRhs then applies // Gaussian elimination to the rhs of the normal equations, // updating the rhs of the reduced linear system by modifying rhs // blocks for all the z blocks that share a row block/residual // term with the y block. EliminateRowOuterProduct does the // corresponding operation for the lhs of the reduced linear // system. 
#pragma omp parallel for num_threads(num_threads_) schedule(dynamic) for (int i = 0; i < chunks_.size(); ++i) { #ifdef CERES_USE_OPENMP int thread_id = omp_get_thread_num(); #else int thread_id = 0; #endif double* buffer = buffer_.get() + thread_id * buffer_size_; const Chunk& chunk = chunks_[i]; const int e_block_id = bs->rows[chunk.start].cells.front().block_id; const int e_block_size = bs->cols[e_block_id].size; VectorRef(buffer, buffer_size_).setZero(); typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix ete(e_block_size, e_block_size); if (D != NULL) { const typename EigenTypes<kEBlockSize>::ConstVectorRef diag(D + bs->cols[e_block_id].position, e_block_size); ete = diag.array().square().matrix().asDiagonal(); } else { ete.setZero(); } typename EigenTypes<kEBlockSize>::Vector g(e_block_size); g.setZero(); // We are going to be computing // // S += F'F - F'E(E'E)^{-1}E'F // // for each Chunk. The computation is broken down into a number of // function calls as below. // Compute the outer product of the e_blocks with themselves (ete // = E'E). Compute the product of the e_blocks with the // corresponding f_blocks (buffer = E'F), the gradient of the terms // in this chunk (g) and add the outer product of the f_blocks to // Schur complement (S += F'F). ChunkDiagonalBlockAndGradient( chunk, A, b, chunk.start, &ete, &g, buffer, lhs); // Normally one wouldn't compute the inverse explicitly, but // e_block_size will typically be a small number like 3, in // which case it's much faster to compute the inverse once and // use it to multiply other matrices/vectors instead of doing a // Solve call over and over again. typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix inverse_ete = ete .template selfadjointView<Eigen::Upper>() .ldlt() .solve(Matrix::Identity(e_block_size, e_block_size)); // For the current chunk compute and update the rhs of the reduced // linear system. // // rhs = F'b - F'E(E'E)^(-1) E'b UpdateRhs(chunk, A, b, chunk.start, inverse_ete * g, rhs); // S -= F'E(E'E)^{-1}E'F ChunkOuterProduct(bs, inverse_ete, buffer, chunk.buffer_layout, lhs); } // For rows with no e_blocks, the Schur complement update reduces to // S += F'F. 
NoEBlockRowsUpdate(A, b, uneliminated_row_begins_, lhs, rhs); } template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>:: BackSubstitute(const BlockSparseMatrixBase* A, const double* b, const double* D, const double* z, double* y) { const CompressedRowBlockStructure* bs = A->block_structure(); #pragma omp parallel for num_threads(num_threads_) schedule(dynamic) for (int i = 0; i < chunks_.size(); ++i) { const Chunk& chunk = chunks_[i]; const int e_block_id = bs->rows[chunk.start].cells.front().block_id; const int e_block_size = bs->cols[e_block_id].size; typename EigenTypes<kEBlockSize>::VectorRef y_block( y + bs->cols[e_block_id].position, e_block_size); typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix ete(e_block_size, e_block_size); if (D != NULL) { const typename EigenTypes<kEBlockSize>::ConstVectorRef diag(D + bs->cols[e_block_id].position, e_block_size); ete = diag.array().square().matrix().asDiagonal(); } else { ete.setZero(); } for (int j = 0; j < chunk.size; ++j) { const CompressedRow& row = bs->rows[chunk.start + j]; const double* row_values = A->RowBlockValues(chunk.start + j); const Cell& e_cell = row.cells.front(); DCHECK_EQ(e_block_id, e_cell.block_id); const typename EigenTypes<kRowBlockSize, kEBlockSize>::ConstMatrixRef e_block(row_values + e_cell.position, row.block.size, e_block_size); typename EigenTypes<kRowBlockSize>::Vector sj = typename EigenTypes<kRowBlockSize>::ConstVectorRef (b + bs->rows[chunk.start + j].block.position, row.block.size); for (int c = 1; c < row.cells.size(); ++c) { const int f_block_id = row.cells[c].block_id; const int f_block_size = bs->cols[f_block_id].size; const typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef f_block(row_values + row.cells[c].position, row.block.size, f_block_size); const int r_block = f_block_id - num_eliminate_blocks_; sj -= f_block * typename EigenTypes<kFBlockSize>::ConstVectorRef (z + lhs_row_layout_[r_block], f_block_size); } y_block += e_block.transpose() * sj; ete.template selfadjointView<Eigen::Upper>() .rankUpdate(e_block.transpose(), 1.0); } y_block = ete .template selfadjointView<Eigen::Upper>() .ldlt() .solve(y_block); } } // Update the rhs of the reduced linear system. 
Compute // // F'b - F'E(E'E)^(-1) E'b template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>:: UpdateRhs(const Chunk& chunk, const BlockSparseMatrixBase* A, const double* b, int row_block_counter, const Vector& inverse_ete_g, double* rhs) { const CompressedRowBlockStructure* bs = A->block_structure(); const int e_block_size = inverse_ete_g.rows(); int b_pos = bs->rows[row_block_counter].block.position; for (int j = 0; j < chunk.size; ++j) { const CompressedRow& row = bs->rows[row_block_counter + j]; const double *row_values = A->RowBlockValues(row_block_counter + j); const Cell& e_cell = row.cells.front(); const typename EigenTypes<kRowBlockSize, kEBlockSize>::ConstMatrixRef e_block(row_values + e_cell.position, row.block.size, e_block_size); const typename EigenTypes<kRowBlockSize>::Vector sj = typename EigenTypes<kRowBlockSize>::ConstVectorRef (b + b_pos, row.block.size) - e_block * (inverse_ete_g); for (int c = 1; c < row.cells.size(); ++c) { const int block_id = row.cells[c].block_id; const int block_size = bs->cols[block_id].size; const typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef b(row_values + row.cells[c].position, row.block.size, block_size); const int block = block_id - num_eliminate_blocks_; MutexLock l(rhs_locks_[block]); typename EigenTypes<kFBlockSize>::VectorRef (rhs + lhs_row_layout_[block], block_size).noalias() += b.transpose() * sj; } b_pos += row.block.size; } } // Given a Chunk - set of rows with the same e_block, e.g. in the // following Chunk with two rows. // // E F // [ y11 0 0 0 | z11 0 0 0 z51] // [ y12 0 0 0 | z12 z22 0 0 0] // // this function computes two matrices: the diagonal block matrix // // ete = y11 * y11' + y12 * y12' // // and the off diagonal blocks in the Gauss-Newton Hessian, // // buffer = [y11'(z11 + z12), y12' * z22, y11' * z51] // // which are zero compressed versions of the block sparse matrices E'E // and E'F. // // It also computes the gradient of the e_block, E'b. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>:: ChunkDiagonalBlockAndGradient( const Chunk& chunk, const BlockSparseMatrixBase* A, const double* b, int row_block_counter, typename EigenTypes<kEBlockSize, kEBlockSize>::Matrix* ete, typename EigenTypes<kEBlockSize>::Vector* g, double* buffer, BlockRandomAccessMatrix* lhs) { const CompressedRowBlockStructure* bs = A->block_structure(); int b_pos = bs->rows[row_block_counter].block.position; const int e_block_size = ete->rows(); // Iterate over the rows in this chunk, for each row, compute the // contribution of its F blocks to the Schur complement, the // contribution of its E block to the matrix E'E (ete), and the // corresponding block in the gradient vector. for (int j = 0; j < chunk.size; ++j) { const CompressedRow& row = bs->rows[row_block_counter + j]; const double *row_values = A->RowBlockValues(row_block_counter + j); if (row.cells.size() > 1) { EBlockRowOuterProduct(A, row_block_counter + j, lhs); } // Extract the e_block, ETE += E_i' E_i const Cell& e_cell = row.cells.front(); const typename EigenTypes<kRowBlockSize, kEBlockSize>::ConstMatrixRef e_block(row_values + e_cell.position, row.block.size, e_block_size); ete->template selfadjointView<Eigen::Upper>() .rankUpdate(e_block.transpose(), 1.0); // g += E_i' b_i g->noalias() += e_block.transpose() * typename EigenTypes<kRowBlockSize>::ConstVectorRef (b + b_pos, row.block.size); // buffer = E'F. 
This computation is done by iterating over the // f_blocks for each row in the chunk. for (int c = 1; c < row.cells.size(); ++c) { const int f_block_id = row.cells[c].block_id; const int f_block_size = bs->cols[f_block_id].size; const typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef f_block(row_values + row.cells[c].position, row.block.size, f_block_size); double* buffer_ptr = buffer + FindOrDie(chunk.buffer_layout, f_block_id); typename EigenTypes<kEBlockSize, kFBlockSize>::MatrixRef (buffer_ptr, e_block_size, f_block_size).noalias() += e_block.transpose() * f_block; } b_pos += row.block.size; } } // Compute the outer product F'E(E'E)^{-1}E'F and subtract it from the // Schur complement matrix, i.e // // S -= F'E(E'E)^{-1}E'F. template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>:: ChunkOuterProduct(const CompressedRowBlockStructure* bs, const Matrix& inverse_ete, const double* buffer, const BufferLayoutType& buffer_layout, BlockRandomAccessMatrix* lhs) { // This is the most computationally expensive part of this // code. Profiling experiments reveal that the bottleneck is not the // computation of the right-hand matrix product, but memory // references to the left hand side. const int e_block_size = inverse_ete.rows(); BufferLayoutType::const_iterator it1 = buffer_layout.begin(); // S(i,j) -= bi' * ete^{-1} b_j for (; it1 != buffer_layout.end(); ++it1) { const int block1 = it1->first - num_eliminate_blocks_; const int block1_size = bs->cols[it1->first].size; const typename EigenTypes<kEBlockSize, kFBlockSize>::ConstMatrixRef b1(buffer + it1->second, e_block_size, block1_size); const typename EigenTypes<kFBlockSize, kEBlockSize>::Matrix b1_transpose_inverse_ete = b1.transpose() * inverse_ete; BufferLayoutType::const_iterator it2 = it1; for (; it2 != buffer_layout.end(); ++it2) { const int block2 = it2->first - num_eliminate_blocks_; int r, c, row_stride, col_stride; CellInfo* cell_info = lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride); if (cell_info == NULL) { continue; } const int block2_size = bs->cols[it2->first].size; const typename EigenTypes<kEBlockSize, kFBlockSize>::ConstMatrixRef b2(buffer + it2->second, e_block_size, block2_size); MutexLock l(&cell_info->m); MatrixRef m(cell_info->values, row_stride, col_stride); // We explicitly construct a block object here instead of using // m.block(), as m.block() variant of the constructor does not // allow mixing of template sizing and runtime sizing parameters // like the Matrix class does. Eigen::Block<MatrixRef, kFBlockSize, kFBlockSize> block(m, r, c, block1_size, block2_size); block.noalias() -= b1_transpose_inverse_ete * b2; } } } // For rows with no e_blocks, the schur complement update reduces to S // += F'F. This function iterates over the rows of A with no e_block, // and calls NoEBlockRowOuterProduct on each row. 
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>:: NoEBlockRowsUpdate(const BlockSparseMatrixBase* A, const double* b, int row_block_counter, BlockRandomAccessMatrix* lhs, double* rhs) { const CompressedRowBlockStructure* bs = A->block_structure(); for (; row_block_counter < bs->rows.size(); ++row_block_counter) { const CompressedRow& row = bs->rows[row_block_counter]; const double *row_values = A->RowBlockValues(row_block_counter); for (int c = 0; c < row.cells.size(); ++c) { const int block_id = row.cells[c].block_id; const int block_size = bs->cols[block_id].size; const int block = block_id - num_eliminate_blocks_; VectorRef(rhs + lhs_row_layout_[block], block_size).noalias() += (ConstMatrixRef(row_values + row.cells[c].position, row.block.size, block_size).transpose() * ConstVectorRef(b + row.block.position, row.block.size)); } NoEBlockRowOuterProduct(A, row_block_counter, lhs); } } // A row r of A, which has no e_blocks, gets added to the Schur // Complement as S += r r'. This function is responsible for computing // the contribution of a single row r to the Schur complement. It is // very similar in structure to EBlockRowOuterProduct except for // one difference. It does not use any of the template // parameters. This is because the algorithm used for detecting the // static structure of the matrix A only pays attention to rows with // e_blocks. This is because rows without e_blocks are rare and // typically arise from regularization terms in the original // optimization problem, and have a very different structure than the // rows with e_blocks. Including them in the static structure // detection will lead to most template parameters being set to // dynamic. Since the number of rows without e_blocks is small, the // lack of templating is not an issue. 
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>:: NoEBlockRowOuterProduct(const BlockSparseMatrixBase* A, int row_block_index, BlockRandomAccessMatrix* lhs) { const CompressedRowBlockStructure* bs = A->block_structure(); const CompressedRow& row = bs->rows[row_block_index]; const double *row_values = A->RowBlockValues(row_block_index); for (int i = 0; i < row.cells.size(); ++i) { const int block1 = row.cells[i].block_id - num_eliminate_blocks_; DCHECK_GE(block1, 0); const int block1_size = bs->cols[row.cells[i].block_id].size; const ConstMatrixRef b1(row_values + row.cells[i].position, row.block.size, block1_size); int r, c, row_stride, col_stride; CellInfo* cell_info = lhs->GetCell(block1, block1, &r, &c, &row_stride, &col_stride); if (cell_info != NULL) { MutexLock l(&cell_info->m); MatrixRef m(cell_info->values, row_stride, col_stride); m.block(r, c, block1_size, block1_size) .selfadjointView<Eigen::Upper>() .rankUpdate(b1.transpose(), 1.0); } for (int j = i + 1; j < row.cells.size(); ++j) { const int block2 = row.cells[j].block_id - num_eliminate_blocks_; DCHECK_GE(block2, 0); DCHECK_LT(block1, block2); int r, c, row_stride, col_stride; CellInfo* cell_info = lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride); if (cell_info == NULL) { continue; } const int block2_size = bs->cols[row.cells[j].block_id].size; MutexLock l(&cell_info->m); MatrixRef m(cell_info->values, row_stride, col_stride); m.block(r, c, block1_size, block2_size).noalias() += b1.transpose() * ConstMatrixRef(row_values + row.cells[j].position, row.block.size, block2_size); } } } // For a row with an e_block, compute the contribution S += F'F. This // function has the same structure as NoEBlockRowOuterProduct, except // that this function uses the template parameters. 
template <int kRowBlockSize, int kEBlockSize, int kFBlockSize> void SchurEliminator<kRowBlockSize, kEBlockSize, kFBlockSize>:: EBlockRowOuterProduct(const BlockSparseMatrixBase* A, int row_block_index, BlockRandomAccessMatrix* lhs) { const CompressedRowBlockStructure* bs = A->block_structure(); const CompressedRow& row = bs->rows[row_block_index]; const double *row_values = A->RowBlockValues(row_block_index); for (int i = 1; i < row.cells.size(); ++i) { const int block1 = row.cells[i].block_id - num_eliminate_blocks_; DCHECK_GE(block1, 0); const int block1_size = bs->cols[row.cells[i].block_id].size; const typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef b1(row_values + row.cells[i].position, row.block.size, block1_size); { int r, c, row_stride, col_stride; CellInfo* cell_info = lhs->GetCell(block1, block1, &r, &c, &row_stride, &col_stride); if (cell_info == NULL) { continue; } MutexLock l(&cell_info->m); MatrixRef m(cell_info->values, row_stride, col_stride); Eigen::Block<MatrixRef, kFBlockSize, kFBlockSize> block(m, r, c, block1_size, block1_size); block.template selfadjointView<Eigen::Upper>() .rankUpdate(b1.transpose(), 1.0); } for (int j = i + 1; j < row.cells.size(); ++j) { const int block2 = row.cells[j].block_id - num_eliminate_blocks_; DCHECK_GE(block2, 0); DCHECK_LT(block1, block2); const int block2_size = bs->cols[row.cells[j].block_id].size; int r, c, row_stride, col_stride; CellInfo* cell_info = lhs->GetCell(block1, block2, &r, &c, &row_stride, &col_stride); if (cell_info == NULL) { continue; } const typename EigenTypes<kRowBlockSize, kFBlockSize>::ConstMatrixRef b2(row_values + row.cells[j].position, row.block.size, block2_size); MutexLock l(&cell_info->m); MatrixRef m(cell_info->values, row_stride, col_stride); Eigen::Block<MatrixRef, kFBlockSize, kFBlockSize> block(m, r, c, block1_size, block2_size); block.noalias() += b1.transpose() * b2; } } } } // namespace internal } // namespace ceres #endif // CERES_INTERNAL_SCHUR_ELIMINATOR_IMPL_H_
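// Editor's note: a dense, single-threaded reference (not part of Ceres) for
// what the chunked eliminator above computes. For J = [E F] and residual
// vector b, the reduced system is S z = r with
//   S = F'F - F'E(E'E)^{-1}E'F   and   r = F'b - F'E(E'E)^{-1}E'b,
// which can serve as a test oracle on tiny problems. dense_schur is a
// hypothetical helper name.
#include "Eigen/Dense"

inline void dense_schur(const Eigen::MatrixXd& E, const Eigen::MatrixXd& F,
                        const Eigen::VectorXd& b,
                        Eigen::MatrixXd* S, Eigen::VectorXd* r) {
  // Invert E'E once, mirroring the explicit inverse_ete used per chunk above.
  const Eigen::MatrixXd ete_inv =
      (E.transpose() * E).ldlt().solve(
          Eigen::MatrixXd::Identity(E.cols(), E.cols()));
  *S = F.transpose() * F - F.transpose() * E * ete_inv * E.transpose() * F;
  *r = F.transpose() * b - F.transpose() * E * ete_inv * E.transpose() * b;
}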
opencl_electrum_modern_fmt_plug.c
/* * This software is Copyright (c) 2017 Dhiru Kholia and it is hereby released * to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. * * Based on opencl_pbkdf2_hmac_sha512_fmt_plug.c file. */ #include "arch.h" #if !AC_BUILT #define HAVE_LIBZ 1 #endif #if HAVE_LIBZ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_electrum_modern; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_electrum_modern); #else #include <stdint.h> #include <string.h> #include <zlib.h> #include <openssl/bn.h> #ifdef _OPENMP #include <omp.h> #endif #include "misc.h" #include "arch.h" #include "common.h" #include "formats.h" #include "options.h" #include "common-opencl.h" #include "johnswap.h" #include "secp256k1.h" #include "aes.h" #include "sha2.h" #include "hmac_sha.h" #include "pbkdf2_hmac_common.h" #undef FORMAT_NAME #define FORMAT_NAME "Electrum Wallet 2.8+" #define FORMAT_LABEL "electrum-modern-opencl" #define FORMAT_TAG "$electrum$" #define TAG_LENGTH (sizeof(FORMAT_TAG) - 1) #define ALGORITHM_NAME "PBKDF2-SHA512 OpenCL" #define BINARY_SIZE 16 #define BINARY_ALIGN sizeof(uint32_t) #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #define PLAINTEXT_LENGTH 110 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define KERNEL_NAME "pbkdf2_sha512_kernel" #define SPLIT_KERNEL_NAME "pbkdf2_sha512_loop" #define CONFIG_NAME "pbkdf2_sha512" #define HASH_LOOPS 250 #define ITERATIONS 10000 static struct fmt_tests electrum_tests[] = { // Electrum 2.8.0+ encrypted wallets {"$electrum$4*03c2a94eb01e9453c24c9bf49102356788673cc26fbe27b9bf54b0f150758c7864*4249453103c2a94eb01e9453c24c9bf49102356788673cc26fbe27b9bf54b0f150758c7864355ed45b963901b56cd6c483468247c7c8c76ba11c9cb94633575838cffb8f0cebfc9af91ba402c06cca5c08238c643a0291e66e1a849eb66a9eda17e1496d09f46bfe6f63bfdcd591c260f31b92bd5958ce85c7719983a7395c88570946a59d5dcc2188680aba439cde0dbdfeaba985fe3d1a97d25b81573a92f72aea8c60fa3a4228acb789d7f307f6a19d1025fa6ac81d91d45ef07c0b26d9f85fc6ba07246b8b19d641929aac16ff1c942a3d69b824e3e39a122402aed63d3d12ca299416500459e7353bd56db92102c93f045ccc719cee90d2f891ff6b128886ec90768364bcc89c3393f21a5b57915f4eaf4e3b9c7a3958124b43956a47572ae38df2a11b84f6dc25ddc3d3b1968e3adadc756507118301e8cc490d249dc603f4f46c3bf0b214fd3bfb8dab6f048ba7d60dbee031d386a5aeec6664d2891abbeb0201b437d6e37c140be3e6210078e76afafbd78a8acaf45f21cf83c69218f9bfd3abb0211d57ab1874e9d645171cdaad4887a9fea86003b9948d22d9e7bfaec4c4bd0786cd4d191c82c61e83c61bae06a7c9936af46f8fa121ab696aba24ad8fd8f69537aa713bf271e4be567e7e3ccd141511c96ce634175f845ff680f71bbd595ef5d45d9cfd9a7e099fbab7964add7a76c4820b20952121e5621cb53c9476dc23860a5bc4ba3ecf636dc224503202dc11bf3bc88c70dcc2005684f7d3ebe6a7ea1487423a5145442f8f3d806d5d219560b4bce272ef9d6e32849b692cd91d4c60462b0f813603a52dc84b959051e787d890661e9f439a11fa8819c4fb947ff8dd0a5b7e5e63605f4e9f6eac6f8b2bfd7a9098dd2201c2f4cdaa2d7d0691ccf42b2761a8bb2a08c755077a753a41bcf305c83da8cd9ebaeee0360afb4be00827e167b2c1a3d5975d3a4a1e3b3b56794a155253437710ee3c0d0a2de0c4d631b48808fa946146f09e8ea9888d6c6bad104ebed814e79bdc26be38e8580d8fff6324405c128627079d1e3bafc2479274a3bc4f8196e923c835204e91ce8a9cb235c5349056415ad58a83b41254eda57839cd2e0bb66f125e32c76671f6447b2b0321d021c60706ff6f103ce483986fe0f1cc62307f6a1e89c4b2f334fc6f1f2597f5d68b3948c7655025a04ea858bc33eb341de09bdb4862701abcbc4c907270856de6072ee8d0c9e46e19c50eac454d4ca5fcd1a35f5d239aadc82543deafcd17f0eae2145561b8834dd80d337c574d3e931365db294d66aa4b47
669f92784325b85abae49a8447a2afeb4cac460cba2a9d7b298bd3f69ac31862b92a970ed8d3241227858b0c40b2f6793cdd733020987beb7e6f01826fa2dae2b345f4e8e96da885a00901b20308f37c8613cf28ef997a6f25c741af917a547b38cff7577d2cac2654d5cdac2d0f1135ac6db3d70174b03c4149d134325f1b805ef11cd62531c13436ad1c7cb73f488dc411d349be34523d477953e8b47848e31ec85230a99ecd88c9cbc5d33de132aacd04877123cff599bea3b2e7b931347673cca605b3bc129496d5e80b06ae0eb3fce5c24ea0f8d2ecd4cfb9ed5034b26ed18b564731c78f5344ec863bd78797ad7de722c7a88e047af0364f69a303dc5f716ebda1de9ca21cb49e4091cb975c17f098932e884f36bded1fab34814931b0aeb72b1bc90747f7f5ebe73c547681f7a8d6d74e7acde2ba6e5e998bd6b035ade5fa64171dde4a82ed5ed7f273220d47bbd5a1c2ed4359d02392b746ba653d1c30f63bce161d0555ebc4775262036be51d4a50113bbac6823fd6a0d387a32673dc454c4d9d018cc25885a0d15d3f7488bbe18398d758cbbf1a24eaf71bd1560ff216e342e09efdbfae2872cfdf59ed802420ba8522edfd74f6d728ffa1683e586b53cbec80f00be6478a44d8df1c69a5cdbb50aa75da2f2dd0a679b037b4173f20b9514064d15ff50f1e9beb0112a41cdc0ecf7fb3028fe6f4c7339bb79d50cb7d43cabd8ae198741677d41e411c811c6267e9b4e41d944b035e47406d5120f1ee192db810cf6774*40c7a179573d57c54d0da0a1c4d71e306e1eea823f637f29c3e43b9792469d15", "openwall123"}, {"$electrum$4*0328e536dd1fbbb85d78de1a8c21215d4646cd87d6b6545afcfb203e5bb32e0de4*424945310328e536dd1fbbb85d78de1a8c21215d4646cd87d6b6545afcfb203e5bb32e0de461b1e287a5acff4b40e4abd73ff62dc233c1c7a6a54b3270949281b9d44bc6e746743733360500718826e50bb28ea99a6378dc0b0c578e9d0bf09c667671c82a1bd71c8121edbb4c9cbca93ab0e17e218558ead81755e62b0d4ad547aa1b3beb0b9ee43b11270261c9b38502f00e7f6f096811b7fdae6f3dce85c278d3751fec044027054218ccf20d404bab24380b303f094704e626348a218f44ab88ce2ac5fa7d450069fca3bb53f9359dbbaad0ea1b3859129b19c93ed7888130f8a534f84a629c67edc150a1c5882a83cb0add4615bb569e8dc471de4d38fc8b1e0b9b28040b5ea86093fcdeceaedb6b8f073f6f0ee5541f473a4b1c2bfae4fc91e4bbb40fa2185ecfa4c72010bcf8df05b1a7db45f64307dbc439f8389f0e368e38960b6d61ac88c07ce95a4b03d6d8b13f4c7dc7d7c447097865235ab621aeef38dc4172bf2dc52e701132480127be375fe98834f16d9895dce7f6cdfe900a2ce57eaa6c3036c1b9a661c3c9adbf84f4adfe6d4d9fa9f829f2957cfb353917dc77fd8dd4872b7d90cb71b7d3a29c9bfe3440e02449220acba410fa0af030f51aa2438f7478dbb277d62613112e4eebc66d5d7bdba793fb2073d449954f563284819189ffb5dbcdeb6c95c64bc24e0ef986bce07bafe96ab449ae2b6edaf4f98ffbd392a57bd93c2359444ec4046ae65b440adb96b6e4eef9d06bb04d2f3fa2e4175165bcadbf7e13cc3b6e65e67df901f96a2f154bc763b56b3736a335e1d1bc16e99736f757a4ae56c099645c917360b1ecf8dcefc7281541c6ff65d87cadab4a48f1f6b7b73a3e5a67e2e032abb56b499e73a9f3b69ce065e43b0174639785ae30635d105ebcc827dcf9b19bdd1a92879a5d4bc4e12b5630c188b1b96e3c586e19901b8f96084bcd59b2f4b201a3a8b6e633a5c194901d4609add9671b0bcc12b2b94ae873d201258b36315484e4b9c5f5d6289656baa93eec9e92aec88e2d73d86b9e3d1f24294e3d8ebe9a9f2f6edfbf28f530670c5b086fc4f74df89b4e4cbe06ee7e45cbd238b599d19c2d5da5523b12b1e7050ea0a9b47a5d22c6c3fc476f814f9705dc7ed3aeb1b44fc6b4d69f02a74963dce5057c3c049f92e595a4da5035cffc303a4cb162803aa3f816527a7e466b8424789a0d77e26819615662420c370457e29fcc1938fd754f3acfd21416ce3ab27e9febbc0e24fc7055eddc31e48faa014f9f3695c2e956f0e6c94c507a8d2f8c3aeb4b98b69b6340b6a3acb1acdde9581279f78ee10687616360c018e9f67d6c8bb5950e8fdabd3d0d5808824975aa4a50f88581472212f24ad58a700fe4787642b973924575fe71d1ecd7b2b6acd363f48c40bdd55f35f60a06dee544c266e608fd5a6d263f745e8b11d1160638eb301adfd1a88eddf6d0ccb9e1021e0bde9cf5163583a202b3dc95c255c8cc245a425391163b387c5312d07637272621b94bfde151238c5f55371774ca07603fe3e0a43e92b5cf46096c4c8014e03e730555b61bb544a3998ccd8e45e0f9427c66ce1da1e8cc86d5414fe0d0d49d0a048fb5
5b76eb3a0a0ba2f1a94227eb6b7b58ff3d410bcd782970689dd026350cbde243de749c27f4647ede96996767354aaf14e336bec47b7498774a519d999f15d424ab34c05254ac835c6df8482c3b6e72b879205392f02f2a666185250ab3b0dd70d219de936495f873b3fe8722026b167437d5fc8fd21aa67ba642da8ca68a5823bc8f6da6fd1a50996a3e4d9fb2bd15909a91f217c512561a502d26c4f0baa0145b4acbcdea8adecbeaeff956e0ec6ae77d35872d2d6351e70c6bb101d824f41a2b7029f16708cd4c8b7a894453f82e79523765de14c82106f74a146c8f76cf20caeb35475e881be1c74a1dc0783b0ff9a40060e362ec3bb5e3dc3919914787893b0dc80123f44a44744f107268eb85437bf3116efa5bb03d6903ebd987291e8574344cadffa7f960789a3ef6c933305e6a80319c9cd9a49d208c4d4070f47c44edea53476b7779cec179af985f7c8b4b91efb56e0a35d4ecb1ff684a1fd0ee8a2d473e00cd8fe3a752cf7b62fffda4ebe90c992caacbee70c7d912d360e5dd24074430cb2b694ff2fcca6eb77d10b1b22a26841af36501d9a066e280d25ca215378e7635fda9ce865ca6af9ae962a3b6039dbba483a5ab7bee69d891c9809483744a0b0ab94498d1ada452e3a90a19decee6bf2b97de274413f78bd896fc2634d3e26d4bde265143deebf580693aa1925aea6f6ce003f195a226b04377e662e0d87b4a09299061f13c4b0ad2d4281eac386c03f533b1d2a9fb466814817bf27aa737cdeda708b1db19f550b2bdc8360a6e4a7ded415d5ef826f67a8c3623c01751af885a428c2b504f12d04d8c673b1ec69a8a6f001951e442cecd11aae4fbc77a5c18f065574d4a28ee1bc5a987903b00dc61e6760695c627437bc7bed48e4fa16eccea6fa878e74dbb010fc52af27f36b6e81e70444ce0f4a83f5aeca326d5a842adba562a0d39410f4f700934b1881b2bebac2215261422b8f474673ef583e5431b183199faa764e1e340f873a104b6d4a0c39ab970e2d77e5f8e7335ea3c68e87a85fd45113eb53acfbc8feb3955f971df7cadafb2c4c9cb789c1de9468329915afe521681af9007e1388d5cca660d9b13325ac31242e0403c1d82d871d2efc0706d58532c4609502a807ebd95e64653e3a631f469c01c89cd70247b11bbb61eb15347023b8280ab44d4ca21d455a913889a541325dec2ef257e6cd3bb3d7830ff465240d132aa6ee0b9146682d86c093b5f1f40ce5368f43198968d85070609a178797300e57291ea0c967e2dbe874136406b58f163e68be4325db28b3c684c020be278a7d126efd215c1fb84350864f18926d9f394b109a308468ead25bf2475e79843bbd7f323219ecb2ab658da2d9ded21953f25383a9952fe2e47c3ed3f11c61b85e388c553a59d896a2eceaaf2d0e826bb77b1bb3e0f8ddbb3e04ec0f811063dd3775423d71f8632a3af2cda84d789afe92d710fd35305abcf1f2dd608ef3319eb4e2b21e94352d06836d83caaf8088ce9bbf78b4c16a962581e8766f4c08bdfbc9920f3ab47fe372816a4e8d0f7d78a622ff16af7d71651e4abb8cc0dd920b4e089df5399b2e1a7d06dbc75870ca1498a045c35bde9938361005cca7ba2bb8573e365406f7e12ba2de2d060a6a794fcc22c4f8289f772c309e3a79df55ca3869b010754e89d596de5aa70c779ec8ecf15788827b565c0abb43bb38c036ce987638a6654294efcbaf7b772fbbd9b00b62f4a898854a67a55006ece2fa37dd2ed18d29fc98343a5f166b2df1c5f1caec075987857286a6b782f529ea0fac91c5d5988813bc5c39201bcc236d51932a1545d30b147d743ce399b2e0c4e3a44b4888b16aff1e4c347ea6caee511424a14fe8bb0d6e8e0eb31be05de81b739f6f2646d0a6bf0dfc1859121402b1cca3b052671c5074796b0a87404b07518ad6b423bde12366e110d842dce8639778163f2f8c895abe32a2320593b4e4c51ed94a325d23c7cc02e46a3bed4b1bc322a6924e14705a4f1d5abf3a7f8853270edf58e0aeb7fd124550729570658752f3e9872e43abeddc8dd226761030a26b25203fd5b053dfebbea0f93835df44b2fcd5ce0a2463df58c88f7bf1798*ec90c1ff54632e7c8cfb812eeb14d7ec49ddaf576dca10bfb16f965e6106ce48", "btcr-test-password"}, // Electrum 2.8.0+ encrypted wallet with truncated hash, "electrum28-wallet" from btcrecover project 
{"$electrum$5*0328e536dd1fbbb85d78de1a8c21215d4646cd87d6b6545afcfb203e5bb32e0de4*61b1e287a5acff4b40e4abd73ff62dc233c1c7a6a54b3270949281b9d44bc6e746743733360500718826e50bb28ea99a6378dc0b0c578e9d0bf09c667671c82a1bd71c8121edbb4c9cbca93ab0e17e218558ead81755e62b0d4ad547aa1b3beb0b9ee43b11270261c9b38502f00e7f6f096811b7fdae6f3dce85c278d3751fec044027054218ccf20d404bab24380b303f094704e626348a218f44ab88ce2ac5fa7d450069fca3bb53f9359dbbaad0ea1b3859129b19c93ed7888130f8a534f84a629c67edc150a1c5882a83cb0add4615bb569e8dc471de4d38fc8b1e0b9b28040b5ea86093fcdeceaedb6b8f073f6f0ee5541f473a4b1c2bfae4fc91e4bbb40fa2185ecfa4c72010bcf8df05b1a7db45f64307dbc439f8389f0e368e38960b6d61ac88c07ce95a4b03d6d8b13f4c7dc7d7c447097865235ab621aeef38dc4172bf2dc52e701132480127be375fe98834f16d9895dce7f6cdfe900a2ce57eaa6c3036c1b9a661c3c9adbf84f4adfe6d4d9fa9f829f2957cfb353917dc77fd8dd4872b7d90cb71b7d3a29c9bfe3440e02449220acba410fa0af030f51aa2438f7478dbb277d62613112e4eebc66d5d7bdba793fb2073d449954f563284819189ffb5dbcdeb6c95c64bc24e0ef986bce07bafe96ab449ae2b6edaf4f98ffbd392a57bd93c2359444ec4046ae65b440adb96b6e4eef9d06bb04d2f3fa2e4175165bcadbf7e13cc3b6e65e67df901f96a2f154bc763b56b3736a335e1d1bc16e99736f757a4ae56c099645c917360b1ecf8dcefc7281541c6ff65d87cadab4a48f1f6b7b73a3e5a67e2e032abb56b499e73a9f3b69ce065e43b0174639785ae30635d105ebcc827dcf9b19bdd1a92879a5d4bc4e12b5630c188b1b96e3c586e19901b8f96084bcd59b2f4b201a3a8b6e633a5c194901d4609add9671b0bcc12b2b94ae873d201258b36315484e4b9c5f5d6289656baa93eec9e92aec88e2d73d86b9e3d1f24294e3d8ebe9a9f2f6edfbf28f530670c5b086fc4f74df89b4e4cbe06ee7e45cbd238b599d19c2d5da5523b12b1e7050ea0a9b47a5d22c6c3fc476f814f9705dc7ed3aeb1b44fc6b4d69f02a74963dce5057c3c049f92e595a4da5035cffc303a4cb162803aa3f816527a7e466b8424789a0d77e26819615662420c370457e29fcc1938fd754f3acfd21416ce3ab27e9febbc0e24fc7055eddc31e48faa014f9f3695c2e956f0e6c94c507a8d2f8c3aeb4b98b69b6340b6a3acb1acdde9581279f78ee10687616360c018e9f67d6c8bb5950e8fdabd3d0d5808824975aa4a50f88581472212f24ad58a700fe4787642b973924575fe71d1ecd7b2b6acd363f48c40bdd55f35f60a06dee544c266e608fd5a6d263f745e8b11d1160638eb301adfd1a88eddf6d0ccb9e1021e0bde9cf5163583a202b3dc95c255c8cc24*ec90c1ff54632e7c8cfb812eeb14d7ec49ddaf576dca10bfb16f965e6106ce48", "btcr-test-password"}, {NULL} }; static struct custom_salt { uint32_t type; unsigned char salt[8]; // fake salt uint32_t saltlen; unsigned char ephemeral_pubkey[128]; unsigned char data[16384]; // is 16 KiB enough? uint32_t datalen; unsigned char mac[32]; secp256k1_pubkey pubkey; } *cur_salt; typedef struct { // for plaintext, we must make sure it is a full uint64_t width. uint64_t v[(PLAINTEXT_LENGTH + 7) / 8]; // v must be kept aligned(8) uint64_t length; // keep 64 bit aligned, length is overkill, but easiest way to stay aligned. 
} pass_t; typedef struct { uint64_t hash[8]; } crack_t; typedef struct { // for salt, we append \x00\x00\x00\x01\x80 and must make sure it is a full uint64 width uint64_t salt[(PBKDF2_64_MAX_SALT_SIZE + 1 + 4 + 7) / 8]; // salt must be kept aligned(8) uint32_t length; uint32_t rounds; } salt_t; typedef struct { uint64_t ipad[8]; uint64_t opad[8]; uint64_t hash[8]; uint64_t W[8]; cl_uint rounds; } state_t; static pass_t *host_pass; /** plain ciphertexts **/ static salt_t *host_salt; /** salt **/ static crack_t *host_crack; /** cracked or no **/ static cl_mem mem_in, mem_out, mem_salt, mem_state; static cl_kernel split_kernel; static cl_int cl_error; static struct fmt_main *self; static uint32_t (*crypt_out)[BINARY_SIZE * 2 / sizeof(uint32_t)]; #define STEP 0 #define SEED 256 static const char *warn[] = { "xfer: ", ", init: " , ", crypt: ", ", res xfer: " }; static int split_events[] = { 2, -1, -1 }; //This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" /* ------- Helper functions ------- */ static size_t get_task_max_work_group_size() { size_t min_lws = autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); return MIN(min_lws, autotune_get_task_max_work_group_size(FALSE, 0, split_kernel)); } static void create_clobj(size_t kpc, struct fmt_main *self) { host_pass = mem_calloc(kpc, sizeof(pass_t)); host_crack = mem_calloc(kpc, sizeof(crack_t)); host_salt = mem_calloc(1, sizeof(salt_t)); crypt_out = mem_calloc(kpc, sizeof(*crypt_out)); #define CL_RO CL_MEM_READ_ONLY #define CL_WO CL_MEM_WRITE_ONLY #define CL_RW CL_MEM_READ_WRITE #define CLCREATEBUFFER(_flags, _size, _string) \ clCreateBuffer(context[gpu_id], _flags, _size, NULL, &cl_error); \ HANDLE_CLERROR(cl_error, _string); #define CLKERNELARG(kernel, id, arg, msg) \ HANDLE_CLERROR(clSetKernelArg(kernel, id, sizeof(arg), &arg), msg); mem_in = CLCREATEBUFFER(CL_RO, kpc * sizeof(pass_t), "Cannot allocate mem in"); mem_salt = CLCREATEBUFFER(CL_RO, sizeof(salt_t), "Cannot allocate mem salt"); mem_out = CLCREATEBUFFER(CL_WO, kpc * sizeof(crack_t), "Cannot allocate mem out"); mem_state = CLCREATEBUFFER(CL_RW, kpc * sizeof(state_t), "Cannot allocate mem state"); CLKERNELARG(crypt_kernel, 0, mem_in, "Error while setting mem_in"); CLKERNELARG(crypt_kernel, 1, mem_salt, "Error while setting mem_salt"); CLKERNELARG(crypt_kernel, 2, mem_state, "Error while setting mem_state"); CLKERNELARG(split_kernel, 0, mem_state, "Error while setting mem_state"); CLKERNELARG(split_kernel, 1, mem_out, "Error while setting mem_out"); } static void init(struct fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char build_opts[128]; snprintf(build_opts, sizeof(build_opts), "-DHASH_LOOPS=%u -DPLAINTEXT_LENGTH=%d -DPBKDF2_64_MAX_SALT_SIZE=%d", HASH_LOOPS, PLAINTEXT_LENGTH, PBKDF2_64_MAX_SALT_SIZE); opencl_init("$JOHN/kernels/pbkdf2_hmac_sha512_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], KERNEL_NAME, &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); split_kernel = clCreateKernel(program[gpu_id], SPLIT_KERNEL_NAME, &cl_error); HANDLE_CLERROR(cl_error, "Error creating split kernel"); //Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, HASH_LOOPS, split_events, warn, 2, self, create_clobj, release_clobj, sizeof(state_t), 0, db); //Auto tune execution from shared/included code. 
autotune_run(self, ITERATIONS, 0, (cpu(device_info[gpu_id]) ? 1000000000 : 10000000000ULL)); } } static void release_clobj(void) { if (host_pass) { MEM_FREE(host_pass); MEM_FREE(host_salt); MEM_FREE(host_crack); HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_salt), "Release mem salt"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); HANDLE_CLERROR(clReleaseMemObject(mem_state), "Release mem state"); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static int valid(char *ciphertext, struct fmt_main *self) { char *ctcopy, *keeptr, *p; int value, extra; if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH) != 0) return 0; ctcopy = strdup(ciphertext); keeptr = ctcopy; ctcopy += TAG_LENGTH; if ((p = strtokm(ctcopy, "*")) == NULL) // type goto err; if (!isdec(p)) goto err; value = atoi(p); if (value != 4 && value != 5) goto err; if ((p = strtokm(NULL, "*")) == NULL) // ephemeral_pubkey goto err; if (hexlenl(p, &extra) > 128 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // data goto err; if (hexlenl(p, &extra) > 16384 * 2 || extra) goto err; if ((p = strtokm(NULL, "*")) == NULL) // data goto err; if (hexlenl(p, &extra) > 32 * 2 || extra) goto err; MEM_FREE(keeptr); return 1; err: MEM_FREE(keeptr); return 0; } static void *get_salt(char *ciphertext) { static struct custom_salt cs; secp256k1_context *ctx; char *ctcopy = strdup(ciphertext); char *keeptr = ctcopy; char *p; int i, length; memset(&cs, 0, SALT_SIZE); ctcopy += TAG_LENGTH; p = strtokm(ctcopy, "*"); cs.type = atoi(p); p = strtokm(NULL, "*"); length = strlen(p) / 2; for (i = 0; i < length; i++) cs.ephemeral_pubkey[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); cs.datalen = strlen(p) / 2; for (i = 0; i < cs.datalen; i++) cs.data[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; p = strtokm(NULL, "*"); for (i = 0; i < 32; i++) cs.mac[i] = (atoi16[ARCH_INDEX(p[2 * i])] << 4) | atoi16[ARCH_INDEX(p[2 * i + 1])]; ctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE); secp256k1_ec_pubkey_parse(ctx, &cs.pubkey, cs.ephemeral_pubkey, length); secp256k1_context_destroy(ctx); // we append the count and EOM here, one time. 
	memcpy(cs.salt, "\x0\x0\x0\x1\x80", 5);
	cs.saltlen = 5; // we include the x80 byte in our saltlen, but the .cl kernel knows to reduce saltlen by 1

	MEM_FREE(keeptr);

	return &cs;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt*)salt;
	memcpy(host_salt->salt, cur_salt->salt, cur_salt->saltlen);
	host_salt->length = cur_salt->saltlen;
	host_salt->rounds = 1024; // fixed

	HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_salt, CL_FALSE, 0,
		sizeof(salt_t), host_salt, 0, NULL, NULL), "Copy salt to gpu");
}

void *electrum_get_binary(char *ciphertext)
{
	static union {
		unsigned char c[BINARY_SIZE];
		uint32_t dummy;
	} buf;
	unsigned char *out = buf.c;
	char *p;
	int i;

	p = strrchr(ciphertext, '*') + 1;
	for (i = 0; i < BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}

	return out;
}

static const char *group_order = "fffffffffffffffffffffffffffffffebaaedce6af48a03bbfd25e8cd0364141";

// The decrypted and decompressed wallet should start with one of these two,
// Christopher Gurnee
#define EXPECTED_BYTES_1 "{\n \""
#define EXPECTED_BYTES_2 "{\r\n \""

static int crypt_all(int *pcount, struct db_salt *salt)
{
	int i;
	const int count = *pcount;
	int index;
	int loops = (host_salt->rounds + HASH_LOOPS - 1) / HASH_LOOPS;
	size_t *lws = local_work_size ? &local_work_size : NULL;

	global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size);

	// Copy data to gpu
	BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0,
		global_work_size * sizeof(pass_t), host_pass, 0, NULL,
		multi_profilingEvent[0]), "Copy data to gpu");

	// Run kernel
	BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1,
		NULL, &global_work_size, lws, 0, NULL,
		multi_profilingEvent[1]), "Run kernel");

	for (i = 0; i < (ocl_autotune_running ? 1 : loops); i++) {
		BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], split_kernel,
			1, NULL, &global_work_size, lws, 0, NULL,
			multi_profilingEvent[2]), "Run split kernel");
		BENCH_CLERROR(clFinish(queue[gpu_id]), "clFinish");
		opencl_process_event();
	}

	// Read the result back
	BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0,
		global_work_size * sizeof(crack_t), host_crack, 0, NULL,
		multi_profilingEvent[3]), "Copy result back");

	if (!ocl_autotune_running) {
#ifdef _OPENMP
#pragma omp parallel for
#endif
		for (index = 0; index < count; index++) {
			BIGNUM *p, *q, *r;
			BN_CTX *ctx;
			uint64_t u[8];
			unsigned char static_privkey[64];
			unsigned char shared_pubkey[33];
			unsigned char keys[128];
			unsigned char cmac[32];
			secp256k1_context *sctx;
			SHA512_CTX md_ctx;
			int shared_pubkeylen = 33;
			int j;

			memcpy(u, host_crack[index].hash, 64);
			for (j = 0; j < 8; j++)
				u[j] = JOHNSWAP64(u[j]);
			memcpy(static_privkey, u, 64);

			// do static_privkey % GROUP_ORDER
			p = BN_bin2bn(static_privkey, 64, NULL);
			q = BN_new();
			r = BN_new();
			BN_hex2bn(&q, group_order);
			ctx = BN_CTX_new();
			BN_mod(r, p, q, ctx);
			BN_CTX_free(ctx);
			BN_free(p);
			BN_free(q);
			BN_bn2bin(r, static_privkey);
			BN_free(r);

			sctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
			// multiply point with a scalar, shared_pubkey is compressed representation
			secp256k1_mul(sctx, shared_pubkey, &cur_salt->pubkey, static_privkey);
			secp256k1_context_destroy(sctx);

			SHA512_Init(&md_ctx);
			SHA512_Update(&md_ctx, shared_pubkey, shared_pubkeylen);
			SHA512_Final(keys, &md_ctx);

			if (cur_salt->type == 4) {
				// calculate mac of data
				hmac_sha256(keys + 32, 32, cur_salt->data, cur_salt->datalen, cmac, 32);
				memcpy(crypt_out[index], cmac, BINARY_SIZE);
			} else if (cur_salt->type == 5) {
				z_stream z;
				unsigned char iv[16];
				unsigned char out[512] = { 0 };
				unsigned char fout[512] = { 0 };
				AES_KEY aes_decrypt_key;

				// common zlib settings
				z.zalloc = Z_NULL;
				z.zfree = Z_NULL;
				z.opaque = Z_NULL;
				z.avail_in = 512;
				z.avail_out = 512;
				z.next_out = fout;

				memcpy(iv, keys, 16);
				memset(crypt_out[index], 0, BINARY_SIZE);

				// fast zlib based rejection test, is this totally safe?
				AES_set_decrypt_key(keys + 16, 128, &aes_decrypt_key);
				AES_cbc_encrypt(cur_salt->data, out, 16, &aes_decrypt_key, iv, AES_DECRYPT);
				if ((memcmp(out, "\x78\x9c", 2) != 0) || (out[2] & 0x7) != 0x5) {
				} else {
					AES_set_decrypt_key(keys + 16, 128, &aes_decrypt_key);
					AES_cbc_encrypt(cur_salt->data + 16, out + 16, 512 - 16, &aes_decrypt_key, iv, AES_DECRYPT);
					z.next_in = out;
					inflateInit2(&z, 15);
					inflate(&z, Z_NO_FLUSH);
					inflateEnd(&z);
					if ((memcmp(fout, EXPECTED_BYTES_1, 7) == 0) || (memcmp(fout, EXPECTED_BYTES_2, 8) == 0))
						memcpy(crypt_out[index], cur_salt->mac, BINARY_SIZE); // dirty hack!
				}
			}
		}
	}

	return count;
}

static int cmp_all(void *binary, int count)
{
	int index = 0;

	for (; index < count; index++)
		if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
			return 1;
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}

static int cmp_exact(char *source, int index)
{
	return 1;
}

static void set_key(char *key, int index)
{
	int saved_len = MIN(strlen(key), PLAINTEXT_LENGTH);

	// make sure LAST uint64 that has any key in it gets null, since we simply
	// ^= the whole uint64 with the ipad/opad mask
	strncpy((char*)host_pass[index].v, key, PLAINTEXT_LENGTH);
	host_pass[index].length = saved_len;
}

static char *get_key(int index)
{
	static char ret[PLAINTEXT_LENGTH + 1];

	memcpy(ret, host_pass[index].v, PLAINTEXT_LENGTH);
	ret[host_pass[index].length] = 0;

	return ret;
}

struct fmt_main fmt_opencl_electrum_modern = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_HUGE_INPUT,
		{ NULL },
		{ FORMAT_TAG },
		electrum_tests
	}, {
		init,
		done,
		reset,
		fmt_default_prepare,
		valid,
		fmt_default_split,
		electrum_get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash
		},
		fmt_default_salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			fmt_default_get_hash
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
#endif /* HAVE_OPENCL */
#endif /* HAVE_LIBZ */
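/* -------------------------------------------------------------------------
 * A hypothetical standalone sketch (not part of this format) of the idea
 * behind the "fast zlib based rejection test" in crypt_all() above: the
 * wallet payload is zlib-compressed JSON, so a wrong key essentially never
 * decrypts to a plausible zlib header. The general zlib rule is that the
 * first two bytes CMF, FLG satisfy (CMF * 256 + FLG) % 31 == 0, with
 * CMF == 0x78 meaning deflate with a 32 KiB window; 0x78 0x9c is the
 * default-compression case the code above compares against. */
#include <stdint.h>

static int looks_like_zlib_header(const uint8_t *buf)
{
	if (buf[0] != 0x78)	/* CMF: deflate, 32 KiB window */
		return 0;
	return ((buf[0] * 256 + buf[1]) % 31) == 0;	/* FCHECK holds */
}
/* ------------------------------------------------------------------------- */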
Perft.h
// JAGLAVAK CHESS ENGINE (c) 2019 Stuart Riffle
#pragma once

const int MAX_PARALLEL_DEPTH = 5;

static void GatherPerftParallelPositions( const Position& pos, int depth, vector< Position >* dest )
{
    MoveList valid;
    valid.FindMoves( pos );

    for( int i = 0; i < valid._Count; i++ )
    {
        Position next = pos;
        next.Step( valid._Move[i] );

        if( depth == (MAX_PARALLEL_DEPTH + 1) )
            dest->push_back( next );
        else
            GatherPerftParallelPositions( next, depth - 1, dest );
    }
}

static u64 CalcPerftParallel( const Position& pos, int depth );

static u64 CalcPerftInternal( const Position& pos, int depth )
{
    if( (depth > MAX_PARALLEL_DEPTH) && (depth <= MAX_PARALLEL_DEPTH + 3) )
        return( CalcPerftParallel( pos, depth ) );

    MoveList valid;
    valid.FindMoves( pos );

    u64 total = 0;
    for( int i = 0; i < valid._Count; i++ )
    {
        Position next = pos;
        next.Step( valid._Move[i] );

        if( depth == 2 )
        {
            MoveList dummy;
            total += dummy.FindMoves( next );
        }
        else
        {
            total += CalcPerftInternal( next, depth - 1 );
        }
    }

    return( total );
}

static u64 CalcPerftParallel( const Position& pos, int depth )
{
    // Reserve rather than size-construct: a sized constructor would leave
    // 16384 default-constructed positions in the frontier, and the loop
    // below would count them too.
    vector< Position > positions;
    positions.reserve( 16384 );
    GatherPerftParallelPositions( pos, depth, &positions );

    u64 total = 0;

    #pragma omp parallel for reduction(+: total) schedule(dynamic)
    for( int i = 0; i < (int) positions.size(); i++ )
    {
        u64 subtotal = CalcPerftInternal( positions[i], MAX_PARALLEL_DEPTH );
        total = total + subtotal;
    }

    return( total );
}

static u64 CalcPerft( const Position& pos, int depth )
{
    if( depth < 2 )
    {
        MoveList dummy;
        return( dummy.FindMoves( pos ) );
    }

    return( CalcPerftInternal( pos, depth ) );
}

static map< MoveSpec, u64 > DividePerft( const Position& pos, int depth )
{
    MoveList valid;
    valid.FindMoves( pos );

    map< MoveSpec, u64 > result;
    for( int i = 0; i < valid._Count; i++ )
    {
        Position next = pos;
        next.Step( valid._Move[i] );

        u64 count = (depth > 1)? CalcPerft( next, depth - 1 ) : 1;
        result[valid._Move[i]] = count;
    }

    return result;
}
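// ---------------------------------------------------------------------------
// The shape of CalcPerftParallel above -- gather a frontier of positions
// serially, then count each subtree independently under an OpenMP
// reduction -- is the standard lock-free way to parallelize perft. A
// minimal self-contained sketch of the same pattern with hypothetical
// stand-in types (assumes only OpenMP and the standard library, not the
// engine's Position/MoveList):
#include <cstdint>
#include <vector>

static uint64_t CountSubtree( int item )
{
    // Placeholder for the serial recursion (CalcPerftInternal above).
    uint64_t n = 0;
    for( int i = 0; i < item; i++ )
        n += (uint64_t) i;
    return n;
}

static uint64_t CountFrontier( const std::vector< int >& frontier )
{
    uint64_t total = 0;

    // dynamic schedule because subtree sizes vary wildly;
    // reduction(+) avoids contention on a shared counter.
    #pragma omp parallel for reduction(+: total) schedule(dynamic)
    for( int i = 0; i < (int) frontier.size(); i++ )
        total += CountSubtree( frontier[i] );

    return total;
}
// ---------------------------------------------------------------------------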
distribute_simd_misc_messages.c
// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s // RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd foo // expected-error@+1 {{unexpected OpenMP directive '#pragma omp distribute simd'}} #pragma omp distribute simd safelen(4) void test_no_clause() { int i; #pragma omp target #pragma omp teams #pragma omp distribute simd for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{statement after '#pragma omp distribute simd' must be a for loop}} #pragma omp distribute simd ++i; } void test_branch_protected_scope() { int i = 0; L1: ++i; int x[24]; #pragma omp target #pragma omp teams #pragma omp distribute simd for (i = 0; i < 16; ++i) { if (i == 5) goto L1; // expected-error {{use of undeclared label 'L1'}} else if (i == 6) return; // expected-error {{cannot return from OpenMP region}} else if (i == 7) goto L2; else if (i == 8) { L2: x[i]++; } } if (x[0] == 0) goto L2; // expected-error {{use of undeclared label 'L2'}} else if (x[1] == 1) goto L1; } void test_invalid_clause() { int i; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd foo bar for (i = 0; i < 16; ++i) ; } void test_non_identifiers() { int i, x; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd; for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd private(x); for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+1 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} #pragma omp distribute simd, private(x); for (i = 0; i < 16; ++i) ; } extern int foo(); void test_safelen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd safelen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd safelen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd safelen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // xxpected-error@+1 {{expected expression}} #pragma omp distribute simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd safelen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd safelen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd safelen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd safelen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'safelen' clause must be a strictly positive integer value}} #pragma omp distribute simd safelen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd simdlen for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd simdlen() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd simdlen 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4 for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, , 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd simdlen(4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} #pragma omp distribute simd simdlen(4, 8) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd simdlen(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd simdlen(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'simdlen' clause must be a strictly positive integer value}} #pragma omp distribute simd simdlen(5 - 5) for (i = 0; i < 16; ++i) ; } void test_safelen_simdlen() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute simd simdlen(6) safelen(5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{the value of 'simdlen' parameter must be less than or equal to the value of the 'safelen' parameter}} #pragma omp distribute simd safelen(5) simdlen(6) for (i = 0; i < 16; ++i) ; } void test_collapse() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected '('}} #pragma omp distribute simd collapse for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd collapse() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd collapse(, ) for (i = 0; i < 16; 
++i) ; #pragma omp target #pragma omp teams // expected-warning@+2 {{extra tokens at the end of '#pragma omp distribute simd' are ignored}} // expected-error@+1 {{expected '('}} #pragma omp distribute simd collapse 4) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4 for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, ) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // xxpected-error@+1 {{expected expression}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, , 4) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(4) for (int i1 = 0; i1 < 16; ++i1) for (int i2 = 0; i2 < 16; ++i2) for (int i3 = 0; i3 < 16; ++i3) for (int i4 = 0; i4 < 16; ++i4) foo(); #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} // expected-note@+1 {{to match this '('}} expected-note@+1 {{as specified in 'collapse' clause}} #pragma omp distribute simd collapse(4, 8) for (i = 0; i < 16; ++i) ; // expected-error {{expected 4 for loops after '#pragma omp distribute simd', but found only 1}} #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd collapse(2.5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expression is not an integer constant expression}} #pragma omp distribute simd collapse(foo()) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(-5) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(0) for (i = 0; i < 16; ++i) ; 
#pragma omp target #pragma omp teams // expected-error@+1 {{argument to 'collapse' clause must be a strictly positive integer value}} #pragma omp distribute simd collapse(5 - 5) for (i = 0; i < 16; ++i) ; // expected-note@+3 {{defined as reduction}} #pragma omp target #pragma omp teams #pragma omp distribute simd collapse(2) reduction(+ : i) for (i = 0; i < 16; ++i) // expected-note@+1 {{variable with automatic storage duration is predetermined as private; perhaps you forget to enclose 'omp for' directive into a parallel or another task region?}} for (int j = 0; j < 16; ++j) // expected-error@+2 2 {{reduction variable must be shared}} // expected-error@+1 {{OpenMP constructs may not be nested inside a simd region}} #pragma omp for reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; #pragma omp target #pragma omp teams for (i = 0; i < 16; ++i) for (int j = 0; j < 16; ++j) #pragma omp distribute simd reduction(+ : i, j) for (int k = 0; k < 16; ++k) i += j; } void test_linear() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd linear(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd linear(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd linear(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd linear(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd linear(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd linear(x, y, z) for (i = 0; i < 16; ++i) ; } void test_aligned() { int i; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // 
expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd aligned(0) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{use of undeclared identifier 'x'}} #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{use of undeclared identifier 'x'}} // expected-error@+1 {{use of undeclared identifier 'y'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{use of undeclared identifier 'x'}} // expected-error@+2 {{use of undeclared identifier 'y'}} // expected-error@+1 {{use of undeclared identifier 'z'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; int *x, y, z[25]; // expected-note 4 {{'y' defined here}} #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd aligned(x :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x :, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd aligned(x : 2 * 2) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd aligned(x : 1, y, z : 1) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+2 {{defined as aligned}} // expected-error@+1 {{a variable cannot appear in more than one aligned clause}} #pragma omp distribute simd aligned(x) aligned(z, x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-note@+3 {{defined as aligned}} // expected-error@+2 {{a variable cannot appear in more than one aligned clause}} // expected-error@+1 2 {{argument of aligned clause should be array or pointer, not 'int'}} #pragma omp distribute simd aligned(x, y, z) aligned(y, z) for (i = 0; i < 16; ++i) ; } void test_private() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}} #pragma omp distribute simd private( for (i = 0; i < 16; ++i) ; #pragma 
omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd private(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd private(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd private(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd private(x, y, z) for (i = 0; i < 16; ++i) { x = y * i + z; } } void test_firstprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd firstprivate( for (i = 0; i < 16; ++i) ; } void test_lastprivate() { int i; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}} // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd lastprivate(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 2 {{expected expression}} #pragma omp distribute simd lastprivate(, ) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd lastprivate(int) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd lastprivate(0) for (i = 0; i < 16; ++i) ; int x, y, z; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd lastprivate(x, y, z) for (i = 0; i < 16; ++i) ; } void test_reduction() { int i, x, y; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction( for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction() for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+2 {{expected expression}} // expected-warning@+1 
{{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction( : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected identifier}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(, for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // expected-error@+2 {{expected expression}} // expected-warning@+1 {{missing ':' after reduction identifier - ignoring}} #pragma omp distribute simd reduction(+ for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+3 {{expected ')'}} expected-note@+3 {{to match this '('}} // // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+: for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ :, y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected expression}} #pragma omp distribute simd reduction(+ : x, + : y) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected identifier}} #pragma omp distribute simd reduction(% : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(+ : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(* : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(- : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(& : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(^ : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(&& : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(|| : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(max : x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams #pragma omp distribute simd reduction(min : x) for (i = 0; i < 16; ++i) ; struct X { int x; }; struct X X; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : X.x) for (i = 0; i < 16; ++i) ; #pragma omp target #pragma omp teams // expected-error@+1 {{expected variable name}} #pragma omp distribute simd reduction(+ : x + x) for (i = 0; i < 16; ++i) ; } void test_loop_messages() { float a[100], b[100], c[100]; #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer type}} #pragma omp distribute simd for (float fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } #pragma omp target #pragma omp teams // expected-error@+2 {{variable must be of integer or pointer 
type}} #pragma omp distribute simd for (double fi = 0; fi < 10.0; fi++) { c[(int)fi] = a[(int)fi] + b[(int)fi]; } } void linear_modifiers(int argc) { int k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(k) for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(val(k)) for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(uval(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(ref(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; #pragma omp target #pragma omp teams #pragma omp distribute simd linear(foo(k)) // expected-error {{expected 'val' modifier}} for (k = 0; k < argc; ++k) ++k; }
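/* For contrast with the diagnostic cases exercised above, a minimal
 * well-formed use of the construct (a sketch, not part of the test file):
 * 'distribute simd' must be closely nested in a teams region, and a
 * 'simdlen' value must not exceed 'safelen'. */
void saxpy_ok(int n, float a, float *x, float *y) {
  int i;
#pragma omp target
#pragma omp teams
#pragma omp distribute simd simdlen(4) safelen(8)
  for (i = 0; i < n; ++i)
    y[i] = a * x[i] + y[i];
}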
GB_binop__eq_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__eq_fp32 // A.*B function (eWiseMult): GB_AemultB__eq_fp32 // A*D function (colscale): GB_AxD__eq_fp32 // D*A function (rowscale): GB_DxB__eq_fp32 // C+=B function (dense accum): GB_Cdense_accumB__eq_fp32 // C+=b function (dense accum): GB_Cdense_accumb__eq_fp32 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__eq_fp32 // C=scalar+B GB_bind1st__eq_fp32 // C=scalar+B' GB_bind1st_tran__eq_fp32 // C=A+scalar GB_bind2nd__eq_fp32 // C=A'+scalar GB_bind2nd_tran__eq_fp32 // C type: bool // A type: float // B,b type: float // BinaryOp: cij = (aij == bij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 0 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 0 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ float bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = (x == y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_EQ || GxB_NO_FP32 || GxB_NO_EQ_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__eq_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__eq_fp32 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { #include "GB_dense_subassign_23_template.c" } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__eq_fp32 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if 0 { // get the scalar b for C += b, of type float float bwork = (*((float *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__eq_fp32 ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__eq_fp32 ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__eq_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const 
int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__eq_fp32 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__eq_fp32 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; float x = (*((float *) x_input)) ; float *Bx = (float *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; float bij = Bx [p] ; Cx [p] = (x == bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__eq_fp32 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; float *Ax = (float *) Ax_input ; float y = (*((float *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; float aij = Ax [p] ; Cx [p] = (aij == y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (x == aij) ; \ } GrB_Info GB_bind1st_tran__eq_fp32 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const 
int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ float #if GB_DISABLE return (GrB_NO_VALUE) ; #else float x = (*((const float *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ float } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ float aij = Ax [pA] ; \ Cx [pC] = (aij == y) ; \ } GrB_Info GB_bind2nd_tran__eq_fp32 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else float y = (*((const float *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
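The numeric kernels in this file pull their inner loops from included templates (GB_AxB_colscale_meta.c, GB_add_template.c, and so on), so the arithmetic itself never appears here. As a hedged sketch, not the real template, this is what C = A*D reduces to for the EQ_FP32 operator, assuming plain CSC arrays Ap/Ax for A and one diagonal value Dx[j] per column (C has the same pattern as A):

/* Editor's sketch (not the actual GB_AxB_colscale_meta.c template). */
for (int64_t j = 0 ; j < ncols ; j++)
{
    float djj = Dx [j] ;                  /* D(j,j) */
    for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++)
    {
        float aij = Ax [p] ;              /* GB_GETA */
        Cx [p] = (aij == djj) ;           /* GB_BINOP: cij = (aij == djj) */
    }
}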
test_master1.c
//===-- test_master1.cc - Test the "master" construct -------------*- C -*-===// // // Part of the LOMP project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// // // This file has been modified from the file // openmp/runtime/test/master/omp_master.c // of the LLVM project (https://github.com/llvm/llvm-project) // under the Apache License v2.0 with LLVM Exceptions. // //===----------------------------------------------------------------------===// #include <stdio.h> #include <stdlib.h> #include <omp.h> #include "tests.h" int test_omp_master(void) { int nthreads; int executing_thread; nthreads = 0; executing_thread = -1; #pragma omp parallel { #pragma omp master { #pragma omp atomic nthreads++; executing_thread = omp_get_thread_num(); } /* end of master*/ } /* end of parallel*/ return ((nthreads == 1) && (executing_thread == 0)); } int main(void) { int i; int num_failed = 0; for (i = 0; i < REPETITIONS; i++) { if (!test_omp_master()) { num_failed++; } } return num_failed != 0 ? EXIT_FAILURE : EXIT_SUCCESS; }
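A portability aside, sketch only: OpenMP 5.1 deprecates master in favor of the functionally identical masked construct, which without a filter clause likewise binds to the primary thread (thread 0). Under a 5.1-aware compiler the same test body could be written as:

// Editor's sketch of the OpenMP 5.1 spelling of the test body above.
#pragma omp parallel
{
#pragma omp masked
  {
#pragma omp atomic
    nthreads++;
    executing_thread = omp_get_thread_num();
  } /* end of masked */
} /* end of parallel */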
GB_subassign_13.c
//------------------------------------------------------------------------------ // GB_subassign_13: C(I,J)<!M> = scalar ; using S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // Method 13: C(I,J)<!M> = scalar ; using S // M: present // Mask_comp: true // C_replace: false // accum: NULL // A: scalar // S: constructed #include "GB_subassign_methods.h" GrB_Info GB_subassign_13 ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_Matrix M, const void *scalar, const GrB_Type atype, const GrB_Matrix S, GB_Context Context ) { //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_GET_C ; const bool C_is_hyper = C->is_hyper ; const int64_t Cnvec = C->nvec ; const int64_t *restrict Ch = C->h ; const int64_t *restrict Cp = C->p ; GB_GET_MASK ; const bool M_is_hyper = M->is_hyper ; const int64_t Mnvec = M->nvec ; GB_GET_SCALAR ; GB_GET_S ; const int64_t *restrict Sh = S->h ; const int64_t Snvec = S->nvec ; const bool S_is_hyper = S->is_hyper ; GrB_BinaryOp accum = NULL ; //-------------------------------------------------------------------------- // Method 13: C(I,J)<!M> = scalar ; using S //-------------------------------------------------------------------------- // Time: Close to optimal; must visit all IxJ, so Omega(|I|*|J|) is // required. The sparsity of !M cannot be exploited. // Methods 13, 15, 17, and 19 are very similar. 
//-------------------------------------------------------------------------- // Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19) //-------------------------------------------------------------------------- GB_SUBASSIGN_IXJ_SLICE ; //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (int taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 ; //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t j = kfirst ; j <= klast ; j++) { //------------------------------------------------------------------ // get jC, the corresponding vector of C //------------------------------------------------------------------ GB_GET_jC ; //------------------------------------------------------------------ // get S(iA_start:end,j) and M(iA_start:end,j) //------------------------------------------------------------------ GB_GET_VECTOR_FOR_IXJ (S) ; GB_GET_VECTOR_FOR_IXJ (M) ; //------------------------------------------------------------------ // C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar //------------------------------------------------------------------ for (int64_t iA = iA_start ; iA < iA_end ; iA++) { //-------------------------------------------------------------- // Get the indices at the top of each list. //-------------------------------------------------------------- int64_t iS = (pS < pS_end) ? Si [pS] : INT64_MAX ; int64_t iM = (pM < pM_end) ? Mi [pM] : INT64_MAX ; //-------------------------------------------------------------- // find the smallest index of [iS iA iM] (always iA) //-------------------------------------------------------------- int64_t i = iA ; //-------------------------------------------------------------- // get M(i,j) //-------------------------------------------------------------- bool mij ; if (i == iM) { // mij = (bool) M [pM] cast_M (&mij, Mx +(pM*msize), 0) ; GB_NEXT (M) ; } else { // mij not present, implicitly false ASSERT (i < iM) ; mij = false ; } // complement the mask entry mij since Mask_comp is true mij = !mij ; //-------------------------------------------------------------- // assign the entry //-------------------------------------------------------------- if (i == iS) { ASSERT (i == iA) ; { // both S (i,j) and A (i,j) present if (mij) { // ----[C A 1] or [X A 1]--------------------------- // [C A 1]: action: ( =A ): copy A, no accum // [X A 1]: action: ( undelete ): zombie lives GB_C_S_LOOKUP ; GB_noaccum_C_A_1_scalar ; } GB_NEXT (S) ; } } else { ASSERT (i == iA) ; { // S (i,j) is not present, A (i,j) is present if (mij) { // ----[. A 1]-------------------------------------- // [. 
A 1]: action: ( insert ) task_pending++ ; } } } } } GB_PHASE1_TASK_WRAPUP ; } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (int taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 ; //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t j = kfirst ; j <= klast ; j++) { //------------------------------------------------------------------ // get jC, the corresponding vector of C //------------------------------------------------------------------ GB_GET_jC ; //------------------------------------------------------------------ // get S(iA_start:end,j) and M(iA_start:end,j) //------------------------------------------------------------------ GB_GET_VECTOR_FOR_IXJ (S) ; GB_GET_VECTOR_FOR_IXJ (M) ; //------------------------------------------------------------------ // C(I(iA_start,iA_end-1),jC)<!M,repl> = scalar //------------------------------------------------------------------ for (int64_t iA = iA_start ; iA < iA_end ; iA++) { //-------------------------------------------------------------- // Get the indices at the top of each list. //-------------------------------------------------------------- int64_t iS = (pS < pS_end) ? Si [pS] : INT64_MAX ; int64_t iM = (pM < pM_end) ? Mi [pM] : INT64_MAX ; //-------------------------------------------------------------- // find the smallest index of [iS iA iM] (always iA) //-------------------------------------------------------------- int64_t i = iA ; //-------------------------------------------------------------- // get M(i,j) //-------------------------------------------------------------- bool mij ; if (i == iM) { // mij = (bool) M [pM] cast_M (&mij, Mx +(pM*msize), 0) ; GB_NEXT (M) ; } else { // mij not present, implicitly false ASSERT (i < iM) ; mij = false ; } // complement the mask entry mij since Mask_comp is true mij = !mij ; //-------------------------------------------------------------- // assign the entry //-------------------------------------------------------------- if (i == iS) { ASSERT (i == iA) ; { GB_NEXT (S) ; } } else { ASSERT (i == iA) ; { // S (i,j) is not present, A (i,j) is present if (mij) { // ----[. A 1]-------------------------------------- // [. A 1]: action: ( insert ) int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; GB_PENDING_INSERT (scalar) ; } } } } } GB_PHASE2_TASK_WRAPUP ; } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
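Both phases above apply the same complemented-mask rule before touching C; stripped of the GB_* macros it is compact enough to state directly. In the sketch below, M_entry_present and M_value are hypothetical stand-ins for the pM lookup performed in the code above:

/* Editor's sketch of the Method 13 mask rule.  Mask_comp is true, so
   C(iC,jC) receives the scalar exactly when M(i,j) is absent or false. */
bool mij = M_entry_present ? M_value : false ;  /* absent => implicit false */
mij = !mij ;                                    /* complement the mask */
if (mij)
{
    /* S(i,j) present: update C in place (possibly reviving a zombie);
       S(i,j) absent: queue the update as a pending tuple for phase 2. */
}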
GB_unop__identity_fp64_uint64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fp64_uint64 // op(A') function: GB_unop_tran__identity_fp64_uint64 // C type: double // A type: uint64_t // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ uint64_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint64_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint64_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fp64_uint64 ( double *Cx, // Cx and Ax may be aliased const uint64_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint64_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp64_uint64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
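The transpose kernel above takes its body from the included GB_unop_transpose.c, so the point where the cast is applied is not visible in this file. A freestanding serial sketch (not the real template) of a bucket transpose applying the same cast, assuming CSC arrays Ap/Ai/Ax for A and a Cp array preloaded with the cumulative row counts of A (consumed here as a cursor array):

/* Editor's sketch: C = A' with the identity unop and uint64_t -> double cast. */
for (int64_t j = 0 ; j < ncols ; j++)
{
    for (int64_t p = Ap [j] ; p < Ap [j+1] ; p++)
    {
        int64_t q = Cp [Ai [p]]++ ;   /* next free slot in row Ai[p] of C */
        Ci [q] = j ;
        uint64_t aij = Ax [p] ;       /* GB_GETA */
        Cx [q] = (double) aij ;       /* GB_CAST + GB_OP (identity) */
    }
}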
3d25pt_var.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-1, 3D 25 point stencil with axis-symmetric variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. */ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*13); for(m=0; m<13;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 8; tile_size[1] = 8; tile_size[2] = 32; tile_size[3] = 32; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<13; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library.
The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=Nt-1;t1++) { lbp=ceild(t1+1,2); ubp=min(floord(4*Nt+Nz-9,8),floord(4*t1+Nz-2,8)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-6,8),ceild(8*t2-Nz-19,32));t3<=min(floord(4*Nt+Ny-9,32),floord(4*t1+Ny-1,32));t3++) { for (t4=max(max(ceild(t1-6,8),ceild(8*t2-Nz-19,32)),ceild(32*t3-Ny-19,32));t4<=min(min(floord(4*Nt+Nx-9,32),floord(4*t1+Nx-1,32)),floord(32*t3+Nx+19,32));t4++) { for (t5=max(max(max(max(0,ceild(8*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(32*t4-Nx+5,4)),t1);t5<=min(min(min(Nt-1,t1+1),8*t3+6),8*t4+6);t5++) { for (t6=max(max(8*t2,4*t5+4),-8*t1+8*t2+8*t5-7);t6<=min(min(8*t2+7,-8*t1+8*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) { lbv=max(32*t4,4*t5+4); ubv=min(32*t4+31,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((((((((((((coef[0][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef[1][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]))) + (coef[3][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef[4][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[5][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ 
(-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]))) + (coef[6][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef[7][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[8][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]))) + (coef[9][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef[10][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]))) + (coef[11][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]))) + (coef[12][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4] + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "variable axis-symmetric") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<13;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
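The CLooG-generated nest above is hard to read because every subscript carries the time-skew substitution (-4*t5+t6, -4*t5+t7, -4*t5+t8). Undoing it recovers a plain radius-4, 25-point axis-symmetric update (one center tap plus 3 axes x 2 directions x 4 offsets = 25 points). An untiled reference form, reusing the arrays and index variables declared in main (sketch only; the tiled code above is the authoritative version):

/* coef[0] is the center tap; coef[3m-2], coef[3m-1], coef[3m] pair with
   offsets +/-m along z, y, x for m = 1..4. */
for (t = 0; t < Nt; t++)
  for (i = 4; i < Nz-4; i++)
    for (j = 4; j < Ny-4; j++)
      for (k = 4; k < Nx-4; k++) {
        double v = coef[0][i][j][k] * A[t%2][i][j][k];
        for (m = 1; m <= 4; m++) {
          v += coef[3*m-2][i][j][k] * (A[t%2][i-m][j][k] + A[t%2][i+m][j][k]);
          v += coef[3*m-1][i][j][k] * (A[t%2][i][j-m][k] + A[t%2][i][j+m][k]);
          v += coef[3*m  ][i][j][k] * (A[t%2][i][j][k-m] + A[t%2][i][j][k+m]);
        }
        A[(t+1)%2][i][j][k] = v;
      }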
tool_available.c
// The OpenMP standard defines 3 ways of providing ompt_start_tool: // 1. "statically-linking the tool’s definition of ompt_start_tool into an // OpenMP application" // RUN: %libomp-compile -DCODE -DTOOL && env OMP_TOOL_VERBOSE_INIT=stdout \ // RUN: %libomp-run | FileCheck %s --check-prefixes CHECK,ADDRSPACE // Note: We should compile the tool without -fopenmp as other tools developer // would do. Otherwise this test may pass for the wrong reasons on Darwin. // RUN: %clang %flags -DTOOL -shared -fPIC %s -o %T/tool.so // 2. "introducing a dynamically-linked library that includes the tool’s // definition of ompt_start_tool into the application’s address space" // 2.1 Link with tool during compilation // RUN: %libomp-compile -DCODE %no-as-needed-flag %T/tool.so && \ // RUN: env OMP_TOOL_VERBOSE_INIT=stdout %libomp-run | FileCheck %s \ // RUN: --check-prefixes CHECK,ADDRSPACE // 2.2 Link with tool during compilation, but AFTER the runtime // RUN: %libomp-compile -DCODE -lomp %no-as-needed-flag %T/tool.so && \ // RUN: env OMP_TOOL_VERBOSE_INIT=stdout %libomp-run | FileCheck %s \ // RUN: --check-prefixes CHECK,ADDRSPACE // 2.3 Inject tool via the dynamic loader // RUN: %libomp-compile -DCODE && env OMP_TOOL_VERBOSE_INIT=stdout \ // RUN: %preload-tool %libomp-run | FileCheck %s \ // RUN: --check-prefixes CHECK,ADDRSPACE // 3. "providing the name of a dynamically-linked library appropriate for the // architecture and operating system used by the application in the // tool-libraries-var ICV" // 3.1 OMP_TOOL_VERBOSE_INIT not set // RUN: %libomp-compile -DCODE && \ // RUN: env OMP_TOOL_LIBRARIES=%T/tool.so %libomp-run | FileCheck %s // 3.2 OMP_TOOL_VERBOSE_INIT disabled // RUN: env OMP_TOOL_LIBRARIES=%T/tool.so OMP_TOOL_VERBOSE_INIT=disabled \ // RUN: %libomp-run | FileCheck %s // 3.3 OMP_TOOL_VERBOSE_INIT to stdout // RUN: %libomp-compile -DCODE && env OMP_TOOL_LIBRARIES=%T/tool.so \ // RUN: OMP_TOOL_VERBOSE_INIT=stdout %libomp-run | \ // RUN: FileCheck %s -DPARENTPATH=%T --check-prefixes CHECK,TOOLLIB // 3.4 OMP_TOOL_VERBOSE_INIT to stderr, check merged stdout and stderr // RUN: env OMP_TOOL_LIBRARIES=%T/tool.so OMP_TOOL_VERBOSE_INIT=stderr \ // RUN: %libomp-run 2>&1 | \ // RUN: FileCheck %s -DPARENTPATH=%T --check-prefixes CHECK,TOOLLIB // 3.5 OMP_TOOL_VERBOSE_INIT to stderr, check just stderr // RUN: env OMP_TOOL_LIBRARIES=%T/tool.so OMP_TOOL_VERBOSE_INIT=stderr \ // RUN: %libomp-run 2>&1 >/dev/null | \ // RUN: FileCheck %s -DPARENTPATH=%T --check-prefixes TOOLLIB // 3.6 OMP_TOOL_VERBOSE_INIT to file "init.log" // RUN: env OMP_TOOL_LIBRARIES=%T/tool.so OMP_TOOL_VERBOSE_INIT=%T/init.log \ // RUN: %libomp-run | FileCheck %s && cat %T/init.log | \ // RUN: FileCheck %s -DPARENTPATH=%T --check-prefixes TOOLLIB // REQUIRES: ompt /* * This file contains code for an OMPT shared library tool to be * loaded and the code for the OpenMP executable. * -DTOOL enables the code for the tool during compilation * -DCODE enables the code for the executable during compilation */ // Check if libomp supports the callbacks for this test. // CHECK-NOT: {{^}}0: Could not register callback // ADDRSPACE: ----- START LOGGING OF TOOL REGISTRATION ----- // ADDRSPACE-NEXT: Search for OMP tool in current address space... Success. // ADDRSPACE-NEXT: Tool was started and is using the OMPT interface. // ADDRSPACE-NEXT: ----- END LOGGING OF TOOL REGISTRATION ----- // TOOLLIB: ----- START LOGGING OF TOOL REGISTRATION ----- // TOOLLIB-NEXT: Search for OMP tool in current address space... Failed. 
// TOOLLIB-NEXT: Searching tool libraries... // TOOLLIB-NEXT: OMP_TOOL_LIBRARIES = [[PARENTPATH]]/tool.so // TOOLLIB-NEXT: Opening [[PARENTPATH]]/tool.so... Success. // TOOLLIB-NEXT: Searching for ompt_start_tool in // TOOLLIB-SAME: [[PARENTPATH]]/tool.so... Success. // TOOLLIB-NEXT: Tool was started and is using the OMPT interface. // TOOLLIB-NEXT: ----- END LOGGING OF TOOL REGISTRATION ----- #ifdef CODE #include "omp.h" int main() { #pragma omp parallel num_threads(2) { } // CHECK-NOT: ----- START LOGGING OF TOOL REGISTRATION ----- // CHECK-NOT: ----- END LOGGING OF TOOL REGISTRATION ----- // CHECK: {{^}}0: NULL_POINTER=[[NULL:.*$]] // CHECK: {{^}}0: ompt_event_runtime_shutdown return 0; } #endif /* CODE */ #ifdef TOOL #include <stdio.h> #include <omp-tools.h> int ompt_initialize(ompt_function_lookup_t lookup, int initial_device_num, ompt_data_t *tool_data) { printf("0: NULL_POINTER=%p\n", (void*)NULL); return 1; //success } void ompt_finalize(ompt_data_t* tool_data) { printf("0: ompt_event_runtime_shutdown\n"); } ompt_start_tool_result_t* ompt_start_tool( unsigned int omp_version, const char *runtime_version) { static ompt_start_tool_result_t ompt_start_tool_result = {&ompt_initialize,&ompt_finalize, 0}; return &ompt_start_tool_result; } #endif /* TOOL */
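The tool above deliberately registers no callbacks, so the CHECK-NOT line can only fire if libomp itself complains. For reference, a typical ompt_initialize would use its lookup argument to fetch ompt_set_callback and register handlers. A hedged sketch follows (on_thread_begin is a hypothetical handler; the entry-point and callback types come from the standard omp-tools.h):

// Editor's sketch of a fuller initializer, not part of the test above.
static void on_thread_begin(ompt_thread_t thread_type,
                            ompt_data_t *thread_data) {
  printf("0: thread_begin type=%d\n", (int)thread_type);
}

int ompt_initialize_with_callbacks(ompt_function_lookup_t lookup,
                                   int initial_device_num,
                                   ompt_data_t *tool_data) {
  ompt_set_callback_t ompt_set_callback =
      (ompt_set_callback_t)lookup("ompt_set_callback");
  if (ompt_set_callback == NULL)
    return 0; // no OMPT entry points: decline to activate
  ompt_set_callback(ompt_callback_thread_begin,
                    (ompt_callback_t)&on_thread_begin);
  return 1; // nonzero keeps the tool active
}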
psd.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP SSSSS DDDD % % P P SS D D % % PPPP SSS D D % % P SS D D % % P SSSSS DDDD % % % % % % Read/Write Adobe Photoshop Image Format % % % % Software Design % % Cristy % % Leonard Rosenthol % % July 1992 % % Dirk Lemstra % % December 2013 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/channel.h" #include "MagickCore/colormap.h" #include "MagickCore/colormap-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/constitute.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/magick.h" #include "MagickCore/memory_.h" #include "MagickCore/module.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/policy.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/registry.h" #include "MagickCore/quantum-private.h" #include "MagickCore/static.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #ifdef MAGICKCORE_ZLIB_DELEGATE #include <zlib.h> #endif #include "psd-private.h" /* Define declarations. */ #define MaxPSDChannels 56 #define PSDQuantum(x) (((ssize_t) (x)+1) & -2) /* Enumerated declarations. */ typedef enum { Raw = 0, RLE = 1, ZipWithoutPrediction = 2, ZipWithPrediction = 3 } PSDCompressionType; typedef enum { BitmapMode = 0, GrayscaleMode = 1, IndexedMode = 2, RGBMode = 3, CMYKMode = 4, MultichannelMode = 7, DuotoneMode = 8, LabMode = 9 } PSDImageType; /* Typedef declarations. */ typedef struct _ChannelInfo { short type; size_t size; } ChannelInfo; typedef struct _MaskInfo { Image *image; RectangleInfo page; unsigned char background, flags; } MaskInfo; typedef struct _LayerInfo { ChannelInfo channel_info[MaxPSDChannels]; char blendkey[4]; Image *image; MaskInfo mask; Quantum opacity; RectangleInfo page; size_t offset_x, offset_y; unsigned char clipping, flags, name[257], visible; unsigned short channels; StringInfo *info; } LayerInfo; /* Forward declarations.
*/ static MagickBooleanType WritePSDImage(const ImageInfo *,Image *,ExceptionInfo *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s P S D % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsPSD() returns MagickTrue if the image format type, identified by the % magick string, is PSD. % % The format of the IsPSD method is: % % MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) % % A description of each parameter follows: % % o magick: compare image format pattern against these bytes. % % o length: Specifies the length of the magick string. % */ static MagickBooleanType IsPSD(const unsigned char *magick,const size_t length) { if (length < 4) return(MagickFalse); if (LocaleNCompare((const char *) magick,"8BPS",4) == 0) return(MagickTrue); return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e a d P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReadPSDImage() reads an Adobe Photoshop image file and returns it. It % allocates the memory necessary for the new Image structure and returns a % pointer to the new image. % % The format of the ReadPSDImage method is: % % Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o exception: return any errors or warnings in this structure. % */ static const char *CompositeOperatorToPSDBlendMode(Image *image) { switch (image->compose) { case ColorBurnCompositeOp: return(image->endian == LSBEndian ? "vidi" : "idiv"); case ColorDodgeCompositeOp: return(image->endian == LSBEndian ? " vid" : "div "); case ColorizeCompositeOp: return(image->endian == LSBEndian ? "rloc" : "colr"); case DarkenCompositeOp: return(image->endian == LSBEndian ? "krad" : "dark"); case DifferenceCompositeOp: return(image->endian == LSBEndian ? "ffid" : "diff"); case DissolveCompositeOp: return(image->endian == LSBEndian ? "ssid" : "diss"); case ExclusionCompositeOp: return(image->endian == LSBEndian ? "dums" : "smud"); case HardLightCompositeOp: return(image->endian == LSBEndian ? "tiLh" : "hLit"); case HardMixCompositeOp: return(image->endian == LSBEndian ? "xiMh" : "hMix"); case HueCompositeOp: return(image->endian == LSBEndian ? " euh" : "hue "); case LightenCompositeOp: return(image->endian == LSBEndian ? "etil" : "lite"); case LinearBurnCompositeOp: return(image->endian == LSBEndian ? "nrbl" : "lbrn"); case LinearDodgeCompositeOp: return(image->endian == LSBEndian ? "gddl" : "lddg"); case LinearLightCompositeOp: return(image->endian == LSBEndian ? "tiLl" : "lLit"); case LuminizeCompositeOp: return(image->endian == LSBEndian ? " mul" : "lum "); case MultiplyCompositeOp: return(image->endian == LSBEndian ? " lum" : "mul "); case OverlayCompositeOp: return(image->endian == LSBEndian ? "revo" : "over"); case PinLightCompositeOp: return(image->endian == LSBEndian ? "tiLp" : "pLit"); case SaturateCompositeOp: return(image->endian == LSBEndian ? " tas" : "sat "); case ScreenCompositeOp: return(image->endian == LSBEndian ? "nrcs" : "scrn"); case SoftLightCompositeOp: return(image->endian == LSBEndian ? "tiLs" : "sLit"); case VividLightCompositeOp: return(image->endian == LSBEndian ? "tiLv" : "vLit"); case OverCompositeOp: default: return(image->endian == LSBEndian ?
"mron" : "norm"); } } /* For some reason Photoshop seems to blend semi-transparent pixels with white. This method reverts the blending. This can be disabled by setting the option 'psd:alpha-unblend' to off. */ static MagickBooleanType CorrectPSDAlphaBlend(const ImageInfo *image_info, Image *image,ExceptionInfo* exception) { const char *option; MagickBooleanType status; ssize_t y; if (image->alpha_trait != BlendPixelTrait || image->colorspace != sRGBColorspace) return(MagickTrue); option=GetImageOption(image_info,"psd:alpha-unblend"); if (IsStringFalse(option) != MagickFalse) return(MagickTrue); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double gamma; register ssize_t i; gamma=QuantumScale*GetPixelAlpha(image, q); if (gamma != 0.0 && gamma != 1.0) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); if (channel != AlphaPixelChannel) q[i]=ClampToQuantum((q[i]-((1.0-gamma)*QuantumRange))/gamma); } } q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static inline CompressionType ConvertPSDCompression( PSDCompressionType compression) { switch (compression) { case RLE: return RLECompression; case ZipWithPrediction: case ZipWithoutPrediction: return ZipCompression; default: return NoCompression; } } static MagickBooleanType ApplyPSDLayerOpacity(Image *image,Quantum opacity, MagickBooleanType revert,ExceptionInfo *exception) { MagickBooleanType status; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying layer opacity %.20g", (double) opacity); if (opacity == OpaqueAlpha) return(MagickTrue); if (image->alpha_trait != BlendPixelTrait) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception); status=MagickTrue; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (revert == MagickFalse) SetPixelAlpha(image,(Quantum) (QuantumScale*(GetPixelAlpha(image,q))* opacity),q); else if (opacity > 0) SetPixelAlpha(image,(Quantum) (QuantumRange*(GetPixelAlpha(image,q)/ (MagickRealType) opacity)),q); q+=GetPixelChannels(image); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } return(status); } static MagickBooleanType ApplyPSDOpacityMask(Image *image,const Image *mask, Quantum background,MagickBooleanType revert,ExceptionInfo *exception) { Image *complete_mask; MagickBooleanType status; PixelInfo color; ssize_t y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " applying opacity mask"); complete_mask=CloneImage(image,0,0,MagickTrue,exception); if (complete_mask == (Image *) NULL) return(MagickFalse); 
complete_mask->alpha_trait=BlendPixelTrait; GetPixelInfo(complete_mask,&color); color.red=background; SetImageColor(complete_mask,&color,exception); status=CompositeImage(complete_mask,mask,OverCompositeOp,MagickTrue, mask->page.x-image->page.x,mask->page.y-image->page.y,exception); if (status == MagickFalse) { complete_mask=DestroyImage(complete_mask); return(status); } image->alpha_trait=BlendPixelTrait; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register Quantum *p; register ssize_t x; if (status == MagickFalse) continue; q=GetAuthenticPixels(image,0,y,image->columns,1,exception); p=GetAuthenticPixels(complete_mask,0,y,complete_mask->columns,1,exception); if ((q == (Quantum *) NULL) || (p == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType alpha, intensity; alpha=GetPixelAlpha(image,q); intensity=GetPixelIntensity(complete_mask,p); if (revert == MagickFalse) SetPixelAlpha(image,ClampToQuantum(intensity*(QuantumScale*alpha)),q); else if (intensity > 0) SetPixelAlpha(image,ClampToQuantum((alpha/intensity)*QuantumRange),q); q+=GetPixelChannels(image); p+=GetPixelChannels(complete_mask); } if (SyncAuthenticPixels(image,exception) == MagickFalse) status=MagickFalse; } complete_mask=DestroyImage(complete_mask); return(status); } static void PreservePSDOpacityMask(Image *image,LayerInfo* layer_info, ExceptionInfo *exception) { char *key; RandomInfo *random_info; StringInfo *key_info; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " preserving opacity mask"); random_info=AcquireRandomInfo(); key_info=GetRandomKey(random_info,2+1); key=(char *) GetStringInfoDatum(key_info); key[8]=layer_info->mask.background; key[9]='\0'; layer_info->mask.image->page.x+=layer_info->page.x; layer_info->mask.image->page.y+=layer_info->page.y; (void) SetImageRegistry(ImageRegistryType,(const char *) key, layer_info->mask.image,exception); (void) SetImageArtifact(layer_info->image,"psd:opacity-mask", (const char *) key); key_info=DestroyStringInfo(key_info); random_info=DestroyRandomInfo(random_info); } static ssize_t DecodePSDPixels(const size_t number_compact_pixels, const unsigned char *compact_pixels,const ssize_t depth, const size_t number_pixels,unsigned char *pixels) { #define CheckNumberCompactPixels \ if (packets == 0) \ return(i); \ packets-- #define CheckNumberPixels(count) \ if (((ssize_t) i + count) > (ssize_t) number_pixels) \ return(i); \ i+=count int pixel; register ssize_t i, j; size_t length; ssize_t packets; packets=(ssize_t) number_compact_pixels; for (i=0; (packets > 1) && (i < (ssize_t) number_pixels); ) { packets--; length=(size_t) (*compact_pixels++); if (length == 128) continue; if (length > 128) { length=256-length+1; CheckNumberCompactPixels; pixel=(*compact_pixels++); for (j=0; j < (ssize_t) length; j++) { switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(pixel >> 7) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 6) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 5) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 4) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 3) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 2) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 1) & 0x01 ? 0U : 255U; *pixels++=(pixel >> 0) & 0x01 ? 
0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(unsigned char) ((pixel >> 6) & 0x03); *pixels++=(unsigned char) ((pixel >> 4) & 0x03); *pixels++=(unsigned char) ((pixel >> 2) & 0x03); *pixels++=(unsigned char) ((pixel & 0x03) & 0x03); break; } case 4: { CheckNumberPixels(2); *pixels++=(unsigned char) ((pixel >> 4) & 0xff); *pixels++=(unsigned char) ((pixel & 0x0f) & 0xff); break; } default: { CheckNumberPixels(1); *pixels++=(unsigned char) pixel; break; } } } continue; } length++; for (j=0; j < (ssize_t) length; j++) { CheckNumberCompactPixels; switch (depth) { case 1: { CheckNumberPixels(8); *pixels++=(*compact_pixels >> 7) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 6) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 5) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 4) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 3) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 2) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 1) & 0x01 ? 0U : 255U; *pixels++=(*compact_pixels >> 0) & 0x01 ? 0U : 255U; break; } case 2: { CheckNumberPixels(4); *pixels++=(*compact_pixels >> 6) & 0x03; *pixels++=(*compact_pixels >> 4) & 0x03; *pixels++=(*compact_pixels >> 2) & 0x03; *pixels++=(*compact_pixels & 0x03) & 0x03; break; } case 4: { CheckNumberPixels(2); *pixels++=(*compact_pixels >> 4) & 0xff; *pixels++=(*compact_pixels & 0x0f) & 0xff; break; } default: { CheckNumberPixels(1); *pixels++=(*compact_pixels); break; } } compact_pixels++; } } return(i); } static inline LayerInfo *DestroyLayerInfo(LayerInfo *layer_info, const ssize_t number_layers) { ssize_t i; for (i=0; i<number_layers; i++) { if (layer_info[i].image != (Image *) NULL) layer_info[i].image=DestroyImage(layer_info[i].image); if (layer_info[i].mask.image != (Image *) NULL) layer_info[i].mask.image=DestroyImage(layer_info[i].mask.image); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); } return (LayerInfo *) RelinquishMagickMemory(layer_info); } static inline size_t GetPSDPacketSize(const Image *image) { if (image->storage_class == PseudoClass) { if (image->colors > 256) return(2); } if (image->depth > 16) return(4); if (image->depth > 8) return(2); return(1); } static inline MagickSizeType GetPSDSize(const PSDInfo *psd_info,Image *image) { if (psd_info->version == 1) return((MagickSizeType) ReadBlobLong(image)); return((MagickSizeType) ReadBlobLongLong(image)); } static inline size_t GetPSDRowSize(Image *image) { if (image->depth == 1) return(((image->columns+7)/8)*GetPSDPacketSize(image)); else return(image->columns*GetPSDPacketSize(image)); } static const char *ModeToString(PSDImageType type) { switch (type) { case BitmapMode: return "Bitmap"; case GrayscaleMode: return "Grayscale"; case IndexedMode: return "Indexed"; case RGBMode: return "RGB"; case CMYKMode: return "CMYK"; case MultichannelMode: return "Multichannel"; case DuotoneMode: return "Duotone"; case LabMode: return "L*A*B"; default: return "unknown"; } } static MagickBooleanType NegateCMYK(Image *image,ExceptionInfo *exception) { ChannelType channel_mask; MagickBooleanType status; channel_mask=SetImageChannelMask(image,(ChannelType)(AllChannels &~ AlphaChannel)); status=NegateImage(image,MagickFalse,exception); (void) SetImageChannelMask(image,channel_mask); return(status); } static StringInfo *ParseImageResourceBlocks(Image *image, const unsigned char *blocks,size_t length, MagickBooleanType *has_merged_image,ExceptionInfo *exception) { const unsigned char *p; StringInfo *profile; unsigned char 
name_length; unsigned int count; unsigned short id, short_sans; if (length < 16) return((StringInfo *) NULL); profile=BlobToStringInfo((const unsigned char *) NULL,length); SetStringInfoDatum(profile,blocks); SetStringInfoName(profile,"8bim"); for (p=blocks; (p >= blocks) && (p < (blocks+length-7)); ) { if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p+=4; p=PushShortPixel(MSBEndian,p,&id); p=PushCharPixel(p,&name_length); if ((name_length % 2) == 0) name_length++; p+=name_length; if (p > (blocks+length-4)) break; p=PushLongPixel(MSBEndian,p,&count); if ((p+count) > (blocks+length)) break; switch (id) { case 0x03ed: { char value[MagickPathExtent]; unsigned short resolution; /* Resolution info. */ if (count < 16) break; p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.x=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g", image->resolution.x); (void) SetImageProperty(image,"tiff:XResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&resolution); image->resolution.y=(double) resolution; (void) FormatLocaleString(value,MagickPathExtent,"%g", image->resolution.y); (void) SetImageProperty(image,"tiff:YResolution",value,exception); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushShortPixel(MSBEndian,p,&short_sans); image->units=PixelsPerInchResolution; break; } case 0x0421: { if ((count > 4) && (*(p+4) == 0)) *has_merged_image=MagickFalse; p+=count; break; } default: { p+=count; break; } } if ((count & 0x01) != 0) p++; } return(profile); } static CompositeOperator PSDBlendModeToCompositeOperator(const char *mode) { if (mode == (const char *) NULL) return(OverCompositeOp); if (LocaleNCompare(mode,"norm",4) == 0) return(OverCompositeOp); if (LocaleNCompare(mode,"mul ",4) == 0) return(MultiplyCompositeOp); if (LocaleNCompare(mode,"diss",4) == 0) return(DissolveCompositeOp); if (LocaleNCompare(mode,"diff",4) == 0) return(DifferenceCompositeOp); if (LocaleNCompare(mode,"dark",4) == 0) return(DarkenCompositeOp); if (LocaleNCompare(mode,"lite",4) == 0) return(LightenCompositeOp); if (LocaleNCompare(mode,"hue ",4) == 0) return(HueCompositeOp); if (LocaleNCompare(mode,"sat ",4) == 0) return(SaturateCompositeOp); if (LocaleNCompare(mode,"colr",4) == 0) return(ColorizeCompositeOp); if (LocaleNCompare(mode,"lum ",4) == 0) return(LuminizeCompositeOp); if (LocaleNCompare(mode,"scrn",4) == 0) return(ScreenCompositeOp); if (LocaleNCompare(mode,"over",4) == 0) return(OverlayCompositeOp); if (LocaleNCompare(mode,"hLit",4) == 0) return(HardLightCompositeOp); if (LocaleNCompare(mode,"sLit",4) == 0) return(SoftLightCompositeOp); if (LocaleNCompare(mode,"smud",4) == 0) return(ExclusionCompositeOp); if (LocaleNCompare(mode,"div ",4) == 0) return(ColorDodgeCompositeOp); if (LocaleNCompare(mode,"idiv",4) == 0) return(ColorBurnCompositeOp); if (LocaleNCompare(mode,"lbrn",4) == 0) return(LinearBurnCompositeOp); if (LocaleNCompare(mode,"lddg",4) == 0) return(LinearDodgeCompositeOp); if (LocaleNCompare(mode,"lLit",4) == 0) return(LinearLightCompositeOp); if (LocaleNCompare(mode,"vLit",4) == 0) return(VividLightCompositeOp); if (LocaleNCompare(mode,"pLit",4) == 0) return(PinLightCompositeOp); if (LocaleNCompare(mode,"hMix",4) == 0) return(HardMixCompositeOp); return(OverCompositeOp); } static inline void ReversePSDString(Image *image,char *p,size_t length) { char *q; if (image->endian == MSBEndian) 
return; q=p+length; for(--q; p < q; ++p, --q) { *p = *p ^ *q, *q = *p ^ *q, *p = *p ^ *q; } } static inline void SetPSDPixel(Image *image,const size_t channels, const ssize_t type,const size_t packet_size,const Quantum pixel,Quantum *q, ExceptionInfo *exception) { if (image->storage_class == PseudoClass) { PixelInfo *color; if (type == 0) { if (packet_size == 1) SetPixelIndex(image,ScaleQuantumToChar(pixel),q); else SetPixelIndex(image,ScaleQuantumToShort(pixel),q); } color=image->colormap+(ssize_t) ConstrainColormapIndex(image, GetPixelIndex(image,q),exception); if ((type == 0) && (channels > 1)) return; else color->alpha=(MagickRealType) pixel; SetPixelViaPixelInfo(image,color,q); return; } switch (type) { case -1: { SetPixelAlpha(image,pixel,q); break; } case -2: case 0: { SetPixelRed(image,pixel,q); break; } case -3: case 1: { SetPixelGreen(image,pixel,q); break; } case -4: case 2: { SetPixelBlue(image,pixel,q); break; } case 3: { if (image->colorspace == CMYKColorspace) SetPixelBlack(image,pixel,q); else if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } case 4: { if ((IssRGBCompatibleColorspace(image->colorspace) != MagickFalse) && (channels > 3)) break; if (image->alpha_trait != UndefinedPixelTrait) SetPixelAlpha(image,pixel,q); break; } } } static MagickBooleanType ReadPSDChannelPixels(Image *image, const size_t channels,const size_t row,const ssize_t type, const unsigned char *pixels,ExceptionInfo *exception) { Quantum pixel; register const unsigned char *p; register Quantum *q; register ssize_t x; size_t packet_size; p=pixels; q=GetAuthenticPixels(image,0,row,image->columns,1,exception); if (q == (Quantum *) NULL) return MagickFalse; packet_size=GetPSDPacketSize(image); for (x=0; x < (ssize_t) image->columns; x++) { if (packet_size == 1) pixel=ScaleCharToQuantum(*p++); else if (packet_size == 2) { unsigned short nibble; p=PushShortPixel(MSBEndian,p,&nibble); pixel=ScaleShortToQuantum(nibble); } else { MagickFloatType nibble; p=PushFloatPixel(MSBEndian,p,&nibble); pixel=ClampToQuantum((MagickRealType)QuantumRange*nibble); } if (image->depth > 1) { SetPSDPixel(image,channels,type,packet_size,pixel,q,exception); q+=GetPixelChannels(image); } else { ssize_t bit, number_bits; number_bits=image->columns-x; if (number_bits > 8) number_bits=8; for (bit = 0; bit < number_bits; bit++) { SetPSDPixel(image,channels,type,packet_size,(((unsigned char) pixel) & (0x01 << (7-bit))) != 0 ? 
0 : QuantumRange,q,exception); q+=GetPixelChannels(image); x++; } if (x != (ssize_t) image->columns) x--; continue; } } return(SyncAuthenticPixels(image,exception)); } static MagickBooleanType ReadPSDChannelRaw(Image *image,const size_t channels, const ssize_t type,ExceptionInfo *exception) { MagickBooleanType status; size_t count, row_size; ssize_t y; unsigned char *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RAW"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,row_size,pixels); if (count != row_size) break; status=ReadPSDChannelPixels(image,channels,y,type,pixels,exception); if (status == MagickFalse) break; } pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } static inline MagickOffsetType *ReadPSDRLESizes(Image *image, const PSDInfo *psd_info,const size_t size) { MagickOffsetType *sizes; ssize_t y; sizes=(MagickOffsetType *) AcquireQuantumMemory(size,sizeof(*sizes)); if (sizes != (MagickOffsetType *) NULL) { for (y=0; y < (ssize_t) size; y++) { if (psd_info->version == 1) sizes[y]=(MagickOffsetType) ReadBlobShort(image); else sizes[y]=(MagickOffsetType) ReadBlobLong(image); } } return(sizes); } static MagickBooleanType ReadPSDChannelRLE(Image *image,const PSDInfo *psd_info, const ssize_t type,MagickOffsetType *sizes,ExceptionInfo *exception) { MagickBooleanType status; size_t length, row_size; ssize_t count, y; unsigned char *compact_pixels, *pixels; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is RLE compressed"); row_size=GetPSDRowSize(image); pixels=(unsigned char *) AcquireQuantumMemory(row_size,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); length=0; for (y=0; y < (ssize_t) image->rows; y++) if ((MagickOffsetType) length < sizes[y]) length=(size_t) sizes[y]; if (length > (row_size+512)) /* 512 bytes of slack is an arbitrary sanity bound */ { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"InvalidLength",image->filename); } compact_pixels=(unsigned char *) AcquireQuantumMemory(length,sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(compact_pixels,0,length*sizeof(*compact_pixels)); status=MagickTrue; for (y=0; y < (ssize_t) image->rows; y++) { status=MagickFalse; count=ReadBlob(image,(size_t) sizes[y],compact_pixels); if (count != (ssize_t) sizes[y]) break; count=DecodePSDPixels((size_t) sizes[y],compact_pixels, (ssize_t) (image->depth == 1 ?
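/* apparently a deliberately bogus depth: an unrecognized value sends DecodePSDPixels down its plain byte-copy path, and the 1-bit rows are expanded afterwards by ReadPSDChannelPixels */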
123456 : image->depth),row_size,pixels); if (count != (ssize_t) row_size) break; status=ReadPSDChannelPixels(image,psd_info->channels,y,type,pixels, exception); if (status == MagickFalse) break; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #ifdef MAGICKCORE_ZLIB_DELEGATE static MagickBooleanType ReadPSDChannelZip(Image *image,const size_t channels, const ssize_t type,const PSDCompressionType compression, const size_t compact_size,ExceptionInfo *exception) { MagickBooleanType status; register unsigned char *p; size_t count, length, packet_size, row_size; ssize_t y; unsigned char *compact_pixels, *pixels; z_stream stream; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is ZIP compressed"); if ((MagickSizeType) compact_size > GetBlobSize(image)) ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); compact_pixels=(unsigned char *) AcquireQuantumMemory(compact_size, sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); packet_size=GetPSDPacketSize(image); row_size=image->columns*packet_size; count=image->rows*row_size; pixels=(unsigned char *) AcquireQuantumMemory(count,sizeof(*pixels)); if (pixels == (unsigned char *) NULL) { compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (ReadBlob(image,compact_size,compact_pixels) != (ssize_t) compact_size) { pixels=(unsigned char *) RelinquishMagickMemory(pixels); compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); ThrowBinaryException(CorruptImageError,"UnexpectedEndOfFile", image->filename); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; stream.next_in=(Bytef *)compact_pixels; stream.avail_in=(uInt) compact_size; stream.next_out=(Bytef *)pixels; stream.avail_out=(uInt) count; if (inflateInit(&stream) == Z_OK) { int ret; while (stream.avail_out > 0) { ret=inflate(&stream,Z_SYNC_FLUSH); if ((ret != Z_OK) && (ret != Z_STREAM_END)) { (void) inflateEnd(&stream); compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(MagickFalse); } if (ret == Z_STREAM_END) break; } (void) inflateEnd(&stream); } if (compression == ZipWithPrediction) { p=pixels; while (count > 0) { length=image->columns; while (--length) { if (packet_size == 2) { p[2]+=p[0]+((p[1]+p[3]) >> 8); p[3]+=p[1]; } // else if (packet_size == 4) // { // TODO: Figure out what to do there. 
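// (ReadPSDLayer rejects ZipWithPrediction for 32-bit images, so the 4-byte packet case is currently unreachable here.)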
// } else *(p+1)+=*p; p+=packet_size; } p+=packet_size; count-=row_size; } } status=MagickTrue; p=pixels; for (y=0; y < (ssize_t) image->rows; y++) { status=ReadPSDChannelPixels(image,channels,y,type,p,exception); if (status == MagickFalse) break; p+=row_size; } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); pixels=(unsigned char *) RelinquishMagickMemory(pixels); return(status); } #endif static MagickBooleanType ReadPSDChannel(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,LayerInfo* layer_info, const size_t channel,const PSDCompressionType compression, ExceptionInfo *exception) { Image *channel_image, *mask; MagickOffsetType offset; MagickBooleanType status; channel_image=image; mask=(Image *) NULL; if ((layer_info->channel_info[channel].type < -1) && (layer_info->mask.page.width > 0) && (layer_info->mask.page.height > 0)) { const char *option; /* Ignore mask that is not a user supplied layer mask, if the mask is disabled or if the flags have unsupported values. */ option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if ((layer_info->channel_info[channel].type != -2) || (layer_info->mask.flags > 2) || ((layer_info->mask.flags & 0x02) && (IsStringTrue(option) == MagickFalse))) { SeekBlob(image,layer_info->channel_info[channel].size-2,SEEK_CUR); return(MagickTrue); } mask=CloneImage(image,layer_info->mask.page.width, layer_info->mask.page.height,MagickFalse,exception); if (mask != (Image *) NULL) { SetImageType(mask,GrayscaleType,exception); channel_image=mask; } } offset=TellBlob(image); status=MagickFalse; switch(compression) { case Raw: status=ReadPSDChannelRaw(channel_image,psd_info->channels, layer_info->channel_info[channel].type,exception); break; case RLE: { MagickOffsetType *sizes; sizes=ReadPSDRLESizes(channel_image,psd_info,channel_image->rows); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); status=ReadPSDChannelRLE(channel_image,psd_info, layer_info->channel_info[channel].type,sizes,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); } break; case ZipWithPrediction: case ZipWithoutPrediction: #ifdef MAGICKCORE_ZLIB_DELEGATE status=ReadPSDChannelZip(channel_image,layer_info->channels, layer_info->channel_info[channel].type,compression, layer_info->channel_info[channel].size-2,exception); #else (void) ThrowMagickException(exception,GetMagickModule(), MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (ZLIB)",image->filename); #endif break; default: (void) ThrowMagickException(exception,GetMagickModule(),TypeWarning, "CompressionNotSupported","'%.20g'",(double) compression); break; } SeekBlob(image,offset+layer_info->channel_info[channel].size-2,SEEK_SET); if (status == MagickFalse) { if (mask != (Image *) NULL) DestroyImage(mask); ThrowBinaryException(CoderError,"UnableToDecompressImage", image->filename); } if (mask != (Image *) NULL) { if (layer_info->mask.image != (Image *) NULL) layer_info->mask.image=DestroyImage(layer_info->mask.image); layer_info->mask.image=mask; } return(status); } static MagickBooleanType ReadPSDLayer(Image *image,const ImageInfo *image_info, const PSDInfo *psd_info,LayerInfo* layer_info,ExceptionInfo *exception) { char message[MagickPathExtent]; MagickBooleanType status; PSDCompressionType compression; ssize_t j; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " setting up new layer image"); if (psd_info->mode != IndexedMode) (void) 
SetImageBackgroundColor(layer_info->image,exception); layer_info->image->compose=PSDBlendModeToCompositeOperator( layer_info->blendkey); if (layer_info->visible == MagickFalse) layer_info->image->compose=NoCompositeOp; /* Set up some hidden attributes for folks that need them. */ (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.x); (void) SetImageArtifact(layer_info->image,"psd:layer.x",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g", (double) layer_info->page.y); (void) SetImageArtifact(layer_info->image,"psd:layer.y",message); (void) FormatLocaleString(message,MagickPathExtent,"%.20g",(double) layer_info->opacity); (void) SetImageArtifact(layer_info->image,"psd:layer.opacity",message); (void) SetImageProperty(layer_info->image,"label",(char *) layer_info->name, exception); status=MagickTrue; for (j=0; j < (ssize_t) layer_info->channels; j++) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for channel %.20g",(double) j); compression=(PSDCompressionType) ReadBlobShort(layer_info->image); /* TODO: Remove this when we figure out how to support this */ if ((compression == ZipWithPrediction) && (image->depth == 32)) { (void) ThrowMagickException(exception,GetMagickModule(), TypeError,"CompressionNotSupported","ZipWithPrediction(32 bit)"); return(MagickFalse); } layer_info->image->compression=ConvertPSDCompression(compression); if (layer_info->channel_info[j].type == -1) layer_info->image->alpha_trait=BlendPixelTrait; status=ReadPSDChannel(layer_info->image,image_info,psd_info,layer_info,j, compression,exception); if (status == MagickFalse) break; } if (status != MagickFalse) status=ApplyPSDLayerOpacity(layer_info->image,layer_info->opacity, MagickFalse,exception); if ((status != MagickFalse) && (layer_info->image->colorspace == CMYKColorspace)) status=NegateCMYK(layer_info->image,exception); if ((status != MagickFalse) && (layer_info->mask.image != (Image *) NULL)) { const char *option; layer_info->mask.image->page.x=layer_info->mask.page.x; layer_info->mask.image->page.y=layer_info->mask.page.y; /* Do not composite the mask when it is disabled */ if ((layer_info->mask.flags & 0x02) == 0x02) layer_info->mask.image->compose=NoCompositeOp; else status=ApplyPSDOpacityMask(layer_info->image,layer_info->mask.image, layer_info->mask.background == 0 ? 
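/* the mask background byte is stored as 0 or 255 in the file */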
0 : QuantumRange,MagickFalse, exception); option=GetImageOption(image_info,"psd:preserve-opacity-mask"); if (IsStringTrue(option) != MagickFalse) PreservePSDOpacityMask(image,layer_info,exception); layer_info->mask.image=DestroyImage(layer_info->mask.image); } return(status); } static MagickBooleanType CheckPSDChannels(const PSDInfo *psd_info, LayerInfo *layer_info) { int channel_type; register ssize_t i; if (layer_info->channels < psd_info->min_channels) return(MagickFalse); channel_type=RedChannel; if (psd_info->min_channels >= 3) channel_type|=(GreenChannel | BlueChannel); if (psd_info->min_channels >= 4) channel_type|=BlackChannel; for (i=0; i < layer_info->channels; i++) { short type; type=layer_info->channel_info[i].type; if (type == -1) { channel_type|=AlphaChannel; continue; } if (type < -1) continue; if (type == 0) channel_type&=~RedChannel; else if (type == 1) channel_type&=~GreenChannel; else if (type == 2) channel_type&=~BlueChannel; else if (type == 3) channel_type&=~BlackChannel; } if (channel_type == 0) return(MagickTrue); if ((channel_type == AlphaChannel) && (layer_info->channels >= psd_info->min_channels + 1)) return(MagickTrue); return(MagickFalse); } static MagickBooleanType ReadPSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info, const MagickBooleanType skip_layers,ExceptionInfo *exception) { char type[4]; LayerInfo *layer_info; MagickSizeType size; MagickBooleanType status; register ssize_t i; ssize_t count, j, number_layers; size=GetPSDSize(psd_info,image); if (size == 0) { /* Skip layers & masks. */ (void) ReadBlobLong(image); count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,count); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) return(MagickTrue); else { count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count == 4) && ((LocaleNCompare(type,"Lr16",4) == 0) || (LocaleNCompare(type,"Lr32",4) == 0))) size=GetPSDSize(psd_info,image); else return(MagickTrue); } } status=MagickTrue; if (size != 0) { layer_info=(LayerInfo *) NULL; number_layers=(short) ReadBlobShort(image); if (number_layers < 0) { /* The first alpha channel in the merged result contains the transparency data for the merged result. 
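A negative layer count is the file's marker for this case, so the absolute value taken below is the real number of layers.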
*/ number_layers=MagickAbsoluteValue(number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " negative layer count corrected for"); image->alpha_trait=BlendPixelTrait; } /* We only need to know if the image has an alpha channel */ if (skip_layers != MagickFalse) return(MagickTrue); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image contains %.20g layers",(double) number_layers); if (number_layers == 0) ThrowBinaryException(CorruptImageError,"InvalidNumberOfLayers", image->filename); layer_info=(LayerInfo *) AcquireQuantumMemory((size_t) number_layers, sizeof(*layer_info)); if (layer_info == (LayerInfo *) NULL) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of LayerInfo failed"); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } (void) memset(layer_info,0,(size_t) number_layers* sizeof(*layer_info)); for (i=0; i < number_layers; i++) { ssize_t x, y; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading layer #%.20g",(double) i+1); layer_info[i].page.y=ReadBlobSignedLong(image); layer_info[i].page.x=ReadBlobSignedLong(image); y=ReadBlobSignedLong(image); x=ReadBlobSignedLong(image); layer_info[i].page.width=(size_t) (x-layer_info[i].page.x); layer_info[i].page.height=(size_t) (y-layer_info[i].page.y); layer_info[i].channels=ReadBlobShort(image); if (layer_info[i].channels > MaxPSDChannels) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"MaximumChannelsExceeded", image->filename); } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " offset(%.20g,%.20g), size(%.20g,%.20g), channels=%.20g", (double) layer_info[i].page.x,(double) layer_info[i].page.y, (double) layer_info[i].page.height,(double) layer_info[i].page.width,(double) layer_info[i].channels); for (j=0; j < (ssize_t) layer_info[i].channels; j++) { layer_info[i].channel_info[j].type=(short) ReadBlobShort(image); if ((layer_info[i].channel_info[j].type < -4) || (layer_info[i].channel_info[j].type > 4)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"NoSuchImageChannel", image->filename); } layer_info[i].channel_info[j].size=(size_t) GetPSDSize(psd_info, image); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " channel[%.20g]: type=%.20g, size=%.20g",(double) j, (double) layer_info[i].channel_info[j].type, (double) layer_info[i].channel_info[j].size); } if (CheckPSDChannels(psd_info,&layer_info[i]) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) type); if (count == 4) ReversePSDString(image,type,4); if ((count != 4) || (LocaleNCompare(type,"8BIM",4) != 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer type was %.4s instead of 8BIM", type); layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } count=ReadBlob(image,4,(unsigned char *) layer_info[i].blendkey); if (count != 4) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError,"ImproperImageHeader", image->filename); } ReversePSDString(image,layer_info[i].blendkey,4); 
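/* The rest of the fixed-size layer record: opacity, clipping, flags and a filler byte. */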
layer_info[i].opacity=(Quantum) ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); layer_info[i].clipping=(unsigned char) ReadBlobByte(image); layer_info[i].flags=(unsigned char) ReadBlobByte(image); layer_info[i].visible=!(layer_info[i].flags & 0x02); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " blend=%.4s, opacity=%.20g, clipping=%s, flags=%d, visible=%s", layer_info[i].blendkey,(double) layer_info[i].opacity, layer_info[i].clipping ? "true" : "false",layer_info[i].flags, layer_info[i].visible ? "true" : "false"); (void) ReadBlobByte(image); /* filler */ size=ReadBlobLong(image); if (size != 0) { MagickSizeType combined_length, length; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer contains additional info"); length=ReadBlobLong(image); combined_length=length+4; if (length != 0) { /* Layer mask info. */ layer_info[i].mask.page.y=ReadBlobSignedLong(image); layer_info[i].mask.page.x=ReadBlobSignedLong(image); layer_info[i].mask.page.height=(size_t) (ReadBlobSignedLong(image)- layer_info[i].mask.page.y); layer_info[i].mask.page.width=(size_t) (ReadBlobSignedLong(image)- layer_info[i].mask.page.x); layer_info[i].mask.background=(unsigned char) ReadBlobByte( image); layer_info[i].mask.flags=(unsigned char) ReadBlobByte(image); if (!(layer_info[i].mask.flags & 0x01)) { layer_info[i].mask.page.y=layer_info[i].mask.page.y- layer_info[i].page.y; layer_info[i].mask.page.x=layer_info[i].mask.page.x- layer_info[i].page.x; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer mask: offset(%.20g,%.20g), size(%.20g,%.20g), length=%.20g", (double) layer_info[i].mask.page.x,(double) layer_info[i].mask.page.y,(double) layer_info[i].mask.page.width,(double) layer_info[i].mask.page.height,(double) ((MagickOffsetType) length)-18); /* Skip over the rest of the layer mask information. */ if (DiscardBlobBytes(image,(MagickSizeType) (length-18)) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=ReadBlobLong(image); combined_length+=length+4; if (length != 0) { /* Layer blending ranges info. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer blending ranges: length=%.20g",(double) ((MagickOffsetType) length)); if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } /* Layer name. 
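The name is a Pascal string: a length byte followed by the characters, padded below so the field ends on a 4-byte boundary.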
*/ length=(MagickSizeType) (unsigned char) ReadBlobByte(image); combined_length+=length+1; if (length > 0) (void) ReadBlob(image,(size_t) length++,layer_info[i].name); layer_info[i].name[length]='\0'; if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer name: %s",layer_info[i].name); if ((length % 4) != 0) { length=4-(length % 4); combined_length+=length; /* Skip over the padding of the layer name */ if (DiscardBlobBytes(image,length) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } length=(MagickSizeType) size-combined_length; if (length > 0) { unsigned char *info; if (length > GetBlobSize(image)) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "InsufficientImageDataInFile",image->filename); } layer_info[i].info=AcquireStringInfo((const size_t) length); info=GetStringInfoDatum(layer_info[i].info); (void) ReadBlob(image,(const size_t) length,info); } } } for (i=0; i < number_layers; i++) { if ((layer_info[i].page.width == 0) || (layer_info[i].page.height == 0)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " layer data is empty"); if (layer_info[i].info != (StringInfo *) NULL) layer_info[i].info=DestroyStringInfo(layer_info[i].info); continue; } /* Allocate layered image. */ layer_info[i].image=CloneImage(image,layer_info[i].page.width, layer_info[i].page.height,MagickFalse,exception); if (layer_info[i].image == (Image *) NULL) { layer_info=DestroyLayerInfo(layer_info,number_layers); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " allocation of image for layer %.20g failed",(double) i); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } if (layer_info[i].info != (StringInfo *) NULL) { (void) SetImageProfile(layer_info[i].image,"psd:additional-info", layer_info[i].info,exception); layer_info[i].info=DestroyStringInfo(layer_info[i].info); } } if (image_info->ping == MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=0; j < layer_info[i].channels; j++) { if (DiscardBlobBytes(image,(MagickSizeType) layer_info[i].channel_info[j].size) == MagickFalse) { layer_info=DestroyLayerInfo(layer_info,number_layers); ThrowBinaryException(CorruptImageError, "UnexpectedEndOfFile",image->filename); } } continue; } if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading data for layer %.20g",(double) i); status=ReadPSDLayer(image,image_info,psd_info,&layer_info[i], exception); if (status == MagickFalse) break; status=SetImageProgress(image,LoadImagesTag,i,(MagickSizeType) number_layers); if (status == MagickFalse) break; } } if (status != MagickFalse) { for (i=0; i < number_layers; i++) { if (layer_info[i].image == (Image *) NULL) { for (j=i; j < number_layers - 1; j++) layer_info[j] = layer_info[j+1]; number_layers--; i--; } } if (number_layers > 0) { for (i=0; i < number_layers; i++) { if (i > 0) layer_info[i].image->previous=layer_info[i-1].image; if (i < (number_layers-1)) layer_info[i].image->next=layer_info[i+1].image; layer_info[i].image->page=layer_info[i].page; } image->next=layer_info[0].image; layer_info[0].image->previous=image; } layer_info=(LayerInfo *) RelinquishMagickMemory(layer_info); } else layer_info=DestroyLayerInfo(layer_info,number_layers); } return(status); } ModuleExport MagickBooleanType 
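/* ReadPSDLayers() is the exported entry point; it checks the coder security policy before delegating to ReadPSDLayersInternal(). */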
ReadPSDLayers(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=ReadPolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return(ReadPSDLayersInternal(image,image_info,psd_info,MagickFalse, exception)); } static MagickBooleanType ReadPSDMergedImage(const ImageInfo *image_info, Image *image,const PSDInfo *psd_info,ExceptionInfo *exception) { MagickOffsetType *sizes; MagickBooleanType status; PSDCompressionType compression; register ssize_t i; compression=(PSDCompressionType) ReadBlobMSBShort(image); image->compression=ConvertPSDCompression(compression); if (compression != Raw && compression != RLE) { (void) ThrowMagickException(exception,GetMagickModule(), TypeWarning,"CompressionNotSupported","'%.20g'",(double) compression); return(MagickFalse); } sizes=(MagickOffsetType *) NULL; if (compression == RLE) { sizes=ReadPSDRLESizes(image,psd_info,image->rows*psd_info->channels); if (sizes == (MagickOffsetType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } status=MagickTrue; for (i=0; i < (ssize_t) psd_info->channels; i++) { ssize_t type; type=i; if ((type == 1) && (psd_info->channels == 2)) type=-1; if (compression == RLE) status=ReadPSDChannelRLE(image,psd_info,type,sizes+(i*image->rows), exception); else status=ReadPSDChannelRaw(image,psd_info->channels,type,exception); if (status != MagickFalse) status=SetImageProgress(image,LoadImagesTag,i,psd_info->channels); if (status == MagickFalse) break; } if ((status != MagickFalse) && (image->colorspace == CMYKColorspace)) status=NegateCMYK(image,exception); if (status != MagickFalse) status=CorrectPSDAlphaBlend(image_info,image,exception); sizes=(MagickOffsetType *) RelinquishMagickMemory(sizes); return(status); } static Image *ReadPSDImage(const ImageInfo *image_info,ExceptionInfo *exception) { Image *image; MagickBooleanType has_merged_image, skip_layers; MagickOffsetType offset; MagickSizeType length; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t imageListLength; ssize_t count; StringInfo *profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=AcquireImage(image_info,exception); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImageList(image); return((Image *) NULL); } /* Read image header. 
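The fixed part is the 4-byte "8BPS" signature, a version short (1 = PSD, 2 = PSB), 6 reserved bytes, the channel count, rows, columns, depth and color mode.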
*/ image->endian=MSBEndian; count=ReadBlob(image,4,(unsigned char *) psd_info.signature); psd_info.version=ReadBlobMSBShort(image); if ((count != 4) || (LocaleNCompare(psd_info.signature,"8BPS",4) != 0) || ((psd_info.version != 1) && (psd_info.version != 2))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); (void) ReadBlob(image,6,psd_info.reserved); psd_info.channels=ReadBlobMSBShort(image); if (psd_info.channels < 1) ThrowReaderException(CorruptImageError,"MissingImageChannel"); if (psd_info.channels > MaxPSDChannels) ThrowReaderException(CorruptImageError,"MaximumChannelsExceeded"); psd_info.rows=ReadBlobMSBLong(image); psd_info.columns=ReadBlobMSBLong(image); if ((psd_info.version == 1) && ((psd_info.rows > 30000) || (psd_info.columns > 30000))) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.depth=ReadBlobMSBShort(image); if ((psd_info.depth != 1) && (psd_info.depth != 8) && (psd_info.depth != 16) && (psd_info.depth != 32)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); psd_info.mode=ReadBlobMSBShort(image); if ((psd_info.mode == IndexedMode) && (psd_info.channels > 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image is %.20g x %.20g with channels=%.20g, depth=%.20g, mode=%s", (double) psd_info.columns,(double) psd_info.rows,(double) psd_info.channels,(double) psd_info.depth,ModeToString((PSDImageType) psd_info.mode)); if (EOFBlob(image) != MagickFalse) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Initialize image. */ image->depth=psd_info.depth; image->columns=psd_info.columns; image->rows=psd_info.rows; status=SetImageExtent(image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImageList(image)); status=ResetImagePixels(image,exception); if (status == MagickFalse) return(DestroyImageList(image)); psd_info.min_channels=3; if (psd_info.mode == LabMode) SetImageColorspace(image,LabColorspace,exception); if (psd_info.mode == CMYKMode) { psd_info.min_channels=4; SetImageColorspace(image,CMYKColorspace,exception); if (psd_info.channels > 4) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); } else if ((psd_info.mode == BitmapMode) || (psd_info.mode == GrayscaleMode) || (psd_info.mode == DuotoneMode)) { if (psd_info.depth != 32) { status=AcquireImageColormap(image,psd_info.depth < 16 ? 256 : 65536, exception); if (status == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " Image colormap allocated"); } psd_info.min_channels=1; SetImageColorspace(image,GRAYColorspace,exception); if (psd_info.channels > 1) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); } else if (psd_info.channels > 3) SetImageAlphaChannel(image,ActivateAlphaChannel,exception); if (psd_info.channels < psd_info.min_channels) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); /* Read PSD raster colormap only present for indexed and duotone images. */ length=ReadBlobMSBLong(image); if ((psd_info.mode == IndexedMode) && (length < 3)) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (length != 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading colormap"); if ((psd_info.mode == DuotoneMode) || (psd_info.depth == 32)) { /* Duotone image data; the format of this data is undocumented. 
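The same skip is applied to images at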
32 bits per pixel; the colormap is ignored. */ (void) SeekBlob(image,(const MagickOffsetType) length,SEEK_CUR); } else { size_t number_colors; /* Read PSD raster colormap. */ number_colors=length/3; if (number_colors > 65536) ThrowReaderException(CorruptImageError,"ImproperImageHeader"); if (AcquireImageColormap(image,number_colors,exception) == MagickFalse) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].red=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].green=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].blue=ScaleCharToQuantum((unsigned char) ReadBlobByte(image)); image->alpha_trait=UndefinedPixelTrait; } } if ((image->depth == 1) && (image->storage_class != PseudoClass)) ThrowReaderException(CorruptImageError, "ImproperImageHeader"); has_merged_image=MagickTrue; profile=(StringInfo *) NULL; length=ReadBlobMSBLong(image); if (length != 0) { unsigned char *blocks; /* Image resources block. */ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading image resource blocks - %.20g bytes",(double) ((MagickOffsetType) length)); if (length > GetBlobSize(image)) ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); blocks=(unsigned char *) AcquireQuantumMemory((size_t) length, sizeof(*blocks)); if (blocks == (unsigned char *) NULL) ThrowReaderException(ResourceLimitError,"MemoryAllocationFailed"); count=ReadBlob(image,(size_t) length,blocks); if ((count != (ssize_t) length) || (length < 4) || (LocaleNCompare((char *) blocks,"8BIM",4) != 0)) { blocks=(unsigned char *) RelinquishMagickMemory(blocks); ThrowReaderException(CorruptImageError,"ImproperImageHeader"); } profile=ParseImageResourceBlocks(image,blocks,(size_t) length, &has_merged_image,exception); blocks=(unsigned char *) RelinquishMagickMemory(blocks); } /* Layer and mask block. */ length=GetPSDSize(&psd_info,image); if (length == 8) { length=ReadBlobMSBLong(image); length=ReadBlobMSBLong(image); } offset=TellBlob(image); skip_layers=MagickFalse; if ((image_info->number_scenes == 1) && (image_info->scene == 0) && (has_merged_image != MagickFalse)) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " read composite only"); skip_layers=MagickTrue; } if (length == 0) { if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " image has no layers"); } else { if (ReadPSDLayersInternal(image,image_info,&psd_info,skip_layers, exception) != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } /* Skip the rest of the layer and mask information. */ SeekBlob(image,offset+length,SEEK_SET); } /* If we are only "pinging" the image, then we're done - so return. */ if (EOFBlob(image) != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"UnexpectedEndOfFile"); } if (image_info->ping != MagickFalse) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* Read the precombined layer, present for PSD < 4 compatibility. 
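This flattened composite follows the layer and mask block; readers that ignore layers use it as the image.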
*/ if (image->debug != MagickFalse) (void) LogMagickEvent(CoderEvent,GetMagickModule(), " reading the precombined layer"); imageListLength=GetImageListLength(image); if ((has_merged_image != MagickFalse) || (imageListLength == 1)) has_merged_image=(MagickBooleanType) ReadPSDMergedImage(image_info,image, &psd_info,exception); if ((has_merged_image == MagickFalse) && (imageListLength == 1) && (length != 0)) { SeekBlob(image,offset,SEEK_SET); status=ReadPSDLayersInternal(image,image_info,&psd_info,MagickFalse, exception); if (status != MagickTrue) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); (void) CloseBlob(image); image=DestroyImageList(image); return((Image *) NULL); } } if (has_merged_image == MagickFalse) { Image *merged; if (imageListLength == 1) { if (profile != (StringInfo *) NULL) profile=DestroyStringInfo(profile); ThrowReaderException(CorruptImageError,"InsufficientImageDataInFile"); } image->background_color.alpha=TransparentAlpha; image->background_color.alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(image,exception); merged=MergeImageLayers(image,FlattenLayer,exception); ReplaceImageInList(&image,merged); } if (profile != (StringInfo *) NULL) { (void) SetImageProfile(image,GetStringInfoName(profile),profile, exception); profile=DestroyStringInfo(profile); } (void) CloseBlob(image); return(GetFirstImageInList(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RegisterPSDImage() adds properties for the PSD image format to % the list of supported formats. The properties include the image format % tag, a method to read and/or write the format, whether the format % supports the saving of more than one frame to the same file or blob, % whether the format supports native in-memory I/O, and a brief % description of the format. % % The format of the RegisterPSDImage method is: % % size_t RegisterPSDImage(void) % */ ModuleExport size_t RegisterPSDImage(void) { MagickInfo *entry; entry=AcquireMagickInfo("PSD","PSB","Adobe Large Document Format"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); entry=AcquireMagickInfo("PSD","PSD","Adobe Photoshop bitmap"); entry->decoder=(DecodeImageHandler *) ReadPSDImage; entry->encoder=(EncodeImageHandler *) WritePSDImage; entry->magick=(IsImageFormatHandler *) IsPSD; entry->flags|=CoderDecoderSeekableStreamFlag; entry->flags|=CoderEncoderSeekableStreamFlag; (void) RegisterMagickInfo(entry); return(MagickImageCoderSignature); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % U n r e g i s t e r P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % UnregisterPSDImage() removes format registrations made by the % PSD module from the list of supported formats. 
% % The format of the UnregisterPSDImage method is: % % UnregisterPSDImage(void) % */ ModuleExport void UnregisterPSDImage(void) { (void) UnregisterMagickInfo("PSB"); (void) UnregisterMagickInfo("PSD"); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % W r i t e P S D I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % WritePSDImage() writes an image in the Adobe Photoshop encoded image format. % % The format of the WritePSDImage method is: % % MagickBooleanType WritePSDImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image_info: the image info. % % o image: The image. % % o exception: return any errors or warnings in this structure. % */ static inline ssize_t SetPSDOffset(const PSDInfo *psd_info,Image *image, const size_t offset) { if (psd_info->version == 1) return(WriteBlobMSBShort(image,(unsigned short) offset)); return(WriteBlobMSBLong(image,(unsigned int) offset)); } static inline ssize_t WritePSDOffset(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickSizeType offset) { MagickSizeType current_offset; ssize_t result; current_offset=TellBlob(image); SeekBlob(image,offset,SEEK_SET); if (psd_info->version == 1) result=WriteBlobMSBShort(image,(unsigned short) size); else result=WriteBlobMSBLong(image,(unsigned int) size); SeekBlob(image,current_offset,SEEK_SET); return(result); } static inline ssize_t SetPSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size) { if (psd_info->version == 1) return(WriteBlobLong(image,(unsigned int) size)); return(WriteBlobLongLong(image,size)); } static inline ssize_t WritePSDSize(const PSDInfo *psd_info,Image *image, const MagickSizeType size,const MagickSizeType offset) { MagickSizeType current_offset; ssize_t result; current_offset=TellBlob(image); SeekBlob(image,offset,SEEK_SET); result=SetPSDSize(psd_info, image, size); SeekBlob(image,current_offset,SEEK_SET); return(result); } static size_t PSDPackbitsEncodeImage(Image *image,const size_t length, const unsigned char *pixels,unsigned char *compact_pixels, ExceptionInfo *exception) { int count; register ssize_t i, j; register unsigned char *q; unsigned char *packbits; /* Compress pixels with Packbits encoding. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pixels != (unsigned char *) NULL); assert(compact_pixels != (unsigned char *) NULL); packbits=(unsigned char *) AcquireQuantumMemory(128UL,sizeof(*packbits)); if (packbits == (unsigned char *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); q=compact_pixels; for (i=(ssize_t) length; i != 0; ) { switch (i) { case 1: { i--; *q++=(unsigned char) 0; *q++=(*pixels); break; } case 2: { i-=2; *q++=(unsigned char) 1; *q++=(*pixels); *q++=pixels[1]; break; } case 3: { i-=3; if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { *q++=(unsigned char) ((256-3)+1); *q++=(*pixels); break; } *q++=(unsigned char) 2; *q++=(*pixels); *q++=pixels[1]; *q++=pixels[2]; break; } default: { if ((*pixels == *(pixels+1)) && (*(pixels+1) == *(pixels+2))) { /* Packed run. 
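PackBits run: the header byte 257-count tells the decoder to repeat the next byte count times; runs are capped at 127 bytes.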
*/ count=3; while (((ssize_t) count < i) && (*pixels == *(pixels+count))) { count++; if (count >= 127) break; } i-=count; *q++=(unsigned char) ((256-count)+1); *q++=(*pixels); pixels+=count; break; } /* Literal run. */ count=0; while ((*(pixels+count) != *(pixels+count+1)) || (*(pixels+count+1) != *(pixels+count+2))) { packbits[count+1]=pixels[count]; count++; if (((ssize_t) count >= (i-3)) || (count >= 127)) break; } i-=count; *packbits=(unsigned char) (count-1); for (j=0; j <= (ssize_t) count; j++) *q++=packbits[j]; pixels+=count; break; } } } *q++=(unsigned char) 128; /* EOD marker */ packbits=(unsigned char *) RelinquishMagickMemory(packbits); return((size_t) (q-compact_pixels)); } static size_t WriteCompressionStart(const PSDInfo *psd_info,Image *image, const Image *next_image,const CompressionType compression, const ssize_t channels) { size_t length; ssize_t i, y; if (compression == RLECompression) { length=WriteBlobShort(image,RLE); for (i=0; i < channels; i++) for (y=0; y < (ssize_t) next_image->rows; y++) length+=SetPSDOffset(psd_info,image,0); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) length=WriteBlobShort(image,ZipWithoutPrediction); #endif else length=WriteBlobShort(image,Raw); return(length); } static size_t WritePSDChannel(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, const QuantumType quantum_type, unsigned char *compact_pixels, MagickOffsetType size_offset,const MagickBooleanType separate, const CompressionType compression,ExceptionInfo *exception) { int y; MagickBooleanType monochrome; QuantumInfo *quantum_info; register const Quantum *p; register ssize_t i; size_t count, length; unsigned char *pixels; #ifdef MAGICKCORE_ZLIB_DELEGATE #define CHUNK 16384 int flush, level; unsigned char *compressed_pixels; z_stream stream; compressed_pixels=(unsigned char *) NULL; flush=Z_NO_FLUSH; #endif count=0; if (separate != MagickFalse) { size_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression,1); } if (next_image->depth > 8) next_image->depth=16; monochrome=IsImageMonochrome(image) && (image->depth == 1) ? 
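/* 1-bit scanlines are complemented below, since PSD stores black as a set bit */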
MagickTrue : MagickFalse; quantum_info=AcquireQuantumInfo(image_info,next_image); if (quantum_info == (QuantumInfo *) NULL) return(0); pixels=(unsigned char *) GetQuantumPixels(quantum_info); #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { compressed_pixels=(unsigned char *) AcquireQuantumMemory(CHUNK, sizeof(*compressed_pixels)); if (compressed_pixels == (unsigned char *) NULL) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } memset(&stream,0,sizeof(stream)); stream.data_type=Z_BINARY; level=Z_DEFAULT_COMPRESSION; if ((image_info->quality > 0 && image_info->quality < 10)) level=(int) image_info->quality; if (deflateInit(&stream,level) != Z_OK) { quantum_info=DestroyQuantumInfo(quantum_info); return(0); } } #endif for (y=0; y < (ssize_t) next_image->rows; y++) { p=GetVirtualPixels(next_image,0,y,next_image->columns,1,exception); if (p == (const Quantum *) NULL) break; length=ExportQuantumPixels(next_image,(CacheView *) NULL,quantum_info, quantum_type,pixels,exception); if (monochrome != MagickFalse) for (i=0; i < (ssize_t) length; i++) pixels[i]=(~pixels[i]); if (compression == RLECompression) { length=PSDPackbitsEncodeImage(image,length,pixels,compact_pixels, exception); count+=WriteBlob(image,length,compact_pixels); size_offset+=WritePSDOffset(psd_info,image,length,size_offset); } #ifdef MAGICKCORE_ZLIB_DELEGATE else if (compression == ZipCompression) { stream.avail_in=(uInt) length; stream.next_in=(Bytef *) pixels; if (y == (ssize_t) next_image->rows-1) flush=Z_FINISH; do { stream.avail_out=(uInt) CHUNK; stream.next_out=(Bytef *) compressed_pixels; if (deflate(&stream,flush) == Z_STREAM_ERROR) break; length=(size_t) CHUNK-stream.avail_out; if (length > 0) count+=WriteBlob(image,length,compressed_pixels); } while (stream.avail_out == 0); } #endif else count+=WriteBlob(image,length,pixels); } #ifdef MAGICKCORE_ZLIB_DELEGATE if (compression == ZipCompression) { (void) deflateEnd(&stream); compressed_pixels=(unsigned char *) RelinquishMagickMemory( compressed_pixels); } #endif quantum_info=DestroyQuantumInfo(quantum_info); return(count); } static unsigned char *AcquireCompactPixels(const Image *image, ExceptionInfo *exception) { size_t packet_size; unsigned char *compact_pixels; packet_size=image->depth > 8UL ? 2UL : 1UL; compact_pixels=(unsigned char *) AcquireQuantumMemory((9* image->columns)+1,packet_size*sizeof(*compact_pixels)); if (compact_pixels == (unsigned char *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(), ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename); } return(compact_pixels); } static size_t WritePSDChannels(const PSDInfo *psd_info, const ImageInfo *image_info,Image *image,Image *next_image, MagickOffsetType size_offset,const MagickBooleanType separate, ExceptionInfo *exception) { CompressionType compression; Image *mask; MagickOffsetType rows_offset; size_t channels, count, length, offset_length; unsigned char *compact_pixels; count=0; offset_length=0; rows_offset=0; compact_pixels=(unsigned char *) NULL; compression=next_image->compression; if (image_info->compression != UndefinedCompression) compression=image_info->compression; if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(next_image,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } channels=1; if (separate == MagickFalse) { if (next_image->storage_class != PseudoClass) { if (IsImageGray(next_image) == MagickFalse) channels=next_image->colorspace == CMYKColorspace ? 
4 : 3; if (next_image->alpha_trait != UndefinedPixelTrait) channels++; } rows_offset=TellBlob(image)+2; count+=WriteCompressionStart(psd_info,image,next_image,compression, channels); offset_length=(next_image->rows*(psd_info->version == 1 ? 2 : 4)); } size_offset+=2; if (next_image->storage_class == PseudoClass) { length=WritePSDChannel(psd_info,image_info,image,next_image, IndexQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (IsImageGray(next_image) != MagickFalse) { length=WritePSDChannel(psd_info,image_info,image,next_image, GrayQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } else { if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); length=WritePSDChannel(psd_info,image_info,image,next_image, RedQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, GreenQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; length=WritePSDChannel(psd_info,image_info,image,next_image, BlueQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; if (next_image->colorspace == CMYKColorspace) { length=WritePSDChannel(psd_info,image_info,image,next_image, BlackQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } if (next_image->alpha_trait != UndefinedPixelTrait) { length=WritePSDChannel(psd_info,image_info,image,next_image, AlphaQuantum,compact_pixels,rows_offset,separate,compression, exception); if (separate != MagickFalse) size_offset+=WritePSDSize(psd_info,image,length,size_offset)+2; else rows_offset+=offset_length; count+=length; } } compact_pixels=(unsigned char *) RelinquishMagickMemory(compact_pixels); if (next_image->colorspace == CMYKColorspace) (void) NegateCMYK(next_image,exception); if (separate != MagickFalse) { const char *property; property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property, exception); if (mask != (Image *) NULL) { if (compression == RLECompression) { compact_pixels=AcquireCompactPixels(mask,exception); if (compact_pixels == (unsigned char *) NULL) return(0); } length=WritePSDChannel(psd_info,image_info,image,mask, RedQuantum,compact_pixels,rows_offset,MagickTrue,compression, exception); (void) WritePSDSize(psd_info,image,length,size_offset); count+=length; compact_pixels=(unsigned char *) RelinquishMagickMemory( compact_pixels); } } } return(count); } static size_t WritePascalString(Image *image,const char *value,size_t padding) { size_t count, length; register ssize_t i; /* Max length is 255. */ count=0; length=(strlen(value) > 255UL ) ? 
255UL : strlen(value); if (length == 0) count+=WriteBlobByte(image,0); else { count+=WriteBlobByte(image,(unsigned char) length); count+=WriteBlob(image,length,(const unsigned char *) value); } length++; if ((length % padding) == 0) return(count); for (i=0; i < (ssize_t) (padding-(length % padding)); i++) count+=WriteBlobByte(image,0); return(count); } static void WriteResolutionResourceBlock(Image *image) { double x_resolution, y_resolution; unsigned short units; if (image->units == PixelsPerCentimeterResolution) { x_resolution=2.54*65536.0*image->resolution.x+0.5; y_resolution=2.54*65536.0*image->resolution.y+0.5; units=2; } else { x_resolution=65536.0*image->resolution.x+0.5; y_resolution=65536.0*image->resolution.y+0.5; units=1; } (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x03ED); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,16); /* resource size */ (void) WriteBlobMSBLong(image,(unsigned int) (x_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* horizontal resolution unit */ (void) WriteBlobMSBShort(image,units); /* width unit */ (void) WriteBlobMSBLong(image,(unsigned int) (y_resolution+0.5)); (void) WriteBlobMSBShort(image,units); /* vertical resolution unit */ (void) WriteBlobMSBShort(image,units); /* height unit */ } static inline size_t WriteChannelSize(const PSDInfo *psd_info,Image *image, const signed short channel) { size_t count; count=(size_t) WriteBlobShort(image,channel); count+=SetPSDSize(psd_info,image,0); return(count); } static void RemoveICCProfileFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) break; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); if (id == 0x0000040f) { ssize_t quantum; quantum=PSDQuantum(count)+12; if ((quantum >= 12) && (quantum < (ssize_t) length)) { if ((q+quantum < (datum+length-16))) (void) memmove(q,q+quantum,length-quantum-(q-datum)); SetStringInfoLength(bim_profile,length-quantum); } break; } p+=count; if ((count & 0x01) != 0) p++; } } static void RemoveResolutionFromResourceBlock(StringInfo *bim_profile) { register const unsigned char *p; size_t length; unsigned char *datum; unsigned int count, long_sans; unsigned short id, short_sans; length=GetStringInfoLength(bim_profile); if (length < 16) return; datum=GetStringInfoDatum(bim_profile); for (p=datum; (p >= datum) && (p < (datum+length-16)); ) { register unsigned char *q; ssize_t cnt; q=(unsigned char *) p; if (LocaleNCompare((const char *) p,"8BIM",4) != 0) return; p=PushLongPixel(MSBEndian,p,&long_sans); p=PushShortPixel(MSBEndian,p,&id); p=PushShortPixel(MSBEndian,p,&short_sans); p=PushLongPixel(MSBEndian,p,&count); cnt=PSDQuantum(count); if (cnt < 0) return; if ((id == 0x000003ed) && (cnt < (ssize_t) (length-12)) && ((ssize_t) length-(cnt+12)-(q-datum)) > 0) { (void) memmove(q,q+cnt+12,length-(cnt+12)-(q-datum)); SetStringInfoLength(bim_profile,length-(cnt+12)); break; } p+=count; if ((count & 0x01) != 0) p++; } } static const StringInfo *GetAdditionalInformation(const ImageInfo *image_info, Image 
*image,ExceptionInfo *exception) { #define PSDKeySize 5 #define PSDAllowedLength 36 char key[PSDKeySize]; /* Whitelist of keys from: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/ */ const char allowed[PSDAllowedLength][PSDKeySize] = { "blnc", "blwh", "brit", "brst", "clbl", "clrL", "curv", "expA", "FMsk", "GdFl", "grdm", "hue ", "hue2", "infx", "knko", "lclr", "levl", "lnsr", "lfx2", "luni", "lrFX", "lspf", "lyid", "lyvr", "mixr", "nvrt", "phfl", "post", "PtFl", "selc", "shpa", "sn2P", "SoCo", "thrs", "tsly", "vibA" }, *option; const StringInfo *info; MagickBooleanType found; register size_t i; size_t remaining_length, length; StringInfo *profile; unsigned char *p; unsigned int size; info=GetImageProfile(image,"psd:additional-info"); if (info == (const StringInfo *) NULL) return((const StringInfo *) NULL); option=GetImageOption(image_info,"psd:additional-info"); if (LocaleCompare(option,"all") == 0) return(info); if (LocaleCompare(option,"selective") != 0) { profile=RemoveImageProfile(image,"psd:additional-info"); return(DestroyStringInfo(profile)); } length=GetStringInfoLength(info); p=GetStringInfoDatum(info); remaining_length=length; length=0; while (remaining_length >= 12) { /* skip over signature */ p+=4; key[0]=(*p++); key[1]=(*p++); key[2]=(*p++); key[3]=(*p++); key[4]='\0'; size=(unsigned int) (*p++) << 24; size|=(unsigned int) (*p++) << 16; size|=(unsigned int) (*p++) << 8; size|=(unsigned int) (*p++); size=size & 0xffffffff; remaining_length-=12; if ((size_t) size > remaining_length) return((const StringInfo *) NULL); found=MagickFalse; for (i=0; i < PSDAllowedLength; i++) { if (LocaleNCompare(key,allowed[i],PSDKeySize) != 0) continue; found=MagickTrue; break; } remaining_length-=(size_t) size; if (found == MagickFalse) { if (remaining_length > 0) p=(unsigned char *) memmove(p-12,p+size,remaining_length); continue; } length+=(size_t) size+12; p+=size; } profile=RemoveImageProfile(image,"psd:additional-info"); if (length == 0) return(DestroyStringInfo(profile)); SetStringInfoLength(profile,(const size_t) length); SetImageProfile(image,"psd:additional-info",info,exception); return(profile); } static MagickBooleanType WritePSDLayersInternal(Image *image, const ImageInfo *image_info,const PSDInfo *psd_info,size_t *layers_size, ExceptionInfo *exception) { char layer_name[MagickPathExtent]; const char *property; const StringInfo *info; Image *base_image, *next_image; MagickBooleanType status; MagickOffsetType *layer_size_offsets, size_offset; register ssize_t i; size_t layer_count, layer_index, length, name_length, rounded_size, size; status=MagickTrue; base_image=GetNextImageInList(image); if (base_image == (Image *) NULL) base_image=image; size=0; size_offset=TellBlob(image); SetPSDSize(psd_info,image,0); layer_count=0; for (next_image=base_image; next_image != NULL; ) { layer_count++; next_image=GetNextImageInList(next_image); } if (image->alpha_trait != UndefinedPixelTrait) size+=WriteBlobShort(image,-(unsigned short) layer_count); else size+=WriteBlobShort(image,(unsigned short) layer_count); layer_size_offsets=(MagickOffsetType *) AcquireQuantumMemory( (size_t) layer_count,sizeof(MagickOffsetType)); if (layer_size_offsets == (MagickOffsetType *) NULL) ThrowWriterException(ResourceLimitError,"MemoryAllocationFailed"); layer_index=0; for (next_image=base_image; next_image != NULL; ) { Image *mask; unsigned char default_color; unsigned short channels, total_channels; mask=(Image *) NULL; property=GetImageArtifact(next_image,"psd:opacity-mask"); default_color=0; if 
(property != (const char *) NULL) { mask=(Image *) GetImageRegistry(ImageRegistryType,property,exception); default_color=strlen(property) == 9 ? 255 : 0; } size+=WriteBlobSignedLong(image,(signed int) next_image->page.y); size+=WriteBlobSignedLong(image,(signed int) next_image->page.x); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.y+ next_image->rows)); size+=WriteBlobSignedLong(image,(signed int) (next_image->page.x+ next_image->columns)); channels=1U; if ((next_image->storage_class != PseudoClass) && (IsImageGray(next_image) == MagickFalse)) channels=next_image->colorspace == CMYKColorspace ? 4U : 3U; total_channels=channels; if (next_image->alpha_trait != UndefinedPixelTrait) total_channels++; if (mask != (Image *) NULL) total_channels++; size+=WriteBlobShort(image,total_channels); layer_size_offsets[layer_index++]=TellBlob(image); for (i=0; i < (ssize_t) channels; i++) size+=WriteChannelSize(psd_info,image,(signed short) i); if (next_image->alpha_trait != UndefinedPixelTrait) size+=WriteChannelSize(psd_info,image,-1); if (mask != (Image *) NULL) size+=WriteChannelSize(psd_info,image,-2); size+=WriteBlobString(image,image->endian == LSBEndian ? "MIB8" :"8BIM"); size+=WriteBlobString(image,CompositeOperatorToPSDBlendMode(next_image)); property=GetImageArtifact(next_image,"psd:layer.opacity"); if (property != (const char *) NULL) { Quantum opacity; opacity=(Quantum) StringToInteger(property); size+=WriteBlobByte(image,ScaleQuantumToChar(opacity)); (void) ApplyPSDLayerOpacity(next_image,opacity,MagickTrue,exception); } else size+=WriteBlobByte(image,255); size+=WriteBlobByte(image,0); size+=WriteBlobByte(image,next_image->compose==NoCompositeOp ? 1 << 0x02 : 1); /* layer properties - visible, etc. */ size+=WriteBlobByte(image,0); info=GetAdditionalInformation(image_info,next_image,exception); property=(const char *) GetImageProperty(next_image,"label",exception); if (property == (const char *) NULL) { (void) FormatLocaleString(layer_name,MagickPathExtent,"L%.20g", (double) layer_index); property=layer_name; } name_length=strlen(property)+1; if ((name_length % 4) != 0) name_length+=(4-(name_length % 4)); if (info != (const StringInfo *) NULL) name_length+=GetStringInfoLength(info); name_length+=8; if (mask != (Image *) NULL) name_length+=20; size+=WriteBlobLong(image,(unsigned int) name_length); if (mask == (Image *) NULL) size+=WriteBlobLong(image,0); else { if (mask->compose != NoCompositeOp) (void) ApplyPSDOpacityMask(next_image,mask,ScaleCharToQuantum( default_color),MagickTrue,exception); mask->page.y+=image->page.y; mask->page.x+=image->page.x; size+=WriteBlobLong(image,20); size+=WriteBlobSignedLong(image,mask->page.y); size+=WriteBlobSignedLong(image,mask->page.x); size+=WriteBlobSignedLong(image,(const signed int) mask->rows+ mask->page.y); size+=WriteBlobSignedLong(image,(const signed int) mask->columns+ mask->page.x); size+=WriteBlobByte(image,default_color); size+=WriteBlobByte(image,mask->compose == NoCompositeOp ? 2 : 0); size+=WriteBlobMSBShort(image,0); } size+=WriteBlobLong(image,0); size+=WritePascalString(image,property,4); if (info != (const StringInfo *) NULL) size+=WriteBlob(image,GetStringInfoLength(info), GetStringInfoDatum(info)); next_image=GetNextImageInList(next_image); } /* Now the image data! 
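Channel data for every layer follows the layer records, written here in the same list order.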
*/ next_image=base_image; layer_index=0; while (next_image != NULL) { length=WritePSDChannels(psd_info,image_info,image,next_image, layer_size_offsets[layer_index++],MagickTrue,exception); if (length == 0) { status=MagickFalse; break; } size+=length; next_image=GetNextImageInList(next_image); } /* Write the total size */ if (layers_size != (size_t*) NULL) *layers_size=size; if ((size/2) != ((size+1)/2)) rounded_size=size+1; else rounded_size=size; (void) WritePSDSize(psd_info,image,rounded_size,size_offset); layer_size_offsets=(MagickOffsetType *) RelinquishMagickMemory( layer_size_offsets); /* Remove the opacity mask from the registry */ next_image=base_image; while (next_image != (Image *) NULL) { property=GetImageArtifact(next_image,"psd:opacity-mask"); if (property != (const char *) NULL) DeleteImageRegistry(property); next_image=GetNextImageInList(next_image); } return(status); } ModuleExport MagickBooleanType WritePSDLayers(Image * image, const ImageInfo *image_info,const PSDInfo *psd_info,ExceptionInfo *exception) { PolicyDomain domain; PolicyRights rights; domain=CoderPolicyDomain; rights=WritePolicyRights; if (IsRightsAuthorized(domain,rights,"PSD") == MagickFalse) return(MagickTrue); return WritePSDLayersInternal(image,image_info,psd_info,(size_t*) NULL, exception); } static MagickBooleanType WritePSDImage(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const StringInfo *icc_profile; MagickBooleanType status; PSDInfo psd_info; register ssize_t i; size_t length, num_channels, packet_size; StringInfo *bim_profile; /* Open image file. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); status=OpenBlob(image_info,image,WriteBinaryBlobMode,exception); if (status == MagickFalse) return(status); packet_size=(size_t) (image->depth > 8 ? 6 : 3); if (image->alpha_trait != UndefinedPixelTrait) packet_size+=image->depth > 8 ? 2 : 1; psd_info.version=1; if ((LocaleCompare(image_info->magick,"PSB") == 0) || (image->columns > 30000) || (image->rows > 30000)) psd_info.version=2; (void) WriteBlob(image,4,(const unsigned char *) "8BPS"); (void) WriteBlobMSBShort(image,psd_info.version); /* version */ for (i=1; i <= 6; i++) (void) WriteBlobByte(image, 0); /* 6 bytes of reserved */ /* When the image has a color profile it won't be converted to gray scale */ if ((GetImageProfile(image,"icc") == (StringInfo *) NULL) && (SetImageGray(image,exception) != MagickFalse)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else if ((image_info->type != TrueColorType) && (image_info->type != TrueColorAlphaType) && (image->storage_class == PseudoClass)) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 2UL : 1UL); else { if (image->storage_class == PseudoClass) (void) SetImageStorageClass(image,DirectClass,exception); if (image->colorspace != CMYKColorspace) num_channels=(image->alpha_trait != UndefinedPixelTrait ? 4UL : 3UL); else num_channels=(image->alpha_trait != UndefinedPixelTrait ? 
5UL : 4UL); } (void) WriteBlobMSBShort(image,(unsigned short) num_channels); (void) WriteBlobMSBLong(image,(unsigned int) image->rows); (void) WriteBlobMSBLong(image,(unsigned int) image->columns); if (IsImageGray(image) != MagickFalse) { MagickBooleanType monochrome; /* Write depth & mode. */ monochrome=IsImageMonochrome(image) && (image->depth == 1) ? MagickTrue : MagickFalse; (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? 1 : image->depth > 8 ? 16 : 8)); (void) WriteBlobMSBShort(image,(unsigned short) (monochrome != MagickFalse ? BitmapMode : GrayscaleMode)); } else { (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? 8 : image->depth > 8 ? 16 : 8)); if (((image_info->colorspace != UndefinedColorspace) || (image->colorspace != CMYKColorspace)) && (image_info->colorspace != CMYKColorspace)) { (void) TransformImageColorspace(image,sRGBColorspace,exception); (void) WriteBlobMSBShort(image,(unsigned short) (image->storage_class == PseudoClass ? IndexedMode : RGBMode)); } else { if (image->colorspace != CMYKColorspace) (void) TransformImageColorspace(image,CMYKColorspace,exception); (void) WriteBlobMSBShort(image,CMYKMode); } } if ((IsImageGray(image) != MagickFalse) || (image->storage_class == DirectClass) || (image->colors > 256)) (void) WriteBlobMSBLong(image,0); else { /* Write PSD raster colormap. */ (void) WriteBlobMSBLong(image,768); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].red)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar( image->colormap[i].green)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); for (i=0; i < (ssize_t) image->colors; i++) (void) WriteBlobByte(image,ScaleQuantumToChar(image->colormap[i].blue)); for ( ; i < 256; i++) (void) WriteBlobByte(image,0); } /* Image resource block. */ length=28; /* 0x03EB */ bim_profile=(StringInfo *) GetImageProfile(image,"8bim"); icc_profile=GetImageProfile(image,"icc"); if (bim_profile != (StringInfo *) NULL) { bim_profile=CloneStringInfo(bim_profile); if (icc_profile != (StringInfo *) NULL) RemoveICCProfileFromResourceBlock(bim_profile); RemoveResolutionFromResourceBlock(bim_profile); length+=PSDQuantum(GetStringInfoLength(bim_profile)); } if (icc_profile != (const StringInfo *) NULL) length+=PSDQuantum(GetStringInfoLength(icc_profile))+12; (void) WriteBlobMSBLong(image,(unsigned int) length); WriteResolutionResourceBlock(image); if (bim_profile != (StringInfo *) NULL) { (void) WriteBlob(image,GetStringInfoLength(bim_profile), GetStringInfoDatum(bim_profile)); bim_profile=DestroyStringInfo(bim_profile); } if (icc_profile != (StringInfo *) NULL) { (void) WriteBlob(image,4,(const unsigned char *) "8BIM"); (void) WriteBlobMSBShort(image,0x0000040F); (void) WriteBlobMSBShort(image,0); (void) WriteBlobMSBLong(image,(unsigned int) GetStringInfoLength( icc_profile)); (void) WriteBlob(image,GetStringInfoLength(icc_profile), GetStringInfoDatum(icc_profile)); if ((MagickOffsetType) GetStringInfoLength(icc_profile) != PSDQuantum(GetStringInfoLength(icc_profile))) (void) WriteBlobByte(image,0); } if (status != MagickFalse) { MagickOffsetType size_offset; size_t size; size_offset=TellBlob(image); SetPSDSize(&psd_info,image,0); status=WritePSDLayersInternal(image,image_info,&psd_info,&size, exception); size_offset+=WritePSDSize(&psd_info,image,size+ (psd_info.version == 1 ? 
8 : 12),size_offset); } (void) WriteBlobMSBLong(image,0); /* user mask data */ /* Write composite image. */ if (status != MagickFalse) { CompressionType compression; compression=image->compression; if (image->compression == ZipCompression) image->compression=RLECompression; if (image_info->compression != UndefinedCompression) image->compression=image_info->compression; if (WritePSDChannels(&psd_info,image_info,image,image,0,MagickFalse, exception) == 0) status=MagickFalse; image->compression=compression; } (void) CloseBlob(image); return(status); }
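Two container conventions recur throughout the PSD writer above: section lengths are stored big-endian (WriteBlobMSBLong and friends), and the layer-information block is padded to an even byte count, which is what the rounded_size computation in WritePSDLayersInternal implements. A minimal standalone sketch of both, using hypothetical stdio helpers rather than ImageMagick's blob API:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical helper: emit a 32-bit length in big-endian (MSB-first)
   order, as PSD section sizes require. */
static size_t write_msb_long(FILE *fp, unsigned int value)
{
  unsigned char buffer[4];

  buffer[0]=(unsigned char) (value >> 24);
  buffer[1]=(unsigned char) (value >> 16);
  buffer[2]=(unsigned char) (value >> 8);
  buffer[3]=(unsigned char) value;
  return(fwrite(buffer,1,4,fp));
}

/* Hypothetical helper: pad a section to an even size. The parity test
   (size/2) != ((size+1)/2) used by the writer is true exactly when size is
   odd, which is when one padding byte must be appended. */
static size_t pad_to_even(FILE *fp, size_t size)
{
  if ((size/2) != ((size+1)/2))
    {
      (void) fputc(0,fp);
      return(size+1);
    }
  return(size);
}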
GB_subassign_06n.c
//------------------------------------------------------------------------------ // GB_subassign_06n: C(I,J)<M> = A ; no S //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Method 06n: C(I,J)<M> = A ; no S // M: present // Mask_comp: false // C_replace: false // accum: NULL // A: matrix // S: none (see also GB_subassign_06s) // FULL: if A and C are dense, then C remains dense. // If A is sparse and C dense, C will likely become sparse, except if M(i,j)=0 // wherever A(i,j) is not present. So if M==A is aliased and A is sparse, then // C remains dense. Need C(I,J)<A,struct>=A kernel. Then in that case, if C // is dense it remains dense, even if A is sparse. If that change is made, // this kernel can start with converting C to sparse if A is sparse. // C is not bitmap: GB_bitmap_assign is used if C is bitmap. // M and A are not bitmap: 06s is used instead, if M or A are bitmap. #include "GB_subassign_methods.h" GrB_Info GB_subassign_06n ( GrB_Matrix C, // input: const GrB_Index *I, const int64_t nI, const int Ikind, const int64_t Icolon [3], const GrB_Index *J, const int64_t nJ, const int Jkind, const int64_t Jcolon [3], const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, GB_Context Context ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (!GB_IS_BITMAP (C)) ; ASSERT (!GB_IS_FULL (C)) ; ASSERT (!GB_IS_BITMAP (M)) ; // Method 06n is not used for M bitmap ASSERT (!GB_IS_BITMAP (A)) ; // Method 06n is not used for A bitmap ASSERT (!GB_aliased (C, M)) ; // NO ALIAS of C==M ASSERT (!GB_aliased (C, A)) ; // NO ALIAS of C==A ASSERT_MATRIX_OK (C, "C input for 06n", GB0) ; ASSERT_MATRIX_OK (M, "M input for 06n", GB0) ; ASSERT_MATRIX_OK (A, "A input for 06n", GB0) ; //-------------------------------------------------------------------------- // get inputs //-------------------------------------------------------------------------- GB_EMPTY_TASKLIST ; GB_MATRIX_WAIT_IF_JUMBLED (C) ; GB_MATRIX_WAIT_IF_JUMBLED (M) ; GB_MATRIX_WAIT_IF_JUMBLED (A) ; GB_GET_C ; // C must not be bitmap int64_t zorig = C->nzombies ; const int64_t Cnvec = C->nvec ; const int64_t *GB_RESTRICT Ch = C->h ; const int64_t *GB_RESTRICT Cp = C->p ; const bool C_is_hyper = (Ch != NULL) ; GB_GET_MASK ; GB_GET_A ; const int64_t *GB_RESTRICT Ah = A->h ; const int64_t Anvec = A->nvec ; const bool A_is_hyper = (Ah != NULL) ; GrB_BinaryOp accum = NULL ; //-------------------------------------------------------------------------- // Method 06n: C(I,J)<M> = A ; no S //-------------------------------------------------------------------------- // Time: O(nnz(M)*(log(a)+log(c)), where a and c are the # of entries in a // vector of A and C, respectively. The entries in the intersection of M // (where the entries are true) and the matrix addition C(I,J)+A must be // examined. This method scans M, and searches for entries in A and C(I,J) // using two binary searches. If M is very dense, this method can be // slower than Method 06s. This method is selected if nnz (A) >= nnz (M). // Compare with Methods 05 and 07, which use a similar algorithmic outline // and parallelization strategy. 
//-------------------------------------------------------------------------- // Parallel: slice M into coarse/fine tasks (Method 05, 06n, 07) //-------------------------------------------------------------------------- GB_SUBASSIGN_ONE_SLICE (M) ; // M cannot be jumbled //-------------------------------------------------------------------------- // phase 1: create zombies, update entries, and count pending tuples //-------------------------------------------------------------------------- #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:nzombies) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- GB_GET_TASK_DESCRIPTOR_PHASE1 ; //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // get j, the kth vector of M //------------------------------------------------------------------ int64_t j = GBH (Mh, k) ; GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k, Mvlen) ; int64_t mjnz = pM_end - pM ; if (mjnz == 0) continue ; //------------------------------------------------------------------ // get A(:,j) //------------------------------------------------------------------ int64_t pA, pA_end ; GB_VECTOR_LOOKUP (pA, pA_end, A, j) ; int64_t ajnz = pA_end - pA ; bool ajdense = (ajnz == Avlen) ; int64_t pA_start = pA ; //------------------------------------------------------------------ // get jC, the corresponding vector of C //------------------------------------------------------------------ GB_GET_jC ; int64_t cjnz = pC_end - pC_start ; if (cjnz == 0 && ajnz == 0) continue ; bool cjdense = (cjnz == Cvlen) ; //------------------------------------------------------------------ // C(I,jC)<M(:,j)> = A(:,j) ; no S //------------------------------------------------------------------ if (cjdense && ajdense) { //-------------------------------------------------------------- // C(:,jC) and A(:,j) are both dense //-------------------------------------------------------------- for ( ; pM < pM_end ; pM++) { //---------------------------------------------------------- // update C(iC,jC), but only if M(iA,j) allows it //---------------------------------------------------------- if (GB_mcast (Mx, pM, msize)) { int64_t iA = GBI (Mi, pM, Mvlen) ; GB_iC_DENSE_LOOKUP ; // find iA in A(:,j) // A(:,j) is dense; no need for binary search pA = pA_start + iA ; ASSERT (GBI (Ai, pA, Avlen) == iA) ; // ----[C A 1] or [X A 1]----------------------- // [C A 1]: action: ( =A ): copy A to C, no acc // [X A 1]: action: ( undelete ): zombie lives GB_noaccum_C_A_1_matrix ; } } } else if (cjdense) { //-------------------------------------------------------------- // C(:,jC) is dense, A(:,j) is sparse //-------------------------------------------------------------- for ( ; pM < pM_end ; pM++) { //---------------------------------------------------------- // update C(iC,jC), but only if M(iA,j) allows it //---------------------------------------------------------- if (GB_mcast (Mx, pM, msize)) { int64_t iA = GBI (Mi, pM, Mvlen) ; GB_iC_DENSE_LOOKUP ; // find iA in A(:,j) bool aij_found ; int64_t apright = pA_end - 1 ; GB_BINARY_SEARCH (iA, Ai, pA, apright, aij_found) ; if (!aij_found) { // C (iC,jC) is 
present but A (i,j) is not // ----[C . 1] or [X . 1]--------------------------- // [C . 1]: action: ( delete ): becomes zombie // [X . 1]: action: ( X ): still zombie GB_DELETE_ENTRY ; } else { // ----[C A 1] or [X A 1]--------------------------- // [C A 1]: action: ( =A ): copy A to C, no accum // [X A 1]: action: ( undelete ): zombie lives GB_noaccum_C_A_1_matrix ; } } } } else if (ajdense) { //-------------------------------------------------------------- // C(:,jC) is sparse, A(:,j) is dense //-------------------------------------------------------------- for ( ; pM < pM_end ; pM++) { //---------------------------------------------------------- // update C(iC,jC), but only if M(iA,j) allows it //---------------------------------------------------------- if (GB_mcast (Mx, pM, msize)) { int64_t iA = GBI (Mi, pM, Mvlen) ; // find C(iC,jC) in C(:,jC) GB_iC_BINARY_SEARCH ; // lookup iA in A(:,j) pA = pA_start + iA ; ASSERT (GBI (Ai, pA, Avlen) == iA) ; if (cij_found) { // ----[C A 1] or [X A 1]--------------------------- // [C A 1]: action: ( =A ): copy A into C, no accum // [X A 1]: action: ( undelete ): zombie lives GB_noaccum_C_A_1_matrix ; } else { // C (iC,jC) is not present, A (i,j) is present // ----[. A 1]-------------------------------------- // [. A 1]: action: ( insert ) task_pending++ ; } } } } else { //-------------------------------------------------------------- // C(:,jC) and A(:,j) are both sparse //-------------------------------------------------------------- for ( ; pM < pM_end ; pM++) { //---------------------------------------------------------- // update C(iC,jC), but only if M(iA,j) allows it //---------------------------------------------------------- if (GB_mcast (Mx, pM, msize)) { int64_t iA = GBI (Mi, pM, Mvlen) ; // find C(iC,jC) in C(:,jC) GB_iC_BINARY_SEARCH ; // find iA in A(:,j) bool aij_found ; int64_t apright = pA_end - 1 ; GB_BINARY_SEARCH (iA, Ai, pA, apright, aij_found) ; if (cij_found && aij_found) { // ----[C A 1] or [X A 1]--------------------------- // [C A 1]: action: ( =A ): copy A into C, no accum // [X A 1]: action: ( undelete ): zombie lives GB_noaccum_C_A_1_matrix ; } else if (!cij_found && aij_found) { // C (iC,jC) is not present, A (i,j) is present // ----[. A 1]-------------------------------------- // [. A 1]: action: ( insert ) task_pending++ ; } else if (cij_found && !aij_found) { // C (iC,jC) is present but A (i,j) is not // ----[C . 1] or [X . 1]--------------------------- // [C . 1]: action: ( delete ): becomes zombie // [X . 
1]: action: ( X ): still zombie GB_DELETE_ENTRY ; } } } } } GB_PHASE1_TASK_WRAPUP ; } //-------------------------------------------------------------------------- // phase 2: insert pending tuples //-------------------------------------------------------------------------- GB_PENDING_CUMSUM ; zorig = C->nzombies ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(&&:pending_sorted) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- GB_GET_TASK_DESCRIPTOR_PHASE2 ; //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t k = kfirst ; k <= klast ; k++) { //------------------------------------------------------------------ // get j, the kth vector of M //------------------------------------------------------------------ int64_t j = GBH (Mh, k) ; GB_GET_VECTOR (pM, pM_end, pA, pA_end, Mp, k, Mvlen) ; int64_t mjnz = pM_end - pM ; if (mjnz == 0) continue ; //------------------------------------------------------------------ // get A(:,j) //------------------------------------------------------------------ int64_t pA, pA_end ; GB_VECTOR_LOOKUP (pA, pA_end, A, j) ; int64_t ajnz = pA_end - pA ; if (ajnz == 0) continue ; bool ajdense = (ajnz == Avlen) ; int64_t pA_start = pA ; //------------------------------------------------------------------ // get jC, the corresponding vector of C //------------------------------------------------------------------ GB_GET_jC ; bool cjdense = ((pC_end - pC_start) == Cvlen) ; //------------------------------------------------------------------ // C(I,jC)<M(:,j)> = A(:,j) //------------------------------------------------------------------ if (!cjdense) { //-------------------------------------------------------------- // C(:,jC) is sparse; use binary search for C //-------------------------------------------------------------- for ( ; pM < pM_end ; pM++) { //---------------------------------------------------------- // update C(iC,jC), but only if M(iA,j) allows it //---------------------------------------------------------- if (GB_mcast (Mx, pM, msize)) { int64_t iA = GBI (Mi, pM, Mvlen) ; // find iA in A(:,j) if (ajdense) { // A(:,j) is dense; no need for binary search pA = pA_start + iA ; ASSERT (GBI (Ai, pA, Avlen) == iA) ; } else { // A(:,j) is sparse; use binary search int64_t apright = pA_end - 1 ; bool aij_found ; GB_BINARY_SEARCH (iA, Ai, pA, apright, aij_found) ; if (!aij_found) continue ; } // find C(iC,jC) in C(:,jC) GB_iC_BINARY_SEARCH ; if (!cij_found) { // C (iC,jC) is not present, A (i,j) is present // ----[. A 1]-------------------------------------- // [. A 1]: action: ( insert ) GB_PENDING_INSERT (Ax +(pA*asize)) ; } } } } } GB_PHASE2_TASK_WRAPUP ; } //-------------------------------------------------------------------------- // finalize the matrix and return result //-------------------------------------------------------------------------- GB_SUBASSIGN_WRAPUP ; }
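Method 06n locates each row index of M in A(:,j) and C(:,jC) by binary search over sorted index arrays. GB_BINARY_SEARCH is a GraphBLAS-internal macro; the sketch below spells out the equivalent lower-bound search on a sorted range Ai [pleft..pright], with illustrative names only:

#include <stdbool.h>
#include <stdint.h>

/* Search for index i in the sorted list Ai [ (*pleft) .. pright ].
   On return, *found says whether i is present and *pleft points at its
   position (or at the insertion point if absent). */
static void index_binary_search (int64_t i, const int64_t *Ai,
    int64_t *pleft, int64_t pright, bool *found)
{
    while (*pleft < pright)
    {
        int64_t pmiddle = (*pleft + pright) / 2 ;
        if (Ai [pmiddle] < i)
        {
            *pleft = pmiddle + 1 ;      // discard the lower half
        }
        else
        {
            pright = pmiddle ;          // keep the lower half
        }
    }
    (*found) = (*pleft == pright) && (Ai [*pleft] == i) ;
}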
GB_binop__gt_bool.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__gt_bool // A.*B function (eWiseMult): GB_AemultB__gt_bool // A*D function (colscale): GB_AxD__gt_bool // D*A function (rowscale): GB_DxB__gt_bool // C+=B function (dense accum): GB_Cdense_accumB__gt_bool // C+=b function (dense accum): GB_Cdense_accumb__gt_bool // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__gt_bool // C=scalar+B GB_bind1st__gt_bool // C=scalar+B' GB_bind1st_tran__gt_bool // C=A+scalar GB_bind2nd__gt_bool // C=A'+scalar GB_bind2nd_tran__gt_bool // C type: bool // A type: bool // B,b type: bool // BinaryOp: cij = (aij > bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ bool bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x > y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GT || GxB_NO_BOOL || GxB_NO_GT_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__gt_bool ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__gt_bool ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__gt_bool ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__gt_bool ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__gt_bool ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ GrB_Info GB_AaddB__gt_bool ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__gt_bool ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__gt_bool ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { bool bij = Bx [p] ; Cx [p] = (x > bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__gt_bool ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { bool aij = Ax [p] ; Cx [p] = (aij > y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (x > aij) ; \ } GrB_Info GB_bind1st_tran__gt_bool ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef  GB_ATYPE
#define GB_ATYPE \
    bool

    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool x = (*((const bool *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif

#undef  GB_ATYPE
#define GB_ATYPE \
    bool

}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)

#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    bool aij = Ax [pA] ; \
    Cx [pC] = (aij > y) ; \
}

GrB_Info GB_bind2nd_tran__gt_bool
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool y = (*((const bool *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
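As a concrete illustration of what the generated bind2nd kernel computes for GT on bool, here is a hypothetical standalone driver (this is not how GraphBLAS dispatches the kernel) that applies Cx [p] = (Ax [p] > y) across an array with the same OpenMP schedule:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main (void)
{
    bool Ax [4] = { false, true, true, false } ;
    bool Cx [4] ;
    bool y = false ;
    int64_t p ;
    #pragma omp parallel for schedule(static)
    for (p = 0 ; p < 4 ; p++)
    {
        Cx [p] = (Ax [p] > y) ;     // GT on bool: true iff aij && !y
    }
    for (p = 0 ; p < 4 ; p++)
    {
        printf ("%d\n", (int) Cx [p]) ;     // prints 0 1 1 0
    }
    return (0) ;
}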
opencl_pgpsda_fmt_plug.c
/* * Format for brute-forcing PGP SDAs (self-decrypting archives). * * This software is Copyright (c) 2017 Dhiru Kholia <dhiru at openwall.net> and * it is hereby released to the general public under the following terms: * * Redistribution and use in source and binary forms, with or without * modification, are permitted. */ #ifdef HAVE_OPENCL #if FMT_EXTERNS_H extern struct fmt_main fmt_opencl_pgpsda; #elif FMT_REGISTERS_H john_register_one(&fmt_opencl_pgpsda); #else #include <stdint.h> #include <string.h> #include <openssl/cast.h> #ifdef _OPENMP #include <omp.h> #endif #include "arch.h" #include "params.h" #include "common.h" #include "formats.h" #include "misc.h" #include "sha.h" #include "common-opencl.h" #include "options.h" #include "pgpsda_common.h" #define FORMAT_LABEL "pgpsda-opencl" #define ALGORITHM_NAME "SHA1 OpenCL" #define BINARY_SIZE 8 #define BINARY_ALIGN sizeof(uint32_t) #define SALT_SIZE sizeof(struct custom_salt) #define SALT_ALIGN sizeof(uint32_t) #define PLAINTEXT_LENGTH 124 #define MIN_KEYS_PER_CRYPT 1 #define MAX_KEYS_PER_CRYPT 1 #define BENCHMARK_COMMENT "" #define BENCHMARK_LENGTH -1001 typedef struct { uint32_t length; uint8_t v[PLAINTEXT_LENGTH]; } pgpsda_password; typedef struct { uint8_t v[16]; } pgpsda_hash; typedef struct { uint32_t iterations; uint8_t salt[8]; } pgpsda_salt; static uint32_t (*crypt_out)[BINARY_SIZE * 2 / sizeof(uint32_t)]; static struct custom_salt *cur_salt; static cl_int cl_error; static pgpsda_password *inbuffer; static pgpsda_hash *outbuffer; static pgpsda_salt currentsalt; static cl_mem mem_in, mem_out, mem_setting; static struct fmt_main *self; size_t insize, outsize, settingsize; // This file contains auto-tuning routine(s). Has to be included after formats definitions. #include "opencl-autotune.h" #include "memdbg.h" static const char *warn[] = { "xfer: ", ", crypt: ", ", xfer: " }; static size_t get_task_max_work_group_size() { return autotune_get_task_max_work_group_size(FALSE, 0, crypt_kernel); } static void create_clobj(size_t gws, struct fmt_main *self) { insize = sizeof(pgpsda_password) * gws; outsize = sizeof(pgpsda_hash) * gws; settingsize = sizeof(pgpsda_salt); crypt_out = mem_calloc(gws, sizeof(*crypt_out)); inbuffer = mem_calloc(1, insize); outbuffer = mem_alloc(outsize); // Allocate memory mem_in = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, insize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem in"); mem_setting = clCreateBuffer(context[gpu_id], CL_MEM_READ_ONLY, settingsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem setting"); mem_out = clCreateBuffer(context[gpu_id], CL_MEM_WRITE_ONLY, outsize, NULL, &cl_error); HANDLE_CLERROR(cl_error, "Error allocating mem out"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 0, sizeof(mem_in), &mem_in), "Error while setting mem_in kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 1, sizeof(mem_out), &mem_out), "Error while setting mem_out kernel argument"); HANDLE_CLERROR(clSetKernelArg(crypt_kernel, 2, sizeof(mem_setting), &mem_setting), "Error while setting mem_salt kernel argument"); } static void release_clobj(void) { if (inbuffer) { HANDLE_CLERROR(clReleaseMemObject(mem_in), "Release mem in"); HANDLE_CLERROR(clReleaseMemObject(mem_setting), "Release mem setting"); HANDLE_CLERROR(clReleaseMemObject(mem_out), "Release mem out"); MEM_FREE(inbuffer); MEM_FREE(outbuffer); } } static void init(struct fmt_main *_self) { self = _self; opencl_prepare_dev(gpu_id); } static void reset(struct db_main *db) { if (!autotuned) { char 
build_opts[64]; snprintf(build_opts, sizeof(build_opts), "-DPLAINTEXT_LENGTH=%d", PLAINTEXT_LENGTH); opencl_init("$JOHN/kernels/pgpsda_kernel.cl", gpu_id, build_opts); crypt_kernel = clCreateKernel(program[gpu_id], "pgpsda", &cl_error); HANDLE_CLERROR(cl_error, "Error creating kernel"); // Initialize openCL tuning (library) for this format. opencl_init_auto_setup(SEED, 0, NULL, warn, 1, self, create_clobj, release_clobj, sizeof(pgpsda_password), 0, db); // Auto tune execution from shared/included code. autotune_run(self, 1, 0, 300); } } static void done(void) { if (autotuned) { release_clobj(); HANDLE_CLERROR(clReleaseKernel(crypt_kernel), "Release kernel"); HANDLE_CLERROR(clReleaseProgram(program[gpu_id]), "Release Program"); autotuned--; } } static void *get_binary(char *ciphertext) { static union { unsigned char c[BINARY_SIZE]; uint32_t dummy; } buf; unsigned char *out = buf.c; char *p; int i; p = strrchr(ciphertext, '*') + 1; for (i = 0; i < BINARY_SIZE; i++) { out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])]; p += 2; } return out; } static void set_salt(void *salt) { cur_salt = (struct custom_salt *)salt; currentsalt.iterations = cur_salt->iterations; memcpy((char*)currentsalt.salt, cur_salt->salt, 8); HANDLE_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_setting, CL_FALSE, 0, settingsize, &currentsalt, 0, NULL, NULL), "Copy setting to gpu"); } #undef set_key static void set_key(char *key, int index) { uint32_t length = strlen(key); if (length > PLAINTEXT_LENGTH) length = PLAINTEXT_LENGTH; inbuffer[index].length = length; memcpy(inbuffer[index].v, key, length); } static char *get_key(int index) { static char ret[PLAINTEXT_LENGTH + 1]; uint32_t length = inbuffer[index].length; memcpy(ret, inbuffer[index].v, length); ret[length] = '\0'; return ret; } static int crypt_all(int *pcount, struct db_salt *salt) { const int count = *pcount; int index = 0; size_t *lws = local_work_size ? 
&local_work_size : NULL; global_work_size = GET_MULTIPLE_OR_BIGGER(count, local_work_size); // Copy data to gpu BENCH_CLERROR(clEnqueueWriteBuffer(queue[gpu_id], mem_in, CL_FALSE, 0, insize, inbuffer, 0, NULL, multi_profilingEvent[0]), "Copy data to gpu"); // Run kernel BENCH_CLERROR(clEnqueueNDRangeKernel(queue[gpu_id], crypt_kernel, 1, NULL, &global_work_size, lws, 0, NULL, multi_profilingEvent[1]), "Run kernel"); // Read the result back BENCH_CLERROR(clEnqueueReadBuffer(queue[gpu_id], mem_out, CL_TRUE, 0, outsize, outbuffer, 0, NULL, multi_profilingEvent[2]), "Copy result back"); if (ocl_autotune_running) return count; #ifdef _OPENMP #pragma omp parallel for #endif for (index = 0; index < count; index++) { unsigned char *key; CAST_KEY ck; key = outbuffer[index].v; CAST_set_key(&ck, 16, key); memset((unsigned char*)crypt_out[index], 0, BINARY_SIZE); CAST_ecb_encrypt(key, (unsigned char*)crypt_out[index], &ck, CAST_ENCRYPT); } return count; } static int cmp_all(void *binary, int count) { int index = 0; for (; index < count; index++) if (!memcmp(binary, crypt_out[index], ARCH_SIZE)) return 1; return 0; } static int cmp_one(void *binary, int index) { return !memcmp(binary, crypt_out[index], BINARY_SIZE); } static int cmp_exact(char *source, int index) { return 1; } struct fmt_main fmt_opencl_pgpsda = { { FORMAT_LABEL, FORMAT_NAME, ALGORITHM_NAME, BENCHMARK_COMMENT, BENCHMARK_LENGTH, 0, PLAINTEXT_LENGTH, BINARY_SIZE, BINARY_ALIGN, SALT_SIZE, SALT_ALIGN, MIN_KEYS_PER_CRYPT, MAX_KEYS_PER_CRYPT, FMT_CASE | FMT_8_BIT | FMT_OMP, { "iteration count", }, { FORMAT_TAG }, pgpsda_tests, }, { init, done, reset, fmt_default_prepare, pgpsda_common_valid, fmt_default_split, get_binary, pgpsda_common_get_salt, { pgpsda_iteration_count, }, fmt_default_source, { fmt_default_binary_hash }, fmt_default_salt_hash, NULL, set_salt, set_key, get_key, fmt_default_clear_keys, crypt_all, { fmt_default_get_hash }, cmp_all, cmp_one, cmp_exact } }; #endif /* plugin stanza */ #endif /* HAVE_OPENCL */
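get_binary() above converts the hex digits after the last '*' in the ciphertext into raw bytes via John's atoi16 lookup table. A self-contained sketch of the same decoding without the table (hex_nibble and decode_hex are hypothetical helpers, not John API):

static int hex_nibble(char c)
{
	if (c >= '0' && c <= '9') return c - '0';
	if (c >= 'a' && c <= 'f') return c - 'a' + 10;
	if (c >= 'A' && c <= 'F') return c - 'A' + 10;
	return -1;
}

static void decode_hex(const char *hex, unsigned char *out, int len)
{
	int i;

	/* two hex digits per output byte, high nibble first */
	for (i = 0; i < len; i++)
		out[i] = (unsigned char)((hex_nibble(hex[2 * i]) << 4) |
		                         hex_nibble(hex[2 * i + 1]));
}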
jacobi-task-dep.c
# include "poisson.h" /* #pragma omp task depend version of SWEEP. */ void sweep_task_dep (int nx, int ny, double dx, double dy, double *f_, int itold, int itnew, double *u_, double *unew_, int block_size) { int i; int it; int j; double (*f)[nx][ny] = (double (*)[nx][ny])f_; double (*u)[nx][ny] = (double (*)[nx][ny])u_; double (*unew)[nx][ny] = (double (*)[nx][ny])unew_; #pragma omp parallel shared (u, unew, f) private (i, j, it) firstprivate(nx, ny, dx, dy, itold, itnew) #pragma omp single { for (it = itold + 1; it <= itnew; it++) { // Save the current estimate. for (i = 0; i < nx; i++) { #pragma omp task shared(u, unew) firstprivate(i) private(j) depend(in: unew[i]) depend(out: u[i]) for (j = 0; j < ny; j++) { (*u)[i][j] = (*unew)[i][j]; } } // Compute a new estimate. for (i = 0; i < nx; i++) { #pragma omp task shared(u, unew, f) firstprivate(i, nx, ny, dx, dy) private(j) depend(in: f[i], u[i-1], u[i], u[i+1]) depend(out: unew[i]) for (j = 0; j < ny; j++) { if (i == 0 || j == 0 || i == nx - 1 || j == ny - 1) { (*unew)[i][j] = (*f)[i][j]; } else { (*unew)[i][j] = 0.25 * ((*u)[i-1][j] + (*u)[i][j+1] + (*u)[i][j-1] + (*u)[i+1][j] + (*f)[i][j] * dx * dy); } } } } } }
DRB104-nowait-barrier-orig-no.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public License as
   published by the Free Software Foundation; either version 2.1 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
   General Public License for more details.

   You should have received a copy of the GNU Lesser General Public License
   along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */

/* This header is separate from features.h so that the compiler can include
   it implicitly at the start of every compilation. It must not itself
   include <features.h> or any other header that includes <features.h>
   because the implicit include comes before any feature test macros that
   may be defined in a source file before it first explicitly includes a
   system header. GCC knows the name of this header in order to
   preinclude it. */

/* glibc's intent is to support the IEC 559 math functionality, real and
   complex. If the GCC (4.9 and later) predefined macros specifying
   compiler intent are available, use them to determine whether the overall
   intent is to support these features; otherwise, presume an older
   compiler has intent to support these features and define these macros
   by default. */

/* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is
   synchronized with ISO/IEC 10646:2017, fifth edition, plus the following
   additions from Amendment 1 to the fifth edition:
   - 56 emoji characters
   - 285 hentaigana
   - 3 additional Zanabazar Square characters */

/* Copyright (c) 2017, Lawrence Livermore National Security, LLC.
   Produced at the Lawrence Livermore National Laboratory

   Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan,
   and Ian Karlin (email: [email protected], [email protected], [email protected],
   [email protected], [email protected])

   LLNL-CODE-732144
   All rights reserved.

   This file is part of DataRaceBench. For details, see
   https://github.com/LLNL/dataracebench. Please also see the LICENSE file
   for our additional BSD notice.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

   * Redistributions of source code must retain the above copyright notice,
     this list of conditions and the disclaimer below.

   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the disclaimer (as noted below)
     in the documentation and/or other materials provided with the
     distribution.

   * Neither the name of the LLNS/LLNL nor the names of its contributors
     may be used to endorse or promote products derived from this software
     without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
   IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
   TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
   PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE
   NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
   LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
   CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
   SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
   INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
   CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
   ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
   THE POSSIBILITY OF SUCH DAMAGE. */

/* This example is based on one code snippet extracted from a paper:
   Ma etc. Symbolic Analysis of Concurrency Errors in OpenMP Programs, ICPP 2013

   Explicit barrier to counteract nowait */
#include <stdio.h>
#include <assert.h>

int main()
{
  int i, error;
  int len = 1000;
  int a[len], b = 5;
  int _ret_val_0;

  #pragma cetus private(i)
  #pragma loop name main#0
  #pragma cetus parallel
  #pragma omp parallel for private(i)
  for (i=0; i<len; i ++ )
  {
    a[i]=i;
  }

  #pragma cetus private(i)
  #pragma loop name main#1
  #pragma cetus parallel
  #pragma omp parallel for private(i)
  for (i=0; i<len; i ++ )
  {
    a[i]=(b+(a[i]*5));
  }

  error=(a[9]+1);
  assert(error == 51);
  printf("error = %d\n", error);
  _ret_val_0=0;
  return _ret_val_0;
}
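The file name refers to a nowait worksharing loop made safe by an explicit barrier, the pattern from the cited ICPP 2013 paper; the Cetus output above has already rewritten it into two separate parallel-for loops. A reconstruction of the underlying pattern, for illustration only:

#include <omp.h>

void nowait_with_barrier(int len, int *a, int b)
{
	int i;

	#pragma omp parallel shared(a, b) private(i)
	{
		#pragma omp for nowait
		for (i = 0; i < len; i++)
			a[i] = i;

		#pragma omp barrier     /* counteracts the nowait above */

		#pragma omp for nowait
		for (i = 0; i < len; i++)
			a[i] = b + a[i] * 5;
	}
}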
GB_binop__land_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_uint32) // A.*B function (eWiseMult): GB (_AemultB_08__land_uint32) // A.*B function (eWiseMult): GB (_AemultB_02__land_uint32) // A.*B function (eWiseMult): GB (_AemultB_04__land_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_uint32) // A*D function (colscale): GB (_AxD__land_uint32) // D*A function (rowscale): GB (_DxB__land_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__land_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__land_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_uint32) // C=scalar+B GB (_bind1st__land_uint32) // C=scalar+B' GB (_bind1st_tran__land_uint32) // C=A+scalar GB (_bind2nd__land_uint32) // C=A'+scalar GB (_bind2nd_tran__land_uint32) // C type: uint32_t // A type: uint32_t // A pattern? 0 // B type: uint32_t // B pattern? 0 // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint32_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint32_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = ((x != 0) && (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_UINT32 || GxB_NO_LAND_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__land_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__land_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__land_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__land_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__land_uint32) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *restrict Cx = (uint32_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__land_uint32) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint32_t alpha_scalar ; uint32_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint32_t *) 
alpha_scalar_in)) ; beta_scalar = (*((uint32_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__land_uint32) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__land_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__land_uint32) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__land_uint32) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__land_uint32) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t x = (*((uint32_t *) x_input)) ; uint32_t *Bx = (uint32_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint32_t bij = GBX (Bx, p, false) ; Cx [p] = ((x != 0) && (bij != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__land_uint32) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint32_t *Cx = (uint32_t *) Cx_output ; uint32_t *Ax = (uint32_t *) Ax_input ; uint32_t y = (*((uint32_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint32_t aij = GBX (Ax, p, false) ; Cx [p] = ((aij != 0) && (y != 0)) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((x != 0) && (aij != 0)) ; \ } GrB_Info GB (_bind1st_tran__land_uint32) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict 
*Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint32_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t x = (*((const uint32_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint32_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint32_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = ((aij != 0) && (y != 0)) ; \ } GrB_Info GB (_bind2nd_tran__land_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint32_t y = (*((const uint32_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
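The generated apply kernels above guard every entry with GBB (skip when the bitmap says the entry is absent) and read values through GBX (which also handles iso-valued matrices). A standalone sketch of that loop shape with the macros spelled out for the non-iso case (names are illustrative, not GraphBLAS API):

#include <stdint.h>

void land_bind1st_sketch (uint32_t *Cx, uint32_t x, const uint32_t *Bx,
    const int8_t *Bb, int64_t bnz, int nthreads)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // GBB (Bb, p): a NULL bitmap means all entries are present
        if (Bb != NULL && !Bb [p]) continue ;
        uint32_t bij = Bx [p] ;             // GBX (Bx, p, false)
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
}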
1747.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <[email protected]>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization. */
static
void init_array (int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nj,
                 DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
        fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
        if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return. */
static
void kernel_conv2d(int ni, int nj,
                   DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
                   DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

#pragma scop
  /* Parallelize the outer loop across threads and vectorize the inner loop.
     (`simd` is not a clause of `parallel for`, and a parallel region may not
     be nested inside a simd region.) */
#pragma omp parallel for schedule(static, 2) num_threads(2)
  for (i = 1; i < _PB_NI - 1; ++i)
    {
#pragma omp simd
      for (j = 1; j < _PB_NJ - 1; ++j)
        {
          B[i][j] =  0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
                  + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
                  +  0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] +  0.1 * A[i+1][j+1];
        }
    }
#pragma endscop
  // printf("Kernel computation complete !!\n");
}

int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
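Since the 3x3 stencil coefficients are fixed, the parallel kernel is easy to validate against a serial reference. A minimal sketch (not part of PolyBench) using plain row-major double arrays instead of the POLYBENCH_2D macros; it recomputes the interior points with the same coefficients and returns the maximum absolute deviation from a candidate result B:

#include <math.h>

/* Serial reference for the stencil in kernel_conv2d. Only the interior
 * points are compared, matching the kernel's loop bounds.  */
static double conv2d_check(int ni, int nj, const double *A, const double *B)
{
    double max_err = 0.0;
    for (int i = 1; i < ni - 1; i++)
        for (int j = 1; j < nj - 1; j++) {
            double ref = 0.2 * A[(i-1)*nj + (j-1)] + 0.5 * A[(i-1)*nj + j]
                       - 0.8 * A[(i-1)*nj + (j+1)] - 0.3 * A[i*nj + (j-1)]
                       + 0.6 * A[i*nj + j]         - 0.9 * A[i*nj + (j+1)]
                       + 0.4 * A[(i+1)*nj + (j-1)] + 0.7 * A[(i+1)*nj + j]
                       + 0.1 * A[(i+1)*nj + (j+1)];
            double err = fabs(ref - B[i*nj + j]);
            if (err > max_err) max_err = err;
        }
    return max_err;
}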
helloWorld.c
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

void hello(void);

int main(int argc, char* argv[]) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <thread_count>\n", argv[0]);
        return 1;
    }
    int thread_count = strtol(argv[1], NULL, 10);

#pragma omp parallel num_threads(thread_count)
    hello();

    return 0;
}

void hello(void) {
    int my_rank = omp_get_thread_num();
    int thread_count = omp_get_num_threads();
    printf("Hello from thread %d of %d\n", my_rank, thread_count);
}
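The order of the greetings is nondeterministic: the threads race on stdout, so compiling with `gcc -fopenmp helloWorld.c -o hello` and running `./hello 4` prints the four lines in an arbitrary interleaving. If ordered output is wanted, one option (a sketch, not from the original program) is a barrier-driven round robin inside the parallel region:

#include <stdio.h>
#include <omp.h>

/* Variant of hello() that prints in thread order: each thread takes its
 * turn, and a barrier keeps all threads in lockstep between turns. The
 * barrier sits outside the if, so every thread reaches it every turn.  */
void hello_ordered(void)
{
    int my_rank = omp_get_thread_num();
    int thread_count = omp_get_num_threads();
    for (int turn = 0; turn < thread_count; turn++)
    {
        if (turn == my_rank)
            printf("Hello from thread %d of %d\n", my_rank, thread_count);
        #pragma omp barrier
    }
}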
SupportReportC.c
/// PMlib report control routine for C programs
/// - [1] stop the Root section
/// - [2] merge thread serial/parallel sections
/// - [3] select the type of the report and start producing the report
///
/// @param[in] char* filename output file name
///
/// @note if filename is NULL (""), stdout is chosen.
///
#include <stdio.h>
#include <unistd.h>
#include <stdbool.h>
// #include "pmlib_api_C.h"

void C_pm_report (char *filename);
extern void C_pm_serial_parallel (int id, int *mid, int *inside);
extern void C_pm_stop_Root (void);
extern void C_pm_sections (int *nSections);
extern void C_pm_mergethreads (int id);
extern void C_pm_select_report (char *filename);

void C_pm_report (char *filename)
{
    int id, mid, inside;
    int nSections;

    // stop the Root section before reporting
    id = 0;
    C_pm_serial_parallel (id, &mid, &inside);
    if (inside == 0) {
        C_pm_stop_Root ();
    } else if (inside == 1) {
        #pragma omp parallel
        C_pm_stop_Root ();
    }

    // count the number of SHARED sections
    C_pm_sections (&nSections);

    // merge thread data into the master thread
    for (id = 0; id < nSections; id++) {
        C_pm_serial_parallel (id, &mid, &inside);
        if (inside == 0) {
            // The section is defined outside of a parallel context.
            C_pm_mergethreads (id);
        } else if (inside == 1) {
            // The section is defined inside a parallel context.
            // If an OpenMP parallel region is started by a C routine,
            // the merge operation must be triggered by a C routine,
            // which is outside of the PMlib C++ class parallel context.
            // The following OpenMP parallel block provides such merging support.
            #pragma omp parallel
            C_pm_mergethreads (id);
        }
    }

    // now start reporting the PMlib stats
    C_pm_select_report (filename);
    return;
}
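C_pm_report is typically the last PMlib call an application makes. A minimal sketch of the call site, assuming PMlib initialization and the instrumented section start/stop calls happen elsewhere (those routine names are not shown in this file):

extern void C_pm_report (char *filename);

int main(void)
{
    /* ... PMlib initialization and instrumented work would go here ... */

    C_pm_report("");   /* empty string: the report goes to stdout */
    return 0;
}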
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/ASTConcept.h" #include "clang/AST/ASTFwd.h" #include "clang/AST/Attr.h" #include "clang/AST/Availability.h" #include "clang/AST/ComparisonCategories.h" #include "clang/AST/DeclTemplate.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprCXX.h" #include "clang/AST/ExprConcepts.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExprOpenMP.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/LocInfoType.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/StmtCXX.h" #include "clang/AST/TypeLoc.h" #include "clang/APINotes/APINotesManager.h" #include "clang/AST/TypeOrdering.h" #include "clang/Basic/BitmaskEnum.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenCLOptions.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/PragmaKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/CleanupInfo.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/SemaConcept.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallBitVector.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include "llvm/Frontend/OpenMP/OMPConstants.h" #include <deque> #include <functional> #include <memory> #include <string> #include <tuple> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; struct InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class ParsedAttr; class BindingDecl; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDeleteExpr; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class 
CodeCompletionResult; class CoroutineBodyStmt; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPRequiresDecl; class OMPDeclareReductionDecl; class OMPDeclareSimdDecl; class OMPClause; struct OMPVarListLocTy; struct OverloadCandidate; enum class OverloadCandidateParamOrder : char; enum OverloadCandidateRewriteKind : unsigned; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateInstantiationCallback; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class Capture; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class SemaPPCallbacks; class TemplateDeductionInfo; } namespace threadSafety { class BeforeSet; void threadSafetyCleanup(BeforeSet* Cache); } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Describes whether we've seen any nullability information for the given /// file. struct FileNullability { /// The first pointer declarator (of any pointer kind) in the file that does /// not have a corresponding nullability annotation. SourceLocation PointerLoc; /// The end location for the first pointer declarator in the file. 
Used for /// placing fix-its. SourceLocation PointerEndLoc; /// Which kind of pointer declarator we saw. uint8_t PointerKind; /// Whether we saw any type nullability annotations in the given file. bool SawTypeNullability = false; }; /// A mapping from file IDs to a record of whether we've seen nullability /// information in that file. class FileNullabilityMap { /// A mapping from file IDs to the nullability information for each file ID. llvm::DenseMap<FileID, FileNullability> Map; /// A single-element cache based on the file ID. struct { FileID File; FileNullability Nullability; } Cache; public: FileNullability &operator[](FileID file) { // Check the single-element cache. if (file == Cache.File) return Cache.Nullability; // It's not in the single-element cache; flush the cache if we have one. if (!Cache.File.isInvalid()) { Map[Cache.File] = Cache.Nullability; } // Pull this entry into the cache. Cache.File = file; Cache.Nullability = Map[file]; return Cache.Nullability; } }; /// Keeps track of expected type during expression parsing. The type is tied to /// a particular token; all functions that update or consume the type take a /// start location of the token they are looking at as a parameter. This allows /// us to avoid updating the type on hot paths in the parser. class PreferredTypeBuilder { public: PreferredTypeBuilder() = default; explicit PreferredTypeBuilder(QualType Type) : Type(Type) {} void enterCondition(Sema &S, SourceLocation Tok); void enterReturn(Sema &S, SourceLocation Tok); void enterVariableInit(SourceLocation Tok, Decl *D); /// Computing a type for the function argument may require running /// overloading, so we postpone its computation until it is actually needed. /// /// Clients should be very careful when using this function, as it stores a /// function_ref; clients should make sure all calls to get() with the same /// location happen while the function_ref is alive. void enterFunctionArgument(SourceLocation Tok, llvm::function_ref<QualType()> ComputeType); void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc); void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind, SourceLocation OpLoc); void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op); void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base); void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS); /// Handles all type casts, including C-style cast, C++ casts, etc. void enterTypeCast(SourceLocation Tok, QualType CastType); QualType get(SourceLocation Tok) const { if (Tok != ExpectedLoc) return QualType(); if (!Type.isNull()) return Type; if (ComputeType) return ComputeType(); return QualType(); } private: /// Start position of a token for which we store expected type. SourceLocation ExpectedLoc; /// Expected type for a token starting at ExpectedLoc. QualType Type; /// A function to compute expected type at ExpectedLoc. It is only considered /// if Type is null. llvm::function_ref<QualType()> ComputeType; }; /// Sema - This implements semantic analysis and AST building for C. class Sema final { Sema(const Sema &) = delete; void operator=(const Sema &) = delete; /// A key method to reduce duplicate debug info from Sema. virtual void anchor(); ///Source of additional semantic information. ExternalSemaSource *ExternalSource; ///Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); bool isVisibleSlow(const NamedDecl *D); /// Determine whether two declarations should be linked together, given that /// the old declaration might not be visible and the new declaration might /// not have external linkage. bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { if (isVisible(Old)) return true; // See comment in below overload for why it's safe to compute the linkage // of the new declaration here. if (New->isExternallyDeclarable()) { assert(Old->isExternallyDeclarable() && "should not have found a non-externally-declarable previous decl"); return true; } return false; } bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New); void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem, QualType ResultTy, ArrayRef<QualType> Args); public: /// The maximum alignment, same as in llvm::Value. We duplicate them here /// because that allows us not to duplicate the constants in clang code, /// which we must do since we can't directly use the llvm constants. /// The value is verified against llvm here: lib/CodeGen/CGDecl.cpp /// /// This is the greatest alignment value supported by load, store, and alloca /// instructions, and global values. static const unsigned MaxAlignmentExponent = 29; static const unsigned MaximumAlignment = 1u << MaxAlignmentExponent; typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions CurFPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; api_notes::APINotesManager APINotes; /// Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; bool MSStructPragmaOn; // True when \#pragma ms_struct on /// Controls member pointer representation format under the MS ABI. LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; /// Stack of active SEH __finally scopes. Can be empty. SmallVector<Scope*, 2> CurrentSEHFinally; /// Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; /// Holds TypoExprs that are created from `createDelayedTypo`. This is used by /// `TransformTypos` in order to keep track of any TypoExprs that are created /// recursively during typo correction and wipe them away if the correction /// fails.
llvm::SmallVector<TypoExpr *, 2> TypoExprs; /// pragma clang section kind enum PragmaClangSectionKind { PCSK_Invalid = 0, PCSK_BSS = 1, PCSK_Data = 2, PCSK_Rodata = 3, PCSK_Text = 4, PCSK_Relro = 5 }; enum PragmaClangSectionAction { PCSA_Set = 0, PCSA_Clear = 1 }; struct PragmaClangSection { std::string SectionName; bool Valid = false; SourceLocation PragmaLocation; void Act(SourceLocation PragmaLocation, PragmaClangSectionAction Action, StringLiteral* Name); }; PragmaClangSection PragmaClangBSSSection; PragmaClangSection PragmaClangDataSection; PragmaClangSection PragmaClangRodataSection; PragmaClangSection PragmaClangRelroSection; PragmaClangSection PragmaClangTextSection; enum PragmaMsStackAction { PSK_Reset = 0x0, // #pragma () PSK_Set = 0x1, // #pragma (value) PSK_Push = 0x2, // #pragma (push[, id]) PSK_Pop = 0x4, // #pragma (pop[, id]) PSK_Show = 0x8, // #pragma (show) -- only for "pack"! PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value) PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value) }; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; SourceLocation PragmaPushLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation, SourceLocation PragmaPushLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation), PragmaPushLocation(PragmaPushLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value) { if (Action == PSK_Reset) { CurrentValue = DefaultValue; CurrentPragmaLocation = PragmaLocation; return; } if (Action & PSK_Push) Stack.emplace_back(StackSlotLabel, CurrentValue, CurrentPragmaLocation, PragmaLocation); else if (Action & PSK_Pop) { if (!StackSlotLabel.empty()) { // If we've got a label, try to find it and jump there. auto I = llvm::find_if(llvm::reverse(Stack), [&](const Slot &x) { return x.StackSlotLabel == StackSlotLabel; }); // If we found the label, pop from there. if (I != Stack.rend()) { CurrentValue = I->Value; CurrentPragmaLocation = I->PragmaLocation; Stack.erase(std::prev(I.base()), Stack.end()); } } else if (!Stack.empty()) { // We do not have a label; just pop the last entry. CurrentValue = Stack.back().Value; CurrentPragmaLocation = Stack.back().PragmaLocation; Stack.pop_back(); } } if (Action & PSK_Set) { CurrentValue = Value; CurrentPragmaLocation = PragmaLocation; } } // MSVC seems to add artificial slots to #pragma stacks on entering a C++ // method body to restore the stacks on exit, so it works like this: // // struct S { // #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>) // void Method {} // #pragma <name>(pop, InternalPragmaSlot) // }; // // It works even with #pragma vtordisp, although MSVC doesn't support // #pragma vtordisp(push [, id], n) // syntax. // // Push / pop a named sentinel slot. void SentinelAction(PragmaMsStackAction Action, StringRef Label) { assert((Action == PSK_Push || Action == PSK_Pop) && "Can only push / pop #pragma stack sentinels!"); Act(CurrentPragmaLocation, Action, Label, CurrentValue); } // Constructors. explicit PragmaStack(const ValueType &Default) : DefaultValue(Default), CurrentValue(Default) {} bool hasValue() const { return CurrentValue != DefaultValue; } SmallVector<Slot, 2> Stack; ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). /// Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects PragmaStack<MSVtorDispMode> VtorDispStack; // #pragma pack. // Sentinel to represent when the stack is set to mac68k alignment. static const unsigned kMac68kAlignmentSentinel = ~0U; PragmaStack<unsigned> PackStack; // The current #pragma pack values and locations at each #include. struct PackIncludeState { unsigned CurrentValue; SourceLocation CurrentPragmaLocation; bool HasNonDefaultValue, ShouldWarnOnInclude; }; SmallVector<PackIncludeState, 8> PackIncludeStack; // Segment #pragmas. PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; // This stack tracks the current state of Sema.CurFPFeatures. PragmaStack<FPOptionsOverride> FpPragmaStack; FPOptionsOverride CurFPFeatureOverrides() { FPOptionsOverride result; if (!FpPragmaStack.hasValue()) { result = FPOptionsOverride(); } else { result = FpPragmaStack.CurrentValue; } return result; } // RAII object to push / pop sentinel slots for all MS #pragma stacks. // Actions should be performed only if we enter / exit a C++ method body. class PragmaStackSentinelRAII { public: PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct); ~PragmaStackSentinelRAII(); private: Sema &S; StringRef SlotLabel; bool ShouldAct; }; /// A mapping that describes the nullability we've seen in each header file. FileNullabilityMap NullabilityMap; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// This is an attribute introduced by \#pragma clang attribute. struct PragmaAttributeEntry { SourceLocation Loc; ParsedAttr *Attribute; SmallVector<attr::SubjectMatchRule, 4> MatchRules; bool IsUsed; }; /// A push'd group of PragmaAttributeEntries. struct PragmaAttributeGroup { /// The location of the push attribute. SourceLocation Loc; /// The namespace of this push group. const IdentifierInfo *Namespace; SmallVector<PragmaAttributeEntry, 2> Entries; }; SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack; /// The declaration that is currently receiving an attribute from the /// #pragma attribute stack. const Decl *PragmaAttributeCurrentTargetDecl; /// This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// Used to control the generation of ExprWithCleanups. CleanupInfo Cleanup; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression.
SmallVector<ExprWithCleanups::CleanupObject, 8> ExprCleanupObjects; /// Store a set of either DeclRefExprs or MemberExprs that contain a reference /// to a variable (constant) that may or may not be odr-used in this Expr, and /// we won't know until all lvalue-to-rvalue and discarded value conversions /// have been applied to all subexpressions of the enclosing full expression. /// This is cleared at the end of each full expression. using MaybeODRUseExprSet = llvm::SetVector<Expr *, SmallVector<Expr *, 4>, llvm::SmallPtrSet<Expr *, 4>>; MaybeODRUseExprSet MaybeODRUseExprs; std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope; /// Stack containing information about each of the nested /// function, block, and method scopes that are currently active. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; /// The index of the first FunctionScope that corresponds to the current /// context. unsigned FunctionScopesStart = 0; ArrayRef<sema::FunctionScopeInfo*> getFunctionScopes() const { return llvm::makeArrayRef(FunctionScopes.begin() + FunctionScopesStart, FunctionScopes.end()); } /// Stack containing information needed when in C++2a an 'auto' is encountered /// in a function declaration parameter type specifier in order to invent a /// corresponding template parameter in the enclosing abbreviated function /// template. This information is also present in LambdaScopeInfo, stored in /// the FunctionScopes stack. SmallVector<InventedTemplateParameterInfo, 4> InventedParameterInfos; /// The index of the first InventedParameterInfo that refers to the current /// context. unsigned InventedParameterInfosStart = 0; ArrayRef<InventedTemplateParameterInfo> getInventedParameterInfos() const { return llvm::makeArrayRef(InventedParameterInfos.begin() + InventedParameterInfosStart, InventedParameterInfos.end()); } typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list of all the extended vector types. This /// allows us to associate a raw vector type with one of the ext_vector type /// names. This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType; /// Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; /// Delete-expressions to be analyzed at the end of translation unit /// /// This list contains class members, and locations of delete-expressions /// that could not be proven as to whether they mismatch with new-expression /// used in initializer of the field. typedef std::pair<SourceLocation, bool> DeleteExprLoc; typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs; llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations for which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; /// All the external declarations encountered and used in the TU. SmallVector<VarDecl *, 4> ExternalDeclarations; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedOverridingExceptionSpecChecks; /// All the function redeclarations seen during a class definition that had /// their exception spec checks delayed, plus the prior declaration they /// should be checked against. Except during error recovery, the new decl /// should always be a friend declaration, as that's the only valid way to /// redeclare a special member before its class is complete. SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2> DelayedEquivalentExceptionSpecChecks; typedef llvm::MapVector<const FunctionDecl *, std::unique_ptr<LateParsedTemplate>> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } /// \brief Callback to the parser to parse a type expressed as a string. std::function<TypeResult(StringRef, StringRef, SourceLocation)> ParseTypeFromStringCallback; class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; unsigned SavedFunctionScopesStart; unsigned SavedInventedParameterInfosStart; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride), SavedFunctionScopesStart(S.FunctionScopesStart), SavedInventedParameterInfosStart(S.InventedParameterInfosStart) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); // Any saved FunctionScopes do not refer to this context. S.FunctionScopesStart = S.FunctionScopes.size(); S.InventedParameterInfosStart = S.InventedParameterInfos.size(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; S.FunctionScopesStart = SavedFunctionScopesStart; S.InventedParameterInfosStart = SavedInventedParameterInfosStart; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// Whether the AST is currently being rebuilt to correct immediate /// invocations. Immediate invocation candidates and references to consteval /// functions aren't tracked when this is set. bool RebuildingImmediateInvocation = false; /// Used to change context to isConstantEvaluated without pushing a heavy /// ExpressionEvaluationContextRecord object. bool isConstantEvaluatedOverride; bool isConstantEvaluated() { return ExprEvalContexts.back().isConstantEvaluated() || isConstantEvaluatedOverride; } /// RAII object to handle the state changes required to synthesize /// a function body. 
class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; bool PushedCodeSynthesisContext = false; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::PotentiallyEvaluated); if (auto *FD = dyn_cast<FunctionDecl>(DC)) FD->setWillHaveBody(true); else assert(isa<ObjCMethodDecl>(DC)); } void addContextNote(SourceLocation UseLoc) { assert(!PushedCodeSynthesisContext); Sema::CodeSynthesisContext Ctx; Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction; Ctx.PointOfInstantiation = UseLoc; Ctx.Entity = cast<Decl>(S.CurContext); S.pushCodeSynthesisContext(Ctx); PushedCodeSynthesisContext = true; } ~SynthesizedFunctionScope() { if (PushedCodeSynthesisContext) S.popCodeSynthesisContext(); if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext)) FD->setWillHaveBody(false); S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. Rare. May alias another /// identifier, declared or undeclared. llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. /// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// The C++ "std::align_val_t" enum class, which is defined by the C++ /// standard library. LazyDeclPtr StdAlignValT; /// The C++ "std::experimental" namespace, where the experimental parts /// of the standard library reside. NamespaceDecl *StdExperimentalNamespaceCache; /// The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// The C++ "std::coroutine_traits" template, which is defined in /// \<coroutine_traits>. ClassTemplateDecl *StdCoroutineTraitsCache; /// The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// The declaration of the Objective-C NSValue class. ObjCInterfaceDecl *NSValueDecl; /// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer; /// Pointer to NSValue type (NSValue *). QualType NSValuePointer; /// The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// Pointer to NSString type (NSString *). QualType NSStringPointer; /// The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// The declaration of the valueWithBytes:objCType: method. ObjCMethodDecl *ValueWithBytesObjCTypeMethod; /// The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// The declaration of the Objective-C NSDictionary class. ObjCInterfaceDecl *NSDictionaryDecl; /// The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// id<NSCopying> type. QualType QIDNSCopying; /// will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum class ExpressionEvaluationContext { /// The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// The current expression occurs within a braced-init-list within /// an unevaluated operand. This is mostly like a regular unevaluated /// context, except that we still instantiate constexpr functions that are /// referenced here so that we can perform narrowing checks correctly. UnevaluatedList, /// The current expression occurs within a discarded statement. /// This behaves largely similarly to an unevaluated operand in preventing /// definitions from being required, but not in other ways. DiscardedStatement, /// The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. /// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. 
PotentiallyEvaluatedIfUsed }; using ImmediateInvocationCandidate = llvm::PointerIntPair<ConstantExpr *, 1>; /// Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// The expression evaluation context. ExpressionEvaluationContext Context; /// Whether the enclosing context needed a cleanup. CleanupInfo ParentCleanup; /// Whether we are in a decltype expression. bool IsDecltype; /// The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; MaybeODRUseExprSet SavedMaybeODRUseExprs; /// The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs; /// Expressions appearing as the LHS of a volatile assignment in this /// context. We produce a warning for these when popping the context if /// they are not discarded-value expressions nor unevaluated operands. SmallVector<Expr*, 2> VolatileAssignmentLHSs; /// Set of candidates for starting an immediate invocation. llvm::SmallVector<ImmediateInvocationCandidate, 4> ImmediateInvocationCandidates; /// Set of DeclRefExprs referencing a consteval function when used in a /// context not already known to be immediately invoked. llvm::SmallPtrSet<DeclRefExpr *, 4> ReferenceToConsteval; /// \brief Describes whether we are in an expression context which we have /// to handle differently. enum ExpressionKind { EK_Decltype, EK_TemplateArgument, EK_Other } ExprContext; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, CleanupInfo ParentCleanup, Decl *ManglingContextDecl, ExpressionKind ExprContext) : Context(Context), ParentCleanup(ParentCleanup), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), ExprContext(ExprContext) {} bool isUnevaluated() const { return Context == ExpressionEvaluationContext::Unevaluated || Context == ExpressionEvaluationContext::UnevaluatedAbstract || Context == ExpressionEvaluationContext::UnevaluatedList; } bool isConstantEvaluated() const { return Context == ExpressionEvaluationContext::ConstantEvaluated; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// Emit a warning for all pending noderef expressions that we recorded. void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec); /// Compute the mangling number context for a lambda expression or /// block literal. Also return the extra mangling decl if any. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal.
std::tuple<MangleNumberingContext *, Decl *> getCurrentMangleNumberContext(const DeclContext *DC); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. class SpecialMemberOverloadResult { public: enum Kind { NoMemberOrDeleted, Ambiguous, Success }; private: llvm::PointerIntPair<CXXMethodDecl*, 2> Pair; public: SpecialMemberOverloadResult() : Pair() {} SpecialMemberOverloadResult(CXXMethodDecl *MD) : Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {} CXXMethodDecl *getMethod() const { return Pair.getPointer(); } void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); } Kind getKind() const { return static_cast<Kind>(Pair.getInt()); } void setKind(Kind K) { Pair.setInt(K); } }; class SpecialMemberOverloadResultEntry : public llvm::FastFoldingSetNode, public SpecialMemberOverloadResult { public: SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID) : FastFoldingSetNode(ID) {} }; /// A cache of special member function overload resolution results /// for C++ records. llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache; /// A cache of the flags available in enumerations with the flag_bits /// attribute. mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache; /// The kind of translation unit we are processing. /// /// When we're processing a complete translation unit, Sema will perform /// end-of-translation-unit semantic tasks (such as creating /// initializers for tentative definitions in C) once parsing has /// completed. Modules and precompiled headers perform different kinds of /// checks. TranslationUnitKind TUKind; llvm::BumpPtrAllocator BumpAlloc; /// The number of SFINAE diagnostics that have been trapped. unsigned NumSFINAEErrors; typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>> UnparsedDefaultArgInstantiationsMap; /// A mapping from parameters with unparsed default arguments to the /// set of instantiations of each parameter. /// /// This mapping is a temporary data structure used when parsing /// nested class templates or nested classes of class templates, /// where we might end up instantiating an inner class before the /// default arguments of its methods have been parsed. UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations; // Contains the locations of the beginning of unparsed default // argument locations. llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs; /// UndefinedInternals - all the used, undefined objects which require a /// definition in this translation unit. llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed; /// Determine if VD, which must be a variable or function, is an external /// symbol that nonetheless can't be referenced from outside this translation /// unit because its type has no linkage and it's not extern "C". bool isExternalWithNoLinkageType(ValueDecl *VD); /// Obtain a sorted list of functions that are undefined but ODR-used. void getUndefinedButUsed( SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined); /// Retrieves list of suspicious delete-expressions that will be checked at /// the end of translation unit. 
const llvm::MapVector<FieldDecl *, DeleteLocs> & getMismatchingDeleteExpressions() const; typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods; typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool; /// Method Pool - allows efficient lookup when typechecking messages to "id". /// We need to maintain a list, since selectors can have differing signatures /// across classes. In Cocoa, this happens to be extremely uncommon (only 1% /// of selectors are "overloaded"). /// At the head of the list it is recorded whether there were 0, 1, or >= 2 /// methods inside categories with a particular selector. GlobalMethodPool MethodPool; /// Method selectors used in a \@selector expression. Used for implementation /// of -Wselector. llvm::MapVector<Selector, SourceLocation> ReferencedSelectors; /// List of SourceLocations where 'self' is implicitly retained inside a /// block. llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1> ImplicitlyRetainedSelfLocs; /// Kinds of C++ special members. enum CXXSpecialMember { CXXDefaultConstructor, CXXCopyConstructor, CXXMoveConstructor, CXXCopyAssignment, CXXMoveAssignment, CXXDestructor, CXXInvalid }; typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember> SpecialMemberDecl; /// The C++ special members which we are currently in the process of /// declaring. If this process recursively triggers the declaration of the /// same special member, we should act as if it is not yet declared. llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared; /// Kinds of defaulted comparison operator functions. enum class DefaultedComparisonKind : unsigned char { /// This is not a defaultable comparison operator. None, /// This is an operator== that should be implemented as a series of /// subobject comparisons. Equal, /// This is an operator<=> that should be implemented as a series of /// subobject comparisons. ThreeWay, /// This is an operator!= that should be implemented as a rewrite in terms /// of a == comparison. NotEqual, /// This is an <, <=, >, or >= that should be implemented as a rewrite in /// terms of a <=> comparison. Relational, }; /// The function definitions which were renamed as part of typo-correction /// to match their respective declarations. We want to keep track of them /// to ensure that we don't emit a "redefinition" error if we encounter a /// correctly named definition after the renamed definition. llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions; /// Stack of types that correspond to the parameter entities that are /// currently being copy-initialized. Can be empty. llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes; void ReadMethodPool(Selector Sel); void updateOutOfDateSelector(Selector Sel); /// Private Helper predicate to check for 'self'. bool isSelfExpr(Expr *RExpr); bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method); /// Cause the active diagnostic on the DiagnosticsEngine to be /// emitted. This is closely coupled to the SemaDiagnosticBuilder class and /// should not be used elsewhere. void EmitCurrentDiagnostic(unsigned DiagID); /// Records and restores the CurFPFeatures state on entry/exit of compound /// statements.
class FPFeaturesStateRAII { public: FPFeaturesStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.CurFPFeatures) { OldOverrides = S.FpPragmaStack.CurrentValue; } ~FPFeaturesStateRAII() { S.CurFPFeatures = OldFPFeaturesState; S.FpPragmaStack.CurrentValue = OldOverrides; } FPOptionsOverride getOverrides() { return OldOverrides; } private: Sema& S; FPOptions OldFPFeaturesState; FPOptionsOverride OldOverrides; }; void addImplicitTypedef(StringRef Name, QualType T); bool WarnedStackExhausted = false; public: Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer, TranslationUnitKind TUKind = TU_Complete, CodeCompleteConsumer *CompletionConsumer = nullptr); ~Sema(); /// Perform initialization that occurs after the parser has been /// initialized but before it parses anything. void Initialize(); const LangOptions &getLangOpts() const { return LangOpts; } OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; } FPOptions &getCurFPFeatures() { return CurFPFeatures; } DiagnosticsEngine &getDiagnostics() const { return Diags; } SourceManager &getSourceManager() const { return SourceMgr; } Preprocessor &getPreprocessor() const { return PP; } ASTContext &getASTContext() const { return Context; } ASTConsumer &getASTConsumer() const { return Consumer; } ASTMutationListener *getASTMutationListener() const; ExternalSemaSource* getExternalSource() const { return ExternalSource; } ///Registers an external source. If an external source already exists, /// creates a multiplex external source and appends to it. /// ///\param[in] E - A non-null external sema source. /// void addExternalSource(ExternalSemaSource *E); void PrintStats() const; /// Warn that the stack is nearly exhausted. void warnStackExhausted(SourceLocation Loc); /// Run some code with "sufficient" stack space. (Currently, at least 256K is /// guaranteed). Produces a warning if we're low on stack space and allocates /// more in that case. Use this in code that may recurse deeply (for example, /// in template instantiation) to avoid stack overflow. void runWithSufficientStackSpace(SourceLocation Loc, llvm::function_ref<void()> Fn); /// Helper class that creates diagnostics with optional /// template instantiation stacks. /// /// This class provides a wrapper around the basic DiagnosticBuilder /// class that emits diagnostics. SemaDiagnosticBuilder is /// responsible for emitting the diagnostic (as DiagnosticBuilder /// does) and, if the diagnostic comes from inside a template /// instantiation, printing the template instantiation stack as /// well. class SemaDiagnosticBuilder : public DiagnosticBuilder { Sema &SemaRef; unsigned DiagID; public: SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID) : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { } // This is a cunning lie. DiagnosticBuilder actually performs move // construction in its copy constructor (but due to varied uses, it's not // possible to conveniently express this as actual move construction). So // the default copy ctor here is fine, because the base class disables the // source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op // in that case anyway. SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default; ~SemaDiagnosticBuilder() { // If we aren't active, there is nothing to do. if (!isActive()) return; // Otherwise, we need to emit the diagnostic. First flush the underlying // DiagnosticBuilder data, and clear the diagnostic builder itself so it // won't emit the diagnostic in its own destructor.
// // This seems wasteful, in that as written the DiagnosticBuilder dtor will // do its own needless checks to see if the diagnostic needs to be // emitted. However, because we take care to ensure that the builder // objects never escape, a sufficiently smart compiler will be able to // eliminate that code. FlushCounts(); Clear(); // Dispatch to Sema to emit the diagnostic. SemaRef.EmitCurrentDiagnostic(DiagID); } /// Teach operator<< to produce an object of the correct type. template<typename T> friend const SemaDiagnosticBuilder &operator<<( const SemaDiagnosticBuilder &Diag, const T &Value) { const DiagnosticBuilder &BaseDiag = Diag; BaseDiag << Value; return Diag; } }; /// Emit a diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) { DiagnosticBuilder DB = Diags.Report(Loc, DiagID); return SemaDiagnosticBuilder(DB, *this, DiagID); } /// Emit a partial diagnostic. SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD); /// Build a partial diagnostic. PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h bool findMacroSpelling(SourceLocation &loc, StringRef name); /// Get a string to suggest for zero-initialization of a type. std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; /// Invent a new identifier for parameters of abbreviated templates. IdentifierInfo * InventAbbreviatedTemplateParameterTypeName(IdentifierInfo *ParamName, unsigned Index); void emitAndClearUnusedLocalTypedefWarnings(); private: /// Function or variable declarations to be checked for whether the deferred /// diagnostics should be emitted. SmallVector<Decl *, 4> DeclsToCheckForDeferredDiags; public: // Emit all deferred diagnostics. void emitDeferredDiags(); enum TUFragmentKind { /// The global module fragment, between 'module;' and a module-declaration. Global, /// A normal translation unit fragment. For a non-module unit, this is the /// entire translation unit. Otherwise, it runs from the module-declaration /// to the private-module-fragment (if any) or the end of the TU (if not). Normal, /// The private module fragment, between 'module :private;' and the end of /// the translation unit. Private }; void ActOnStartOfTranslationUnit(); void ActOnEndOfTranslationUnit(); void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K, unsigned OpenMPCaptureLevel = 0); /// Custom deleter to allow FunctionScopeInfos to be kept alive for a short /// time after they've been popped. 
class PoppedFunctionScopeDeleter { Sema *Self; public: explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {} void operator()(sema::FunctionScopeInfo *Scope) const; }; using PoppedFunctionScopePtr = std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>; PoppedFunctionScopePtr PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, QualType BlockType = QualType()); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.empty() ? nullptr : FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const; void setFunctionHasBranchIntoScope(); void setFunctionHasBranchProtectedScope(); void setFunctionHasIndirectGoto(); void PushCompoundScope(bool IsStmtExpr); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// Get the innermost lambda enclosing the current location, if any. This /// looks through intervening non-lambda scopes such as local functions and /// blocks. sema::LambdaScopeInfo *getEnclosingLambda() const; /// Retrieve the current lambda scope info, if any. /// \param IgnoreNonLambdaCapturingScope true if should find the top-most /// lambda scope info ignoring all inner capturing scopes that are not /// lambda scopes. sema::LambdaScopeInfo * getCurLambda(bool IgnoreNonLambdaCapturingScope = false); /// Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } /// Called before parsing a function declarator belonging to a function /// declaration. void ActOnStartFunctionDeclarationDeclarator(Declarator &D, unsigned TemplateParameterDepth); /// Called after parsing a function declarator belonging to a function /// declaration. void ActOnFinishFunctionDeclarationDeclarator(Declarator &D); void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); QualType BuildMatrixType(QualType T, Expr *NumRows, Expr *NumColumns, SourceLocation AttrLoc); QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace, SourceLocation AttrLoc); /// Same as above, but constructs the AddressSpace index if not provided. QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace, SourceLocation AttrLoc); bool CheckQualifiedFunctionForTypeId(QualType T, SourceLocation Loc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// Build a function type. 
/// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. /// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); QualType BuildReadPipeType(QualType T, SourceLocation Loc); QualType BuildWritePipeType(QualType T, SourceLocation Loc); QualType BuildExtIntType(bool IsUnsigned, Expr *BitWidth, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); /// Package the given type and TSI into a ParsedType. ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Stmt *E); /// Determine whether the callee of a particular function call can throw. /// E, D and Loc are all optional. 
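/// A minimal sketch (illustrative only; `S` and the CallExpr `CE` are
/// assumed to be in scope):
/// \code
///   CanThrowResult CT = Sema::canCalleeThrow(S, CE, CE->getCalleeDecl());
/// \endcode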
static CanThrowResult canCalleeThrow(Sema &S, const Expr *E, const Decl *D, SourceLocation Loc = SourceLocation()); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool handlerCanCatch(QualType HandlerType, QualType ExceptionType); bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID, const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const PartialDiagnostic &NoThrowDiagID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID, const PartialDiagnostic &NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// Abstract class used to diagnose incomplete types. struct TypeDiagnoser { TypeDiagnoser() {} virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser { protected: unsigned DiagID; std::tuple<const Ts &...> Args; template <std::size_t... Is> void emit(const SemaDiagnosticBuilder &DB, std::index_sequence<Is...>) const { // Apply all tuple elements to the builder in order. bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...}; (void)Dummy; } public: BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args) : TypeDiagnoser(), DiagID(DiagID), Args(Args...) { assert(DiagID != 0 && "no diagnostic for type diagnoser"); } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID); emit(DB, std::index_sequence_for<Ts...>()); DB << T; } }; /// Do a check to make sure \p Name looks like a legal swift_name /// attribute for the decl \p D. 
Raise a diagnostic if the name is invalid /// for the given declaration. /// /// For a function, this will validate a compound Swift name, /// e.g. <code>init(foo:bar:baz:)</code> or <code>controllerForName(_:)</code>, /// and the function will output the number of parameter names, and whether /// this is a single-arg initializer. /// /// For a type, enum constant, property, or variable declaration, this will /// validate either a simple identifier, or a qualified /// <code>context.identifier</code> name. /// /// \returns true if the name is a valid swift name for \p D, false otherwise. bool DiagnoseSwiftName(Decl *D, StringRef Name, SourceLocation ArgLoc, const IdentifierInfo *AttrName); /// A derivative of BoundTypeDiagnoser for which the diagnostic's type /// parameter is preceded by a 0/1 enum that is 1 if the type is sizeless. /// For example, a diagnostic with no other parameters would generally have /// the form "...%select{incomplete|sizeless}0 type %1...". template <typename... Ts> class SizelessTypeDiagnoser : public BoundTypeDiagnoser<Ts...> { public: SizelessTypeDiagnoser(unsigned DiagID, const Ts &... Args) : BoundTypeDiagnoser<Ts...>(DiagID, Args...) {} void diagnose(Sema &S, SourceLocation Loc, QualType T) override { const SemaDiagnosticBuilder &DB = S.Diag(Loc, this->DiagID); this->emit(DB, std::index_sequence_for<Ts...>()); DB << T->isSizelessType() << T; } }; enum class CompleteTypeKind { /// Apply the normal rules for complete types. In particular, /// treat all sizeless types as incomplete. Normal, /// Relax the normal rules for complete types so that they include /// sizeless built-in types. AcceptSizeless, // FIXME: Eventually we should flip the default to Normal and opt in // to AcceptSizeless rather than opt out of it. Default = AcceptSizeless }; private: /// Methods for marking which expressions involve dereferencing a pointer /// marked with the 'noderef' attribute. Expressions are checked bottom up as /// they are parsed, meaning that a noderef pointer may not be accessed. For /// example, in `&*p` where `p` is a noderef pointer, we will first parse the /// `*p`, but need to check that `address of` is called on it. This requires /// keeping a container of all pending expressions and checking if the address /// of them are eventually taken. void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E); void CheckAddressOfNoDeref(const Expr *E); void CheckMemberAccessOfNoDeref(const MemberExpr *E); bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser *Diagnoser); struct ModuleScope { SourceLocation BeginLoc; clang::Module *Module = nullptr; bool ModuleInterface = false; bool ImplicitGlobalModuleFragment = false; VisibleModuleSet OuterVisibleModules; }; /// The modules we're currently parsing. llvm::SmallVector<ModuleScope, 16> ModuleScopes; /// Namespace definitions that we will export when they finish. llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces; /// Get the module whose scope we are currently within. Module *getCurrentModule() const { return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module; } VisibleModuleSet VisibleModules; public: /// Get the module owning an entity. Module *getOwningModule(const Decl *Entity) { return Entity->getOwningModule(); } /// Make a merged definition of an existing hidden definition \p ND /// visible at the specified location. 
void makeMergedDefinitionVisible(NamedDecl *ND); bool isModuleVisible(const Module *M, bool ModulePrivate = false); // When loading a non-modular PCH file, this is used to restore module // visibility. void makeModuleVisible(Module *Mod, SourceLocation ImportLoc) { VisibleModules.setVisible(Mod, ImportLoc); } /// Determine whether a declaration is visible to name lookup. bool isVisible(const NamedDecl *D) { return D->isUnconditionallyVisible() || isVisibleSlow(D); } /// Determine whether any declaration of an entity is visible. bool hasVisibleDeclaration(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr) { return isVisible(D) || hasVisibleDeclarationSlow(D, Modules); } bool hasVisibleDeclarationSlow(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules); bool hasVisibleMergedDefinition(NamedDecl *Def); bool hasMergedDefinitionInCurrentModule(NamedDecl *Def); /// Determine if \p D and \p Suggested have a structurally compatible /// layout as described in C11 6.2.7/1. bool hasStructuralCompatLayout(Decl *D, Decl *Suggested); /// Determine if \p D has a visible definition. If not, suggest a declaration /// that should be made visible to expose the definition. bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested, bool OnlyNeedComplete = false); bool hasVisibleDefinition(const NamedDecl *D) { NamedDecl *Hidden; return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden); } /// Determine if the template parameter \p D has a visible default argument. bool hasVisibleDefaultArgument(const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is an explicit /// specialization declaration for a specialization of a template. (For a /// member specialization, use hasVisibleMemberSpecialization.) bool hasVisibleExplicitSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if there is a visible declaration of \p D that is a member /// specialization declaration (as opposed to an instantiated declaration). bool hasVisibleMemberSpecialization( const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr); /// Determine if \p A and \p B are equivalent internal linkage declarations /// from different modules, and thus an ambiguity error can be downgraded to /// an extension warning. bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A, const NamedDecl *B); void diagnoseEquivalentInternalLinkageDeclarations( SourceLocation Loc, const NamedDecl *D, ArrayRef<const NamedDecl *> Equiv); bool isUsualDeallocationFunction(const CXXMethodDecl *FD); bool isCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind = CompleteTypeKind::Default) { return !RequireCompleteTypeImpl(Loc, T, Kind, nullptr); } bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, CompleteTypeKind Kind, unsigned DiagID); bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, Diagnoser); } bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID) { return RequireCompleteType(Loc, T, CompleteTypeKind::Default, DiagID); } template <typename...
Ts> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteType(Loc, T, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); CompleteTypeKind Kind = CompleteTypeKind::Normal; if (T->isVLST()) Kind = CompleteTypeKind::AcceptSizeless; return RequireCompleteType(Loc, T, Kind, Diagnoser); } void completeExprArrayBound(Expr *E); bool RequireCompleteExprType(Expr *E, CompleteTypeKind Kind, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template <typename... Ts> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireCompleteExprType(E, CompleteTypeKind::Default, Diagnoser); } template <typename... Ts> bool RequireCompleteSizedExprType(Expr *E, unsigned DiagID, const Ts &... Args) { SizelessTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); CompleteTypeKind Kind = CompleteTypeKind::Normal; if (E->getType()->isVLST()) Kind = CompleteTypeKind::AcceptSizeless; return RequireCompleteExprType(E, Kind, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template <typename... Ts> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T, TagDecl *OwnedTagDecl = nullptr); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // struct SkipBodyInfo { SkipBodyInfo() : ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr), New(nullptr) {} bool ShouldSkip; bool CheckSameAsPrevious; NamedDecl *Previous; NamedDecl *New; }; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = nullptr, bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, bool IsClassTemplateDeductionContext = true, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool IsTemplateName = false); /// Attempt to behave like MSVC in situations where lookup of an unqualified /// type name has failed in a dependent context. 
In these situations, we /// automatically form a DependentTypeName that will retry lookup in a related /// scope during instantiation. ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II, SourceLocation NameLoc, bool IsTemplateTypeArg); /// Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). enum NameClassificationKind { /// This name is not a type or template in this context, but might be /// something else. NC_Unknown, /// Classification failed; an error has been produced. NC_Error, /// The name has been typo-corrected to a keyword. NC_Keyword, /// The name was classified as a type. NC_Type, /// The name was classified as a specific non-type, non-template /// declaration. ActOnNameClassifiedAsNonType should be called to /// convert the declaration to an expression. NC_NonType, /// The name was classified as an ADL-only function name. /// ActOnNameClassifiedAsUndeclaredNonType should be called to convert the /// result to an expression. NC_UndeclaredNonType, /// The name denotes a member of a dependent type that could not be /// resolved. ActOnNameClassifiedAsDependentNonType should be called to /// convert the result to an expression. NC_DependentNonType, /// The name was classified as an overload set, and an expression /// representing that overload set has been formed. /// ActOnNameClassifiedAsOverloadSet should be called to form a suitable /// expression referencing the overload set. NC_OverloadSet, /// The name was classified as a template whose specializations are types. NC_TypeTemplate, /// The name was classified as a variable template name. NC_VarTemplate, /// The name was classified as a function template name. NC_FunctionTemplate, /// The name was classified as an ADL-only function template name. NC_UndeclaredTemplate, /// The name was classified as a concept name. 
NC_Concept, }; class NameClassification { NameClassificationKind Kind; union { ExprResult Expr; NamedDecl *NonTypeDecl; TemplateName Template; ParsedType Type; }; explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {} public: NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {} NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {} static NameClassification Error() { return NameClassification(NC_Error); } static NameClassification Unknown() { return NameClassification(NC_Unknown); } static NameClassification OverloadSet(ExprResult E) { NameClassification Result(NC_OverloadSet); Result.Expr = E; return Result; } static NameClassification NonType(NamedDecl *D) { NameClassification Result(NC_NonType); Result.NonTypeDecl = D; return Result; } static NameClassification UndeclaredNonType() { return NameClassification(NC_UndeclaredNonType); } static NameClassification DependentNonType() { return NameClassification(NC_DependentNonType); } static NameClassification TypeTemplate(TemplateName Name) { NameClassification Result(NC_TypeTemplate); Result.Template = Name; return Result; } static NameClassification VarTemplate(TemplateName Name) { NameClassification Result(NC_VarTemplate); Result.Template = Name; return Result; } static NameClassification FunctionTemplate(TemplateName Name) { NameClassification Result(NC_FunctionTemplate); Result.Template = Name; return Result; } static NameClassification Concept(TemplateName Name) { NameClassification Result(NC_Concept); Result.Template = Name; return Result; } static NameClassification UndeclaredTemplate(TemplateName Name) { NameClassification Result(NC_UndeclaredTemplate); Result.Template = Name; return Result; } NameClassificationKind getKind() const { return Kind; } ExprResult getExpression() const { assert(Kind == NC_OverloadSet); return Expr; } ParsedType getType() const { assert(Kind == NC_Type); return Type; } NamedDecl *getNonTypeDecl() const { assert(Kind == NC_NonType); return NonTypeDecl; } TemplateName getTemplateName() const { assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate || Kind == NC_VarTemplate || Kind == NC_Concept || Kind == NC_UndeclaredTemplate); return Template; } TemplateNameKind getTemplateNameKind() const { switch (Kind) { case NC_TypeTemplate: return TNK_Type_template; case NC_FunctionTemplate: return TNK_Function_template; case NC_VarTemplate: return TNK_Var_template; case NC_Concept: return TNK_Concept_template; case NC_UndeclaredTemplate: return TNK_Undeclared_template; default: llvm_unreachable("unsupported name classification."); } } }; /// Perform name lookup on the given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param CCC The correction callback, if typo correction is desired. 
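/// A minimal parser-side sketch (illustrative only; `S`, `CurScope`, `SS`,
/// `II`, `Tok`, and `Next` are assumed to be in scope):
/// \code
///   Sema::NameClassification C =
///       S.ClassifyName(CurScope, SS, II, Tok.getLocation(), Next);
///   if (C.getKind() == Sema::NC_Type)
///     ParsedType T = C.getType();
/// \endcode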
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, CorrectionCandidateCallback *CCC = nullptr); /// Act on the result of classifying a name as an undeclared (ADL-only) /// non-type declaration. ExprResult ActOnNameClassifiedAsUndeclaredNonType(IdentifierInfo *Name, SourceLocation NameLoc); /// Act on the result of classifying a name as an undeclared member of a /// dependent base class. ExprResult ActOnNameClassifiedAsDependentNonType(const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, bool IsAddressOfOperand); /// Act on the result of classifying a name as a specific non-type /// declaration. ExprResult ActOnNameClassifiedAsNonType(Scope *S, const CXXScopeSpec &SS, NamedDecl *Found, SourceLocation NameLoc, const Token &NextToken); /// Act on the result of classifying a name as an overload set. ExprResult ActOnNameClassifiedAsOverloadSet(Scope *S, Expr *OverloadSet); /// Describes the detailed kind of a template name. Used in diagnostics. enum class TemplateNameKindForDiagnostics { ClassTemplate, FunctionTemplate, VarTemplate, AliasTemplate, TemplateTemplateParam, Concept, DependentTemplate }; TemplateNameKindForDiagnostics getTemplateNameKindForDiagnostics(TemplateName Name); /// Determine whether it's plausible that E was intended to be a /// template-name. bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) { if (!getLangOpts().CPlusPlus || E.isInvalid()) return false; Dependent = false; if (auto *DRE = dyn_cast<DeclRefExpr>(E.get())) return !DRE->hasExplicitTemplateArgs(); if (auto *ME = dyn_cast<MemberExpr>(E.get())) return !ME->hasExplicitTemplateArgs(); Dependent = true; if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get())) return !DSDRE->hasExplicitTemplateArgs(); if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get())) return !DSME->hasExplicitTemplateArgs(); // Any additional cases recognized here should also be handled by // diagnoseExprIntendedAsTemplateName. 
return false; } void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName, SourceLocation Less, SourceLocation Greater); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc, bool IsTemplateId); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation(), SourceLocation UnalignedQualLoc = SourceLocation()); void diagnosePointerAuthDisabled(SourceLocation loc, SourceRange range); bool checkConstantPointerAuthKey(Expr *keyExpr, unsigned &key); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec &DS); NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D, const LookupResult &R); NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R); void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl, const LookupResult &R); void CheckShadow(Scope *S, VarDecl *D); /// Warn if 'E', which is an expression that is about to be modified, refers /// to a shadowing declaration. void CheckShadowingDeclModification(Expr *E, SourceLocation Loc); void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI); private: /// Map of current shadowing declarations to shadowed declarations. Warn if /// it looks like the user is trying to modify the shadowing declaration. 
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls; public: void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void handleTagNumbering(const TagDecl *Tag, Scope *TagScope); void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec, TypedefNameDecl *NewTD); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope, ArrayRef<BindingDecl *> Bindings = None); NamedDecl * ActOnDecompositionDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit, Expr *Init); void CheckCompleteVariableDeclaration(VarDecl *VD); void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); enum class CheckConstexprKind { /// Diagnose issues that are non-constant or that are extensions. Diagnose, /// Identify whether this function satisfies the formal rules for constexpr /// functions in the current language mode (with no extensions).
CheckValid }; bool CheckConstexprFunctionDefinition(const FunctionDecl *FD, CheckConstexprKind Kind); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsMemberSpecialization); bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl); bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD, QualType NewT, QualType OldT); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD, bool IsDefinition); void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC, SourceLocation Loc, QualType T); QualType adjustParameterTypeForObjCAutoRefCount(QualType T, SourceLocation NameLoc, TypeSourceInfo *TSInfo); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); ExprResult ConvertParamDefaultArgument(const ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); // Contexts where using non-trivial C union types can be disallowed. This is // passed to err_non_trivial_c_union_in_invalid_context. enum NonTrivialCUnionContext { // Function parameter. NTCUC_FunctionParam, // Function return. NTCUC_FunctionReturn, // Default-initialized object. NTCUC_DefaultInitializedObject, // Variable with automatic storage duration. NTCUC_AutoVar, // Initializer expression that might copy from another object. NTCUC_CopyInit, // Assignment. NTCUC_Assignment, // Compound literal. NTCUC_CompoundLiteral, // Block capture. NTCUC_BlockCapture, // lvalue-to-rvalue conversion of volatile type. NTCUC_LValueToRValueVolatile, }; /// Emit diagnostics if the initializer or any of its explicit or /// implicitly-generated subexpressions require copying or /// default-initializing a type that is or contains a C union type that is /// non-trivial to copy or default-initialize. void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc); // These flags are passed to checkNonTrivialCUnion. enum NonTrivialCUnionKind { NTCUK_Init = 0x1, NTCUK_Destruct = 0x2, NTCUK_Copy = 0x4, }; /// Emit diagnostics if a non-trivial C union type or a struct that contains /// a non-trivial C union is used in an invalid context. 
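/// A minimal sketch (illustrative only; `S` and the VarDecl `VD` are
/// assumed):
/// \code
///   S.checkNonTrivialCUnion(VD->getType(), VD->getLocation(),
///                           Sema::NTCUC_AutoVar,
///                           Sema::NTCUK_Destruct | Sema::NTCUK_Copy);
/// \endcode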
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc, NonTrivialCUnionContext UseContext, unsigned NonTrivialKind); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit); void ActOnUninitializedDecl(Decl *dcl); void ActOnInitializerError(Decl *Dcl); void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void CheckStaticLocalForDllExport(VarDecl *VD); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group); /// Should be called on all declarations that might have attached /// documentation comments. void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition( FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParamLists, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D, SkipBodyInfo *SkipBody = nullptr); void ActOnStartTrailingRequiresClause(Scope *S, Declarator &D); ExprResult ActOnFinishTrailingRequiresClause(ExprResult ConstraintExpr); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineFunctionDef(FunctionDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters); /// Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. 
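/// A minimal sketch (illustrative only; `S` and the FunctionDecl `FD` are
/// assumed):
/// \code
///   S.DiagnoseSizeOfParametersAndReturnValue(FD->parameters(),
///                                            FD->getReturnType(), FD);
/// \endcode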
void DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList, SourceLocation SemiLoc); enum class ModuleDeclKind { Interface, ///< 'export module X;' Implementation, ///< 'module X;' }; /// The parser has processed a module-declaration that begins the definition /// of a module interface or implementation. DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc, SourceLocation ModuleLoc, ModuleDeclKind MDK, ModuleIdPath Path, bool IsFirstDecl); /// The parser has processed a global-module-fragment declaration that begins /// the definition of the global module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc); /// The parser has processed a private-module-fragment declaration that begins /// the definition of the private module fragment of the current module unit. /// \param ModuleLoc The location of the 'module' keyword. /// \param PrivateLoc The location of the 'private' keyword. DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc, SourceLocation PrivateLoc); /// The parser has processed a module import declaration. /// /// \param StartLoc The location of the first token in the declaration. This /// could be the location of an '@', 'export', or 'import'. /// \param ExportLoc The location of the 'export' keyword, if any. /// \param ImportLoc The location of the 'import' keyword. /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, ModuleIdPath Path); DeclResult ActOnModuleImport(SourceLocation StartLoc, SourceLocation ExportLoc, SourceLocation ImportLoc, Module *M, ModuleIdPath Path = {}); /// The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// The parser has entered a submodule. void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod); /// The parser has left a submodule. void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod); /// Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// Kinds of missing import. Note, the values of these enumerators correspond /// to %select values in diagnostics. enum class MissingImportKind { Declaration, Definition, DefaultArgument, ExplicitSpecialization, PartialSpecialization }; /// Diagnose that the specified declaration needs to be visible but /// isn't, and suggest a module import that would resolve the problem.
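/// A minimal sketch (illustrative only; `S`, `UseLoc`, and the NamedDecl
/// `ND` are assumed):
/// \code
///   S.diagnoseMissingImport(UseLoc, ND, Sema::MissingImportKind::Definition);
/// \endcode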
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, MissingImportKind MIK, bool Recover = true); void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl, SourceLocation DeclLoc, ArrayRef<Module *> Modules, MissingImportKind MIK, bool Recover); Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc, SourceLocation LBraceLoc); Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl, SourceLocation RBraceLoc); /// We've found a use of a templated declaration that would trigger an /// implicit instantiation. Check that any relevant explicit specializations /// and partial specializations are visible, and diagnose if not. void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// We've found a use of a template specialization that would select a /// partial specialization. Check that the partial specialization is visible, /// and diagnose if not. void checkPartialSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec); /// Retrieve a suitable printing policy for diagnostics. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// Retrieve a suitable printing policy for diagnostics. static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, RecordDecl *&AnonRecord); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation, RecordDecl *&AnonRecord); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); /// Common ways to introduce type names without a tag for use in diagnostics. /// Keep in sync with err_tag_reference_non_tag. enum NonTagKind { NTK_NonStruct, NTK_NonClass, NTK_NonUnion, NTK_NonEnum, NTK_Typedef, NTK_TypeAlias, NTK_Template, NTK_TypeAliasTemplate, NTK_TemplateTemplateArgument, }; /// Given a non-tag type declaration, returns an enum useful for indicating /// what kind of non-tag type this is. 
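/// A minimal sketch (illustrative only; `S`, `PrevDecl`, `NameLoc`, and the
/// exact diagnostic arguments are assumed):
/// \code
///   Sema::NonTagKind NTK = S.getNonTagTypeDeclKind(PrevDecl, TTK_Struct);
///   S.Diag(NameLoc, diag::err_tag_reference_non_tag) << NTK;
/// \endcode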
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo *Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier, bool IsTemplateParamOrArg, SkipBodyInfo *SkipBody = nullptr); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, const ParsedAttr &MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); enum TrivialABIHandling { /// The triviality of a method unaffected by "trivial_abi". TAH_IgnoreTrivialABI, /// The triviality of a method affected by "trivial_abi". TAH_ConsiderTrivialABI }; bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, TrivialABIHandling TAH = TAH_IgnoreTrivialABI, bool Diagnose = false); /// For a defaulted function, the kind of defaulted function that it is. 
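/// A minimal sketch (illustrative only; `S` and the FunctionDecl `FD` are
/// assumed):
/// \code
///   Sema::DefaultedFunctionKind DFK = S.getDefaultedFunctionKind(FD);
///   if (DFK.isSpecialMember())
///     CXXSpecialMember CSM = DFK.asSpecialMember();
/// \endcode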
class DefaultedFunctionKind { CXXSpecialMember SpecialMember : 8; DefaultedComparisonKind Comparison : 8; public: DefaultedFunctionKind() : SpecialMember(CXXInvalid), Comparison(DefaultedComparisonKind::None) { } DefaultedFunctionKind(CXXSpecialMember CSM) : SpecialMember(CSM), Comparison(DefaultedComparisonKind::None) {} DefaultedFunctionKind(DefaultedComparisonKind Comp) : SpecialMember(CXXInvalid), Comparison(Comp) {} bool isSpecialMember() const { return SpecialMember != CXXInvalid; } bool isComparison() const { return Comparison != DefaultedComparisonKind::None; } explicit operator bool() const { return isSpecialMember() || isComparison(); } CXXSpecialMember asSpecialMember() const { return SpecialMember; } DefaultedComparisonKind asComparison() const { return Comparison; } /// Get the index of this function kind for use in diagnostics. unsigned getDiagnosticIndex() const { static_assert(CXXInvalid > CXXDestructor, "invalid should have highest index"); static_assert((unsigned)DefaultedComparisonKind::None == 0, "none should be equal to zero"); return SpecialMember + (unsigned)Comparison; } }; DefaultedFunctionKind getDefaultedFunctionKind(const FunctionDecl *FD); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD) { return getDefaultedFunctionKind(MD).asSpecialMember(); } DefaultedComparisonKind getDefaultedComparisonKind(const FunctionDecl *FD) { return getDefaultedFunctionKind(FD).asComparison(); } void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, const ParsedAttributesView &AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); /// Perform ODR-like check for C/ObjC when merging tag types from modules. /// Differently from C++, actually parse the body and reject / error out /// in case of a structural mismatch. bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev, SkipBodyInfo &SkipBody); typedef void *SkippedDefinitionContext; /// Invoked when we enter a tag definition that we're skipping. SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceRange BraceRange); void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context); void ActOnObjCContainerFinishDefinition(); /// Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. 
/// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, bool IsFixed, const EnumDecl *Prev); /// Determine whether the body of an anonymous enumeration should be skipped. /// \param II The name of the first enumerator. SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II, SourceLocation IILoc); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, const ParsedAttributesView &Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, const ParsedAttributesView &Attr); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Enter a template parameter scope, after it's been associated with a particular /// DeclContext. Causes lookup within the scope to chain through enclosing contexts /// in the correct order. void EnterTemplatedContext(Scope *S, DeclContext *DC); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. 
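/// A minimal sketch (illustrative only; `CurScope` and `DC` are assumed):
/// \code
///   if (Scope *Enclosing = Sema::getScopeForDeclContext(CurScope, DC)) {
///     /* DC corresponds to an enclosing scope */
///   }
/// \endcode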
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// Don't merge availability attributes at all. AMK_None, /// Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override, /// Merge availability attributes for an implementation of /// a protocol requirement. AMK_ProtocolImplementation, }; /// Describes the kind of priority given to an availability attribute. /// /// The sum of priorities determines the final priority of the attribute. /// The final priority determines how the attribute will be merged. /// An attribute with a lower priority will always remove higher priority /// attributes for the specified platform when it is being applied. An /// attribute with a higher priority will not be applied if the declaration /// already has an availability attribute with a lower priority for the /// specified platform. The final priority values are not expected to match /// the values in this enumeration, but instead should be treated as a plain /// integer value. This enumeration just names the priority weights that are /// used to calculate that final value. enum AvailabilityPriority : int { /// The availability attribute was specified explicitly next to the /// declaration. AP_Explicit = 0, /// The availability attribute was applied using '#pragma clang attribute'. AP_PragmaClangAttribute = 1, /// The availability attribute for a specific platform was inferred from /// an availability attribute for another platform. AP_InferredFromOtherPlatform = 2 }; /// Attribute merging methods. Return true if a new attribute was added.
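/// A minimal sketch of the merging pattern (illustrative only; `S`, `D`, and
/// the AttributeCommonInfo `CI` are assumed):
/// \code
///   if (VisibilityAttr *NewAttr =
///           S.mergeVisibilityAttr(D, CI, VisibilityAttr::Default))
///     D->addAttr(NewAttr);
/// \endcode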
AvailabilityAttr * mergeAvailabilityAttr(NamedDecl *D, const AttributeCommonInfo &CI, IdentifierInfo *Platform, bool Implicit, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool IsStrict, StringRef Replacement, AvailabilityMergeKind AMK, int Priority); TypeVisibilityAttr * mergeTypeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, TypeVisibilityAttr::VisibilityType Vis); VisibilityAttr *mergeVisibilityAttr(Decl *D, const AttributeCommonInfo &CI, VisibilityAttr::VisibilityType Vis); UuidAttr *mergeUuidAttr(Decl *D, const AttributeCommonInfo &CI, StringRef UuidAsWritten, MSGuidDecl *GuidDecl); DLLImportAttr *mergeDLLImportAttr(Decl *D, const AttributeCommonInfo &CI); DLLExportAttr *mergeDLLExportAttr(Decl *D, const AttributeCommonInfo &CI); MSInheritanceAttr *mergeMSInheritanceAttr(Decl *D, const AttributeCommonInfo &CI, bool BestCase, MSInheritanceModel Model); FormatAttr *mergeFormatAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Format, int FormatIdx, int FirstArg); SectionAttr *mergeSectionAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); CodeSegAttr *mergeCodeSegAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, const AttributeCommonInfo &CI, const IdentifierInfo *Ident); MinSizeAttr *mergeMinSizeAttr(Decl *D, const AttributeCommonInfo &CI); NoSpeculativeLoadHardeningAttr * mergeNoSpeculativeLoadHardeningAttr(Decl *D, const NoSpeculativeLoadHardeningAttr &AL); SpeculativeLoadHardeningAttr * mergeSpeculativeLoadHardeningAttr(Decl *D, const SpeculativeLoadHardeningAttr &AL); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, const AttributeCommonInfo &CI); SwiftNameAttr *mergeSwiftNameAttr(Decl *D, const AttributeCommonInfo &CI, StringRef Name, bool Override); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL); InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const InternalLinkageAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL); CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL); WebAssemblyImportNameAttr *mergeImportNameAttr( Decl *D, const WebAssemblyImportNameAttr &AL); WebAssemblyImportModuleAttr *mergeImportModuleAttr( Decl *D, const WebAssemblyImportModuleAttr &AL); void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn); void notePreviousDefinition(const NamedDecl *Old, SourceLocation New); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. 
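/// A minimal sketch of overload checking (illustrative only; `S`, `Sc`,
/// `NewFD`, and the LookupResult `Previous` are assumed):
/// \code
///   NamedDecl *OldD = nullptr;
///   if (S.CheckOverload(Sc, NewFD, Previous, OldD,
///                       /*IsForUsingDecl=*/false) == Sema::Ovl_Overload) {
///     /* NewFD is a genuine overload of the previous declarations */
///   }
/// \endcode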
enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. Ovl_NonFunction }; OverloadKind CheckOverload(Scope *S, FunctionDecl *New, const LookupResult &OldDecls, NamedDecl *&OldDecl, bool IsForUsingDecl); bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl, bool ConsiderCudaAttrs = true, bool ConsiderRequiresClauses = true); enum class AllowedExplicit { /// Allow no explicit functions to be used. None, /// Allow explicit conversion functions but not explicit constructors. Conversions, /// Allow both explicit conversion functions and explicit constructors. All }; ImplicitConversionSequence TryImplicitConversion(Expr *From, QualType ToType, bool SuppressUserConversions, AllowedExplicit AllowExplicit, bool InOverloadResolution, bool CStyle, bool AllowObjCWritebackConversion); bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType); bool IsFloatingPointPromotion(QualType FromType, QualType ToType); bool IsComplexPromotion(QualType FromType, QualType ToType); bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType, bool &IncompatibleObjC); bool isObjCWritebackConversion(QualType FromType, QualType ToType, QualType &ConvertedType); bool IsBlockPointerConversion(QualType FromType, QualType ToType, QualType& ConvertedType); bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType, const FunctionProtoType *NewType, unsigned *ArgPos = nullptr); void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag, QualType FromType, QualType ToType); void maybeExtendBlockObject(ExprResult &E); CastKind PrepareCastToObjCObjectPointer(ExprResult &E); bool CheckPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath& BasePath, bool IgnoreBaseAccess, bool Diagnose = true); bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType, bool InOverloadResolution, QualType &ConvertedType); bool CheckMemberPointerConversion(Expr *From, QualType ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsFunctionConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformAggregateInitializationForOverloadResolution( const InitializedEntity &Entity, InitListExpr *From); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); /// Check that the lifetime of the initializer (and its 
subobjects) is /// sufficient for initializing the entity, and perform lifetime extension /// (when permitted) if not. void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_ArrayBound, ///< Array bound in array declarator or new-expression. CCEK_ConstexprIf, ///< Condition in a constexpr if statement. CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. class ContextualImplicitConverter { public: bool Suppress; bool SuppressConversion; ContextualImplicitConverter(bool Suppress = false, bool SuppressConversion = false) : Suppress(Suppress), SuppressConversion(SuppressConversion) {} /// Determine whether the specified type is a valid destination type /// for this conversion. virtual bool match(QualType T) = 0; /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. virtual SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the expression has incomplete class type. virtual SemaDiagnosticBuilder diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a diagnostic when the only matching conversion function /// is explicit. virtual SemaDiagnosticBuilder diagnoseExplicitConv( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; /// Emits a note for the explicit conversion function. virtual SemaDiagnosticBuilder noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when there are multiple possible conversion /// functions. virtual SemaDiagnosticBuilder diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0; /// Emits a note for one of the candidate conversions. virtual SemaDiagnosticBuilder noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0; /// Emits a diagnostic when we picked a conversion function /// (for cases when we are not allowed to pick a conversion function). virtual SemaDiagnosticBuilder diagnoseConversion( Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0; virtual ~ContextualImplicitConverter() {} }; class ICEConvertDiagnoser : public ContextualImplicitConverter { bool AllowScopedEnumerations; public: ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress, bool SuppressConversion) : ContextualImplicitConverter(Suppress, SuppressConversion), AllowScopedEnumerations(AllowScopedEnumerations) {} /// Match an integral or (possibly scoped) enumeration type. bool match(QualType T) override; SemaDiagnosticBuilder diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override { return diagnoseNotInt(S, Loc, T); } /// Emits a diagnostic complaining that the expression does not have /// integral or enumeration type. 
virtual SemaDiagnosticBuilder diagnoseNotInt(Sema &S, SourceLocation Loc,
                                             QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;

using ADLCallKind = CallExpr::ADLCallKind;

void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
                          ArrayRef<Expr *> Args,
                          OverloadCandidateSet &CandidateSet,
                          bool SuppressUserConversions = false,
                          bool PartialOverloading = false,
                          bool AllowExplicit = true,
                          bool AllowExplicitConversion = false,
                          ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
                          ConversionSequenceList EarlyConversions = None,
                          OverloadCandidateParamOrder PO = {});
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
                           ArrayRef<Expr *> Args,
                           OverloadCandidateSet &CandidateSet,
                           TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
                           bool SuppressUserConversions = false,
                           bool PartialOverloading = false,
                           bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversion = false,
                        OverloadCandidateParamOrder PO = {});
void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl,
                        CXXRecordDecl *ActingContext, QualType ObjectType,
                        Expr::Classification ObjectClassification,
                        ArrayRef<Expr *> Args,
                        OverloadCandidateSet& CandidateSet,
                        bool SuppressUserConversions = false,
                        bool PartialOverloading = false,
                        ConversionSequenceList EarlyConversions = None,
                        OverloadCandidateParamOrder PO = {});
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
                                DeclAccessPair FoundDecl,
                                CXXRecordDecl *ActingContext,
                                TemplateArgumentListInfo *ExplicitTemplateArgs,
                                QualType ObjectType,
                                Expr::Classification ObjectClassification,
                                ArrayRef<Expr *> Args,
                                OverloadCandidateSet& CandidateSet,
                                bool SuppressUserConversions = false,
                                bool PartialOverloading = false,
                                OverloadCandidateParamOrder PO = {});
void AddTemplateOverloadCandidate(
    FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
    TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
    OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
    bool PartialOverloading = false, bool AllowExplicit = true,
    ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
    OverloadCandidateParamOrder PO = {});
bool CheckNonDependentConversions(
    FunctionTemplateDecl *FunctionTemplate, ArrayRef<QualType> ParamTypes,
    ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet,
    ConversionSequenceList &Conversions, bool SuppressUserConversions,
    CXXRecordDecl *ActingContext = nullptr, QualType ObjectType = QualType(),
    Expr::Classification ObjectClassification = {},
    OverloadCandidateParamOrder PO
= {}); void AddConversionCandidate( CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddTemplateConversionCandidate( FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit, bool AllowExplicit, bool AllowResultConversion = true); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddNonMemberOperatorCandidates( const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, OverloadCandidateParamOrder PO = {}); void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate( NamedDecl *Found, FunctionDecl *Fn, OverloadCandidateRewriteKind RewriteKind = OverloadCandidateRewriteKind(), QualType DestType = QualType(), bool TakingAddress = false); // Emit as a series of 'note's all template and non-templates identified by // the expression Expr void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(), bool TakingAddress = false); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. EnableIfAttr *CheckEnableIf(FunctionDecl *Function, SourceLocation CallLoc, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); /// Find the failed Boolean condition within a given Boolean /// constant expression, and describe it with a string. std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// non-ArgDependent DiagnoseIfAttrs. /// /// Argument-dependent diagnose_if attributes should be checked each time a /// function is used as a direct callee of a function call. /// /// Returns true if any errors were emitted. bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function, const Expr *ThisArg, ArrayRef<const Expr *> Args, SourceLocation Loc); /// Emit diagnostics for the diagnose_if attributes on Function, ignoring any /// ArgDependent DiagnoseIfAttrs. /// /// Argument-independent diagnose_if attributes should be checked on every use /// of a function. /// /// Returns true if any errors were emitted. bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND, SourceLocation Loc); /// Returns whether the given function's address can be taken or not, /// optionally emitting a diagnostic if the address can't be taken. 
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
                                       bool Complain = false,
                                       SourceLocation Loc = SourceLocation());

// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);

FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType,
                                   bool Complain, DeclAccessPair &Found,
                                   bool *pHadMultipleCandidates = nullptr);

FunctionDecl *
resolveAddressOfSingleOverloadCandidate(Expr *E, DeclAccessPair &FoundResult);

bool resolveAndFixAddressOfSingleOverloadCandidate(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);

FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
                                            bool Complain = false,
                                            DeclAccessPair *Found = nullptr);

bool ResolveAndFixSingleFunctionTemplateSpecialization(
    ExprResult &SrcExpr, bool DoFunctionPointerConversion = false,
    bool Complain = false, SourceRange OpRangeForComplaining = SourceRange(),
    QualType DestTypeForComplaining = QualType(),
    unsigned DiagIDForComplaining = 0);

Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl,
                                     FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
                                          DeclAccessPair FoundDecl,
                                          FunctionDecl *Fn);

void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
                                 ArrayRef<Expr *> Args,
                                 OverloadCandidateSet &CandidateSet,
                                 bool PartialOverloading = false);

// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
  FRS_Success,
  FRS_NoViableFunction,
  FRS_DiagnosticIssued
};

ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
                                         SourceLocation RangeLoc,
                                         const DeclarationNameInfo &NameInfo,
                                         LookupResult &MemberLookup,
                                         OverloadCandidateSet *CandidateSet,
                                         Expr *Range, ExprResult *CallExpr);

ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
                                   UnresolvedLookupExpr *ULE,
                                   SourceLocation LParenLoc,
                                   MultiExprArg Args,
                                   SourceLocation RParenLoc,
                                   Expr *ExecConfig,
                                   bool AllowTypoCorrection = true,
                                   bool CalleesAddressIsTaken = false);

bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
                            MultiExprArg Args, SourceLocation RParenLoc,
                            OverloadCandidateSet *CandidateSet,
                            ExprResult *Result);

ExprResult CreateUnresolvedLookupExpr(CXXRecordDecl *NamingClass,
                                      NestedNameSpecifierLoc NNSLoc,
                                      DeclarationNameInfo DNI,
                                      const UnresolvedSetImpl &Fns,
                                      bool PerformADL = true);

ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
                                   const UnresolvedSetImpl &Fns, Expr *input,
                                   bool RequiresADL = true);

void LookupOverloadedBinOp(OverloadCandidateSet &CandidateSet,
                           OverloadedOperatorKind Op,
                           const UnresolvedSetImpl &Fns,
                           ArrayRef<Expr *> Args, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
                                 const UnresolvedSetImpl &Fns, Expr *LHS,
                                 Expr *RHS, bool RequiresADL = true,
                                 bool AllowRewrittenCandidates = true,
                                 FunctionDecl *DefaultedFn = nullptr);
ExprResult BuildSynthesizedThreeWayComparison(SourceLocation OpLoc,
                                              const UnresolvedSetImpl &Fns,
                                              Expr *LHS, Expr *RHS,
                                              FunctionDecl *DefaultedFn);

ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
                                              SourceLocation RLoc,
                                              Expr *Base, Expr *Idx);

ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
                                     SourceLocation LParenLoc,
                                     MultiExprArg Args,
                                     SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
                             MultiExprArg Args, SourceLocation RParenLoc);

ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
                                    SourceLocation OpLoc,
                                    bool *NoArrowOperatorFound = nullptr);

/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
                         CallExpr *CE, FunctionDecl *FD);

/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
                              bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);

/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupNameKind enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{

/// Describes the kind of name lookup to perform.
enum LookupNameKind {
  /// Ordinary name lookup, which finds ordinary names (functions,
  /// variables, typedefs, etc.) in C and most kinds of names
  /// (functions, variables, members, types, etc.) in C++.
  LookupOrdinaryName = 0,

  /// Tag name lookup, which finds the names of enums, classes,
  /// structs, and unions.
  LookupTagName,

  /// Label name lookup.
  LookupLabel,

  /// Member name lookup, which finds the names of
  /// class/struct/union members.
  LookupMemberName,

  /// Lookup of an operator name (e.g., operator+) for use with
  /// operator overloading. This lookup is similar to ordinary name
  /// lookup, but will ignore any declarations that are class members.
  LookupOperatorName,

  /// Look up a name following ~ in a destructor name. This is an ordinary
  /// lookup, but prefers tags to typedefs.
  LookupDestructorName,

  /// Lookup of a name that precedes the '::' scope resolution
  /// operator in C++. This lookup completely ignores operator, object,
  /// function, and enumerator names (C++ [basic.lookup.qual]p1).
  LookupNestedNameSpecifierName,

  /// Look up a namespace name within a C++ using directive or
  /// namespace alias definition, ignoring non-namespace names (C++
  /// [basic.lookup.udir]p1).
LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// Look up the name of an OpenMP user-defined reduction operation. LookupOMPReductionName, /// Look up the name of an OpenMP user-defined mapper. LookupOMPMapperName, /// Look up any declaration with any name. LookupAnyName }; /// Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. NotForRedeclaration = 0, /// The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists and is visible. ForVisibleRedeclaration, /// The lookup results will be used for redeclaration of a name /// with external linkage; non-visible lookup results with external linkage /// may also be found. ForExternalRedeclaration }; RedeclarationKind forRedeclarationInCurContext() { // A declaration with an owning module for linkage can never link against // anything that is not visible. We don't need to check linkage here; if // the context has internal linkage, redeclaration lookup won't find things // from other TUs, and we can't safely compute linkage yet in general. if (cast<Decl>(CurContext) ->getOwningModuleForLinkage(/*IgnoreLinkage*/true)) return ForVisibleRedeclaration; return ForExternalRedeclaration; } /// The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// The lookup resulted in an error. LOLR_Error, /// The lookup found no match but no diagnostic was issued. LOLR_ErrorNoDiagnostic, /// The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. 
  LOLR_StringTemplate
};

SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
                                                CXXSpecialMember SM,
                                                bool ConstArg,
                                                bool VolatileArg,
                                                bool RValueThis,
                                                bool ConstThis,
                                                bool VolatileThis);

typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
    TypoRecoveryCallback;

private:
bool CppLookupName(LookupResult &R, Scope *S);

struct TypoExprState {
  std::unique_ptr<TypoCorrectionConsumer> Consumer;
  TypoDiagnosticGenerator DiagHandler;
  TypoRecoveryCallback RecoveryHandler;
  TypoExprState();
  TypoExprState(TypoExprState &&other) noexcept;
  TypoExprState &operator=(TypoExprState &&other) noexcept;
};

/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;

/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC, SourceLocation TypoLoc);

// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind, Scope *S,
                           CXXScopeSpec *SS,
                           CorrectionCandidateCallback &CCC,
                           DeclContext *MemberContext, bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupBuiltin(LookupResult &R); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, UnresolvedSetImpl &Functions); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate, bool DiagnoseMissing); bool isKnownName(StringRef name); /// Status of the function emission on the CUDA/HIP/OpenMP host/device attrs. enum class FunctionEmissionStatus { Emitted, CUDADiscarded, // Discarded due to CUDA/HIP hostness OMPDiscarded, // Discarded due to OpenMP hostness TemplateDiscarded, // Discarded due to uninstantiated templates Unknown, }; FunctionEmissionStatus getEmissionStatus(FunctionDecl *Decl, bool Final = false); // Whether the callee should be ignored in CUDA/HIP/OpenMP host/device check. bool shouldIgnoreInHostDeviceCheck(FunctionDecl *Callee); void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool LoadExternal = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true, bool IncludeDependentBases = false, bool LoadExternal = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. 
}; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, CorrectionCandidateCallback &CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); /// Process any TypoExprs in the given Expr and its children, /// generating diagnostics as appropriate and returning a new Expr if there /// were typos that were all successfully corrected and ExprError if one or /// more typos could not be corrected. /// /// \param E The Expr to check for TypoExprs. /// /// \param InitDecl A VarDecl to avoid because the Expr being corrected is its /// initializer. /// /// \param RecoverUncorrectedTypos If true, when typo correction fails, it /// will rebuild the given Expr with all TypoExprs degraded to RecoveryExprs. /// /// \param Filter A function applied to a newly rebuilt Expr to determine if /// it is an acceptable/usable result from a single combination of typo /// corrections. As long as the filter returns ExprError, different /// combinations of corrections will be tried until all are exhausted. ExprResult CorrectDelayedTyposInExpr( Expr *E, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr( ExprResult ER, VarDecl *InitDecl = nullptr, bool RecoverUncorrectedTypos = false, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, RecoverUncorrectedTypos, Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} /// Attempts to produce a RecoveryExpr after some AST node cannot be created. ExprResult CreateRecoveryExpr(SourceLocation Begin, SourceLocation End, ArrayRef<Expr *> SubExprs, QualType T = QualType()); ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributesForReplaceableGlobalAllocationFunction( FunctionDecl *FD); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. 
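// Editor's illustration (hypothetical caller; 'SemaRef' and 'E' are assumed
// names) for CorrectDelayedTyposInExpr, declared in the lookup section above:
// a custom filter makes correction combinations be retried until the rebuilt
// expression is acceptable, here requiring an integer-typed result.
//
// \code
//   ExprResult Fixed = SemaRef.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, /*RecoverUncorrectedTypos=*/false,
//       [](Expr *Rebuilt) -> ExprResult {
//         if (Rebuilt->getType()->isIntegerType())
//           return Rebuilt;
//         return ExprError(); // Try the next combination of corrections.
//       });
// \endcode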
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
                                 const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D,
                              const ParsedAttributesView &AL,
                              bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
                                    const ParsedAttributesView &AttrList);

void checkUnusedDeclAttributes(Declarator &D);

/// Map any API notes provided for this declaration to attributes on the
/// declaration.
///
/// Triggered by declaration-attribute processing.
void ProcessAPINotes(Decl *D);

/// Determine if type T is a valid subject for nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);

bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
                          const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
                                    StringRef &Str,
                                    SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
    CXXRecordDecl *RD, SourceRange Range, bool BestCase,
    MSInheritanceModel SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);

/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
                            SourceLocation Loc);

// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);

/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;

/// Check whether a nullability type specifier can be added to the given
/// type through some means not written in source (e.g. API notes).
///
/// \param type The type to which the nullability specifier will be
/// added. On success, this type will be updated appropriately.
///
/// \param nullability The nullability specifier to add.
///
/// \param diagLoc The location to use for diagnostics.
///
/// \param allowArrayTypes Whether to accept nullability specifiers on an
/// array type (e.g., because it will decay to a pointer).
///
/// \param overrideExisting Whether to override an existing, locally-specified
/// nullability specifier rather than complaining about the conflict.
///
/// \returns true if nullability cannot be applied, false otherwise.
bool checkImplicitNullabilityTypeSpecifier(QualType &type,
                                           NullabilityKind nullability,
                                           SourceLocation diagLoc,
                                           bool allowArrayTypes,
                                           bool overrideExisting);

/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
                                 const ParsedAttributesView &Attrs,
                                 SourceRange Range);

void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
                                 ObjCMethodDecl *MethodDecl,
                                 bool IsProtocolMethodDecl);

void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
                                      ObjCMethodDecl *Overridden,
                                      bool IsProtocolMethodDecl);

/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl,
                           bool IsProtocolMethodDecl);

typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;

/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
                              ObjCIvarDecl **Fields, unsigned nIvars,
                              SourceLocation Loc);

/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
                               ObjCContainerDecl* IDecl,
                               bool IncompleteImpl = false);

/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
                                     ObjCContainerDecl *CDecl,
                                     bool SynthesizeProperties);

/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);

/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
                                 ObjCInterfaceDecl *IDecl,
                                 SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);

/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
                                    ObjCMethodDecl *Method, ObjCIvarDecl *IV);

/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
                                         const ObjCImplementationDecl *ImplD);

/// GetIvarBackingPropertyAccessor - If the method is a property setter/getter
/// and its property has a backing ivar, returns this ivar; otherwise, returns
/// NULL. It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
                                             const ObjCPropertyDecl *&PDecl) const;

/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
                                                 SourceLocation AtLoc,
                                                 SourceLocation LParenLoc,
                                                 FieldDeclarator &FD,
                                                 Selector GetterSel,
                                                 SourceLocation GetterNameLoc,
                                                 Selector SetterSel,
                                                 SourceLocation SetterNameLoc,
                                                 const bool isReadWrite,
                                                 unsigned &Attributes,
                                                 const unsigned AttributesAsWritten,
                                                 QualType T,
                                                 TypeSourceInfo *TSI,
                                                 tok::ObjCKeywordKind MethodImplKind);

/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
                                     ObjCContainerDecl *CDecl,
                                     SourceLocation AtLoc,
                                     SourceLocation LParenLoc,
                                     FieldDeclarator &FD,
                                     Selector GetterSel,
                                     SourceLocation GetterNameLoc,
                                     Selector SetterSel,
                                     SourceLocation SetterNameLoc,
                                     const bool isReadWrite,
                                     const unsigned Attributes,
                                     const unsigned AttributesAsWritten,
                                     QualType T,
                                     TypeSourceInfo *TSI,
                                     tok::ObjCKeywordKind MethodImplKind,
                                     DeclContext *lexicalDC = nullptr);

/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one but not the other user-declared
/// setter or getter.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
                                     ObjCInterfaceDecl* IDecl);

void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);

void DiagnoseMissingDesignatedInitOverrides(
    const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD);

void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);

enum MethodMatchStrategy {
  MMS_loose,
  MMS_strict
};

/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
                                const ObjCMethodDecl *PrevMethod,
                                MethodMatchStrategy strategy = MMS_strict);

/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
                                const SelectorSet &ClsMap,
                                SelectorSet &InsMapSeen,
                                SelectorSet &ClsMapSeen,
                                ObjCImplDecl* IMPDecl,
                                ObjCContainerDecl* IDecl,
                                bool &IncompleteImpl, bool ImmediateClass,
                                bool WarnCategoryMethodImpl = false);

/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in a
/// category match those implemented in its primary class and warns each time
/// an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);

/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);

/// Returns default addr space for method qualifiers.
LangAS getDefaultCXXMethodAddrSpace() const;

private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);

/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
                                         bool receiverIdOrClass,
                                         bool instance);

public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool CollectMultipleMethodsInGlobalPool(Selector Sel,
                                        SmallVectorImpl<ObjCMethodDecl*>& Methods,
                                        bool InstanceFirst, bool CheckTheOther,
                                        const ObjCObjectType *TypeBound = nullptr);

bool AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
                                    SourceRange R, bool receiverIdOrClass,
                                    SmallVectorImpl<ObjCMethodDecl*>& Methods);

void DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
                                        Selector Sel, SourceRange R,
                                        bool receiverIdOrClass);

private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
                                 bool IsInstance,
                                 SmallVectorImpl<ObjCMethodDecl*>& Methods);

/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
                                bool RecordFailure = true) {
  if (RecordFailure)
    TypoCorrectionFailures[Typo].insert(TypoLoc);
  return TypoCorrection();
}

public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/true);
}

/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl = false) {
  AddMethodToGlobalPool(Method, impl, /*instance*/false);
}

/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);

/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
                                                 bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/true);
}

/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
                                                bool receiverIdOrClass = false) {
  return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
                                  /*instance*/false);
}

const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
                                                 QualType ObjectType = QualType());

/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);

/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
                                       SmallVectorImpl<ObjCIvarDecl*> &Ivars);

//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
  FullExprArg() : E(nullptr) { }
  FullExprArg(Sema &actions) : E(nullptr) { }

  ExprResult release() { return E; }

  Expr *get() const { return E; }

  Expr *operator->() { return E; }

private:
  // FIXME: No need to make the entire Sema class a friend when it's just
  // Sema::MakeFullExpr that needs access to the constructor below.
  friend class Sema;

  explicit FullExprArg(Expr *expr) : E(expr) {}

  Expr *E;
};

FullExprArg MakeFullExpr(Expr *Arg) {
  return MakeFullExpr(Arg, Arg ?
                           Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
  return FullExprArg(
      ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
  ExprResult FE =
      ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
                          /*DiscardedValue*/ true);
  return FullExprArg(FE.get());
}

StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();

StmtResult ActOnNullStmt(SourceLocation SemiLoc,
                         bool HasLeadingEmptyMacro = false);

void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
                             ArrayRef<Stmt *> Elts, bool isStmtExpr);

/// An RAII object to enter the scope of a compound statement.
class CompoundScopeRAII {
public:
  CompoundScopeRAII(Sema &S, bool IsStmtExpr = false) : S(S) {
    S.ActOnStartOfCompoundStmt(IsStmtExpr);
  }

  ~CompoundScopeRAII() {
    S.ActOnFinishOfCompoundStmt();
  }

private:
  Sema &S;
};

/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
  Sema &S;
  bool Active;
  FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
  ~FunctionScopeRAII() {
    if (Active)
      S.PopFunctionScopeInfo();
  }
  void disable() { Active = false; }
};

StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc,
                         SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
                         SourceLocation DotDotDotLoc, ExprResult RHS,
                         SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);

StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
                            SourceLocation ColonLoc,
                            Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
                          SourceLocation ColonLoc, Stmt *SubStmt);

StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
                               ArrayRef<const Attr*> Attrs,
                               Stmt *SubStmt);

class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       SourceLocation LParenLoc, Stmt *InitStmt,
                       ConditionResult Cond, SourceLocation RParenLoc,
                       Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
                       SourceLocation LParenLoc, Stmt *InitStmt,
                       ConditionResult Cond, SourceLocation RParenLoc,
                       Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
                                  SourceLocation LParenLoc, Stmt *InitStmt,
                                  ConditionResult Cond,
                                  SourceLocation RParenLoc);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch,
                                 Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, SourceLocation LParenLoc,
                          ConditionResult Cond, SourceLocation RParenLoc,
                          Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
                       SourceLocation WhileLoc, SourceLocation CondLParen,
                       Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
                        SourceLocation LParenLoc,
                        Stmt *First,
                        ConditionResult Second,
                        FullExprArg Third,
                        SourceLocation RParenLoc,
                        Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
                                         Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
                                      Stmt *First, Expr *collection,
                                      SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);

enum BuildForRangeKind {
  /// Initial building of a for-range statement.
BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation CoawaitLoc, Stmt *InitStmt, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *Begin, Stmt *End, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params, unsigned OpenMPCaptureLevel = 0); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); enum CopyElisionSemanticsKind { CES_Strict = 0, CES_AllowParameters = 1, CES_AllowDifferentTypes = 2, CES_AllowExceptionVariables = 4, CES_FormerDefault = (CES_AllowParameters), CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes), CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes | CES_AllowExceptionVariables), }; VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, CopyElisionSemanticsKind CESK); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, CopyElisionSemanticsKind CESK); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, unsigned NumLabels, SourceLocation RParenLoc); void FillInlineAsmIdentifierInfo(Expr *Res, llvm::InlineAsmIdentifierInfo &Info); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, 
QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); void ActOnStartSEHFinallyBlock(); void ActOnAbortSEHFinallyBlock(); StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. /// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); /// Warn if a value is moved to itself. void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr, SourceLocation OpLoc); /// Warn if we're implicitly casting from a _Nullable pointer type to a /// _Nonnull one. void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType, SourceLocation Loc); /// Warn when implicitly casting 0 to nullptr. 
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);

ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
  return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);

typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
  ParsingClassDepth++;
  return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
  ParsingClassDepth--;
  DelayedDiagnostics.popUndelayed(state);
}

void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);

void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                                const ObjCInterfaceDecl *UnknownObjCClass,
                                bool ObjCPropertyAccess,
                                bool AvoidPartialAvailabilityChecks = false,
                                ObjCInterfaceDecl *ClassReceiver = nullptr);

bool makeUnavailableInSystemHeader(SourceLocation loc,
                                   UnavailableAttr::ImplicitReason reason);

/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);

void handleDelayedAvailabilityCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.

bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
                       const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
                       bool ObjCPropertyAccess = false,
                       bool AvoidPartialAvailabilityChecks = false,
                       ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);

bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
                                      ObjCMethodDecl *Getter,
                                      SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
                           ArrayRef<Expr *> Args);

void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
    ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
    ExpressionEvaluationContextRecord::ExpressionKind Type =
        ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();

void DiscardCleanupsInEvaluationContext();

ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);

ExprResult CheckUnevaluatedOperand(Expr *E);
void CheckUnusedVolatileAssignment(Expr *E);

ExprResult ActOnConstantExpression(ExprResult Res);

// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
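// Editor's illustration (plain C++ semantics, not Sema API) of why the
// odr-use marking described above must sometimes be delayed: merely naming a
// constant variable is not by itself an odr-use, so whether to mark it can
// depend on how the enclosing expression ends up being used.
//
// \code
//   constexpr int N = 4;
//   int Arr[N];         // N is named but not odr-used; its value is folded.
//   const int *P = &N;  // Taking N's address is an odr-use.
// \endcode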
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool MightBeOdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr); void MarkMemberReferenced(MemberExpr *E); void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E); void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc, unsigned CapturingScopeIndex); ExprResult CheckLValueToRValueConversionOperand(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// Try to capture the given variable. /// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely checks whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// Checks if the variable must be captured. bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); /// Mark all of the declarations referenced within a particular AST node as /// referenced. Used when template instantiation instantiates a non-dependent /// type -- entities referenced by the type are now referenced. void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid.
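///
/// A minimal sketch of the intended calling pattern (illustrative only;
/// 'S', 'R', and 'PD' are assumed to be a Sema instance, an ExprResult, and
/// a PartialDiagnostic from the surrounding context):
/// \code
///   // R holds a non-callable expression such as a bare function name.
///   if (S.tryToRecoverWithCall(R, PD) && R.isInvalid())
///     return ExprError(); // recovery attempted, but R is unusable
/// \endcode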
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// Try to convert an expression \p E to type \p Ty. Returns the result of the /// conversion. ExprResult tryConvertExprToType(Expr *E, QualType Ty); /// Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); /// Similar, but diagnostic is only produced if all the specified statements /// are reachable. bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts, const PartialDiagnostic &PD); // Primary Expressions. SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, CorrectionCandidateCallback *CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, CorrectionCandidateCallback &CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); DeclResult LookupIvarInObjCMethod(LookupResult &Lookup, Scope *S, IdentifierInfo *II); ExprResult BuildIvarRefExpr(Scope *S, SourceLocation Loc, ObjCIvarDecl *IV); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); /// If \p D cannot be odr-used in the current expression evaluation context, /// return a reason explaining why. Otherwise, return NOUR_None. 
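///
/// For illustration (invented user code, not from this interface), a
/// reference that is not an odr-use and so reports NOUR_Constant:
/// \code
///   void h() {
///     constexpr int N = 4;
///     auto L = [] { return N; }; // reading N is not an odr-use,
///   }                            // so no capture of N is required
/// \endcode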
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D); DeclRefExpr *BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); DeclRefExpr * BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, NestedNameSpecifierLoc NNS, NamedDecl *FoundD = nullptr, SourceLocation TemplateKWLoc = SourceLocation(), const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr( const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, UnresolvedLookupExpr *AsULE = nullptr); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool IsDefiniteInstance, const Scope *S); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, const Scope *S, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentKind IK); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); ExprResult BuildUniqueStableName(SourceLocation Loc, TypeSourceInfo *Operand); ExprResult BuildUniqueStableName(SourceLocation Loc, Expr *E); ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, ParsedType Ty); ExprResult ActOnUniqueStableNameExpr(SourceLocation OpLoc, SourceLocation LParen, SourceLocation RParen, Expr *E); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
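///
/// For example (invented user code), the following three tokens reach
/// ActOnStringLiteral together and yield a single wide string literal:
/// \code
///   const wchar_t *S = "foo" "bar" L"baz"; // one literal: L"foobarbaz"
/// \endcode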
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); bool isQualifiedMemberAccess(Expr *E); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, SourceRange ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinMatrixSubscriptExpr(Expr *Base, Expr *RowIdx, Expr *ColumnIdx, SourceLocation RBLoc); ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc, Expr *LowerBound, SourceLocation ColonLocFirst, SourceLocation ColonLocSecond, Expr *Length, Expr *Stride, SourceLocation RBLoc); ExprResult ActOnOMPArrayShapingExpr(Expr *Base, SourceLocation LParenLoc, SourceLocation RParenLoc, ArrayRef<Expr *> Dims, ArrayRef<SourceRange> Brackets); /// Data structure for iterator expression. struct OMPIteratorData { IdentifierInfo *DeclIdent = nullptr; SourceLocation DeclIdentLoc; ParsedType Type; OMPIteratorExpr::IteratorRange Range; SourceLocation AssignLoc; SourceLocation ColonLoc; SourceLocation SecColonLoc; }; ExprResult ActOnOMPIteratorExpr(Scope *S, SourceLocation IteratorKwLoc, SourceLocation LLoc, SourceLocation RLoc, ArrayRef<OMPIteratorData> Data); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). 
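//
// For illustration (invented user code, not from this interface), the
// recovery this enables:
//
//   struct Handle { int value; };
//   struct Ptr { Handle *operator->(); };
//   int get(Ptr p) { return p.value; } // error: no member 'value' in 'Ptr';
//                                      // retried as p->value via operator->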
struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, const Scope *S, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, FieldDecl *Field, DeclAccessPair FoundDecl, const DeclarationNameInfo &MemberNameInfo); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec *SS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); MemberExpr * BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc, NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc, ValueDecl *Member, DeclAccessPair FoundDecl, bool HadMultipleCandidates, const DeclarationNameInfo &MemberNameInfo, QualType Ty, ExprValueKind VK, ExprObjectKind OK, const TemplateArgumentListInfo *TemplateArgs = nullptr); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. 
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr); ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); enum class AtomicArgumentOrder { API, AST }; ExprResult BuildAtomicExpr(SourceRange CallRange, SourceRange ExprRange, SourceLocation RParenLoc, MultiExprArg Args, AtomicExpr::AtomicOp Op, AtomicArgumentOrder ArgOrder = AtomicArgumentOrder::API); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false, ADLCallKind UsesADL = ADLCallKind::NotADL); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// Build an altivec or OpenCL literal. ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult BuildInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation EqualOrColonLoc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); void LookupBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, UnresolvedSetImpl &Functions); void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(Scope *S, SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); ExprResult BuildStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc, unsigned TemplateDepth); // Handle the final expression in a statement expression.
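//
// For illustration (GNU extension, invented user code): the value of a
// statement expression is its final expression, which is what
// ActOnStmtExprResult receives.
//
//   int y = ({ int t = compute(); t + 1; }); // 't + 1' is the result expression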
ExprResult ActOnStmtExprResult(ExprResult E); void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, ArrayRef<OffsetOfComponent> Components, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(), // __builtin_COLUMN() ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc); // Build a potentially resolved SourceLocExpr. ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind, SourceLocation BuiltinLoc, SourceLocation RPLoc, DeclContext *ParentContext); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// Describes the result of an "if-exists" condition check. enum IfExistsResult { /// The symbol exists. IER_Exists, /// The symbol does not exist. IER_DoesNotExist, /// The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// An error occurred. IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) 
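///
/// For example (invented user code), an element-wise vector conversion:
/// \code
///   typedef float float4 __attribute__((ext_vector_type(4)));
///   typedef int   int4   __attribute__((ext_vector_type(4)));
///   int4 trunc4(float4 v) { return __builtin_convertvector(v, int4); }
/// \endcode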
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, const ParsedAttributesView &AttrList, UsingDirectiveDecl *&UsingDecl); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); NamespaceDecl *lookupStdExperimentalNamespace(); CXXRecordDecl *getStdBadAlloc() const; EnumDecl *getStdAlignValT() const; private: // A cache representing if we've fully checked the various comparison category // types stored in ASTContext. The bit-index corresponds to the integer value // of a ComparisonCategoryType enumerator. llvm::SmallBitVector FullyCheckedComparisonCategories; ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl, CXXScopeSpec &SS, ParsedType TemplateTypeTy, IdentifierInfo *MemberOrBase); public: enum class ComparisonCategoryUsage { /// The '<=>' operator was used in an expression and a builtin operator /// was selected. OperatorInExpression, /// A defaulted 'operator<=>' needed the comparison category. This /// typically only applies to 'std::strong_ordering', due to the implicit /// fallback return value. DefaultedOperator, }; /// Lookup the specified comparison category types in the standard /// library, and check the VarDecls possibly returned by the operator<=> /// builtins for that type. /// /// \return The type of the comparison category type corresponding to the /// specified Kind, or a null type if an error occurs. QualType CheckComparisonCategoryType(ComparisonCategoryType Kind, SourceLocation Loc, ComparisonCategoryUsage Usage); /// Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2.
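///
/// For illustration (invented user code, assuming <initializer_list> is
/// included):
/// \code
///   struct S {
///     S(std::initializer_list<int>);          // initializer-list constructor
///     S(std::initializer_list<int>, int = 0); // also one (defaulted trailing arg)
///     S(int, int);                            // not one
///   };
/// \endcode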
bool isInitListConstructor(const FunctionDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, const ParsedAttributesView &AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, bool HasTypename, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration( Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList, bool IsInstantiation); NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom, ArrayRef<NamedDecl *> Expansions); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); /// Given a derived-class using shadow declaration for a constructor and the /// corresponding base class constructor, find or create the implicit /// synthesized derived class constructor to use for this initialization. CXXConstructorDecl * findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor, ConstructorUsingShadowDecl *DerivedShadow); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, SourceLocation UsingLoc, SourceLocation TypenameLoc, CXXScopeSpec &SS, UnqualifiedId &Name, SourceLocation EllipsisLoc, const ParsedAttributesView &AttrList); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, const ParsedAttributesView &AttrList, TypeResult Type, Decl *DeclFromDeclSpec); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. /// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); /// Build a CXXConstructExpr whose constructor has already been resolved if /// it denotes an inherited constructor. ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable?
ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, NamedDecl *FoundDecl, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// Instantiate or parse a C++ default argument expression as necessary. /// Return true on error. bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(!isComputedNoexcept(ComputedEST) && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// Integrate an invoked expression into the collected data. void CalledExpr(Expr *E) { CalledStmt(E); } /// Integrate an invoked statement into the collected data. void CalledStmt(Stmt *S); /// Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_NoexceptFalse; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// Determine what sort of exception specification a defaulted /// default constructor of a class will have.
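///
/// For example (invented user code), the computed specification reflects
/// the default constructors of the members:
/// \code
///   struct M { M() noexcept; };
///   struct T { M m; };                 // implicit T::T() is noexcept
///   struct U { U() noexcept(false); };
///   struct V { U u; };                 // implicit V::V() is noexcept(false)
/// \endcode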
ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(SourceLocation Loc, CXXConstructorDecl *CD); /// Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, FunctionDecl *FD); /// Check the given noexcept-specifier, convert its expression, and compute /// the appropriate ExceptionSpecificationType. ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr, ExceptionSpecificationType &EST); /// Check the given exception-specification and update the /// exception specification information with the results. void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); class InheritedConstructorInfo; /// Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, InheritedConstructorInfo *ICI = nullptr, bool Diagnose = false); /// Produce notes explaining why a defaulted function was defined as deleted. void DiagnoseDeletedDefaultedFunction(FunctionDecl *FD); /// Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor); /// Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// Check a completed declaration of an implicit special member. 
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD); /// Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// Whether 'this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); /// Wrap the expression in a ConstantExpr if it is a potential immediate /// invocation. ExprResult CheckForImmediateInvocation(ExprResult E, FunctionDecl *Decl); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, bool EnteringContext); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorTypeForDecltype(const DeclSpec &DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse /// {dynamic,static,reinterpret,const,addrspace}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl, ExprResult Operand, SourceLocation RParenLoc); ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI, Expr *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ).
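///
/// For illustration (Microsoft extension, invented user code):
/// \code
///   struct _GUID; // normally declared by <guiddef.h>
///   struct __declspec(uuid("01234567-89ab-cdef-0123-456789abcdef")) Widget;
///   const _GUID &G = __uuidof(Widget); // parsed via ActOnCXXUuidof
/// \endcode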
ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(Scope *S, SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(UnresolvedLookupExpr *Callee, SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc, Optional<unsigned> NumExpansions); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// Build a CXXThisExpr and mark it referenced in the current context. Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit); void MarkThisReferenced(CXXThisExpr *This); /// Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the qualifiers placed on '*this'. CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \returns true on failure, false on success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr, bool ByCopy = false); /// Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); ExprResult ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs, SourceLocation AtLoc, SourceLocation RParen); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenOrBraceLoc, MultiExprArg Exprs, SourceLocation RParenOrBraceLoc, bool ListInitialization); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc, bool ListInitialization); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Optional<Expr *> ArraySize, SourceRange DirectInitRange, Expr *Initializer); /// Determine whether \p FD is an aligned allocation or deallocation /// function that is unavailable. bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const; /// Produce diagnostics if \p FD is an aligned allocation or deallocation /// function that is unavailable. void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD, SourceLocation Loc); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); /// The scope in which to find allocation functions. enum AllocationFunctionScope { /// Only look for allocation functions in the global scope. AFS_Global, /// Only look for allocation functions in the scope of the /// allocated class. AFS_Class, /// Look for allocation functions in both the global scope /// and in the scope of the allocated class. AFS_Both }; /// Finds the overloads of operator new and delete that are appropriate /// for the allocation. 
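///
/// For illustration of the scope distinction (invented user code):
/// \code
///   struct Pooled {
///     static void *operator new(__SIZE_TYPE__);
///     static void operator delete(void *);
///   };
///   Pooled *a = new Pooled;   // class-scope operators found (AFS_Class/AFS_Both)
///   Pooled *b = ::new Pooled; // '::new' forces global lookup (AFS_Global)
/// \endcode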
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, AllocationFunctionScope NewScope, AllocationFunctionScope DeleteScope, QualType AllocType, bool IsArray, bool &PassAlignment, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, ArrayRef<QualType> Params); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, bool Overaligned, DeclarationName Name); FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc, CXXRecordDecl *RD); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc, bool IsDelete, bool CallCanBeVirtual, bool WarnOnNonAbstractTypes, SourceLocation DtorLoc); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the binary type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. ExprResult ActOnExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult BuildExpressionTrait(ExpressionTrait OET, SourceLocation KWLoc, Expr *Queried, SourceLocation RParen); ExprResult ActOnStartCXXMemberReference(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, ParsedType &ObjectType, bool &MayBePseudoDestructor); ExprResult BuildPseudoDestructorExpr(Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, const CXXScopeSpec &SS, TypeSourceInfo *ScopeType, SourceLocation CCLoc, SourceLocation TildeLoc, PseudoDestructorTypeStorage DestroyedType); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, UnqualifiedId &FirstTypeName, SourceLocation CCLoc, SourceLocation TildeLoc, UnqualifiedId &SecondTypeName); ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, SourceLocation TildeLoc, const DeclSpec& DS); /// MaybeCreateExprWithCleanups - If the current full-expression /// requires any cleanups, surround it with a ExprWithCleanups node. /// Otherwise, just returns the passed-in expression. 
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr); Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt); ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr); MaterializeTemporaryExpr * CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary, bool BoundToLvalueReference); ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) { return ActOnFinishFullExpr( Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue); } ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC, bool DiscardedValue, bool IsConstexpr = false); StmtResult ActOnFinishFullStmt(Stmt *Stmt); // Marks SS invalid if it represents an incomplete type. bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD, bool *CanCorrect = nullptr); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); /// Keeps information about an identifier in a nested-name-spec. /// struct NestedNameSpecInfo { /// The type of the object, if we're parsing nested-name-specifier in /// a member access expression. ParsedType ObjectType; /// The identifier preceding the '::'. IdentifierInfo *Identifier; /// The location of the identifier. SourceLocation IdentifierLoc; /// The location of the '::'. SourceLocation CCLoc; /// Creates info object for the most typical case. NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType()) : ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc, SourceLocation ColonColonLoc, QualType ObjectType) : ObjectType(ParsedType::make(ObjectType)), Identifier(II), IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) { } }; bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo); bool BuildCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); /// The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param IdInfo Parser information about an identifier in the /// nested-name-spec. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. 
/// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \param OnlyNamespace If true, only considers namespaces in lookup. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, NestedNameSpecInfo &IdInfo, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr, bool OnlyNamespace = false); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, NestedNameSpecInfo &IdInfo, bool EnteringContext); /// The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. 
void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. /// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params, ConstexprSpecKind ConstexprKind, Expr *TrailingRequiresClause); /// Number lambda for linkage purposes if necessary. void handleLambdaNumbering( CXXRecordDecl *Class, CXXMethodDecl *Method, Optional<std::tuple<unsigned, bool, Decl *>> Mangling = None); /// Endow the lambda scope info with the relevant properties. void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. ParsedType actOnLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) { return ParsedType::make(buildLambdaInitCaptureInitialization( Loc, ByRef, EllipsisLoc, None, Id, InitKind != LambdaCaptureInitKind::CopyInit, Init)); } QualType buildLambdaInitCaptureInitialization( SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit, Expr *&Init); /// Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. 
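///
/// For orientation, the construct involved is an init-capture, e.g.
/// (illustrative only):
/// \code
///   auto Counter = [N = 0]() mutable { return ++N; }; // 'N' is the capture
/// \endcode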
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, SourceLocation EllipsisLoc, IdentifierInfo *Id, unsigned InitStyle, Expr *Init); /// Add an init-capture to a lambda scope. void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief This is called after parsing the explicit template parameter list /// on a lambda (if it exists) in C++2a. void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc, ArrayRef<NamedDecl *> TParams, SourceLocation RAngleLoc); /// Introduce the lambda parameters into scope. void addLambdaParameters( ArrayRef<LambdaIntroducer::LambdaCapture> Captures, CXXMethodDecl *CallOperator, Scope *CurScope); /// Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope); /// Does copying/destroying the captured variable have side effects? bool CaptureHasSideEffects(const sema::Capture &From); /// Diagnose if an explicit lambda capture is unused. Returns true if a /// diagnostic is emitted. bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange, const sema::Capture &From); /// Build a FieldDecl suitable to hold the given capture. FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture); /// Initialize the given capture with a suitable expression. ExprResult BuildCaptureInit(const sema::Capture &Capture, SourceLocation ImplicitCaptureLoc, bool IsOpenMPMapping = false); /// Complete a lambda-expression having processed and attached the /// lambda body. ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc, sema::LambdaScopeInfo *LSI); /// Get the return type to use for a lambda's conversion function(s) to /// function pointer type, given the type of the call operator. QualType getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType); /// Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// Define the "body" of the conversion from a lambda object to a /// block pointer. 
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
                                                  CXXConversionDecl *Conv);

ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
                                         SourceLocation ConvLocation,
                                         CXXConversionDecl *Conv, Expr *Src);

/// Check whether the given expression is a valid constraint expression.
/// A diagnostic is emitted if it is not, false is returned, and
/// PossibleNonPrimary will be set to true if the failure might be due to a
/// non-primary expression being used as an atomic constraint.
bool CheckConstraintExpression(const Expr *CE, Token NextToken = Token(),
                               bool *PossibleNonPrimary = nullptr,
                               bool IsTrailingRequiresClause = false);

private:
/// Caches pairs of template-like decls whose associated constraints were
/// checked for subsumption and whether or not the first's constraints did in
/// fact subsume the second's.
llvm::DenseMap<std::pair<NamedDecl *, NamedDecl *>, bool> SubsumptionCache;
/// Caches the normalized associated constraints of declarations (concepts or
/// constrained declarations). If an error occurred while normalizing the
/// associated constraints of the template or concept, nullptr will be cached
/// here.
llvm::DenseMap<NamedDecl *, NormalizedConstraint *> NormalizationCache;

llvm::ContextualFoldingSet<ConstraintSatisfaction, const ASTContext &>
    SatisfactionCache;

public:
const NormalizedConstraint *getNormalizedAssociatedConstraints(
    NamedDecl *ConstrainedDecl, ArrayRef<const Expr *> AssociatedConstraints);

/// \brief Check whether the given declaration's associated constraints are
/// at least as constrained as another declaration's, according to the
/// partial ordering of constraints.
///
/// \param Result If no error occurred, receives true if D1 is at least as
/// constrained as D2, and false otherwise.
///
/// \returns true if an error occurred, false otherwise.
bool IsAtLeastAsConstrained(NamedDecl *D1, ArrayRef<const Expr *> AC1,
                            NamedDecl *D2, ArrayRef<const Expr *> AC2,
                            bool &Result);

/// Emit a diagnostic if D1 was not at least as constrained as D2 but would
/// have been if a pair of the atomic constraints involved had been declared
/// in a concept rather than repeated in two separate places in code.
/// \returns true if such a diagnostic was emitted, false otherwise.
bool MaybeEmitAmbiguousAtomicConstraintsDiagnostic(
    NamedDecl *D1, ArrayRef<const Expr *> AC1, NamedDecl *D2,
    ArrayRef<const Expr *> AC2);

/// \brief Check whether the given list of constraint expressions is
/// satisfied (as if in a 'conjunction') given template arguments.
/// \param Template the template-like entity that triggered the constraints
/// check (either a concept or a constrained entity).
/// \param ConstraintExprs a list of constraint expressions, treated as if
/// they were 'AND'ed together.
/// \param TemplateArgs the list of template arguments to substitute into the
/// constraint expression.
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
/// \param Satisfaction if false is returned, will contain details of the
/// satisfaction, with enough information to diagnose an unsatisfied
/// expression.
/// \returns true if an error occurred and satisfaction could not be checked,
/// false otherwise.
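///
/// For example (illustrative only; 'Integral' stands in for any concept):
/// \code
///   template<typename T> requires (sizeof(T) <= 4) && Integral<T>
///   void f();
/// \endcode
/// Checking f<int> substitutes 'int' into each conjunct and records the
/// per-atom verdicts in \p Satisfaction.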
bool CheckConstraintSatisfaction(
    const NamedDecl *Template, ArrayRef<const Expr *> ConstraintExprs,
    ArrayRef<TemplateArgument> TemplateArgs, SourceRange TemplateIDRange,
    ConstraintSatisfaction &Satisfaction);

/// \brief Check whether the given non-dependent constraint expression is
/// satisfied. Returns false and updates Satisfaction with the satisfaction
/// verdict if successful, emits a diagnostic and returns true if an error
/// occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckConstraintSatisfaction(const Expr *ConstraintExpr,
                                 ConstraintSatisfaction &Satisfaction);

/// Check whether the given function decl's trailing requires clause is
/// satisfied, if any. Returns false and updates Satisfaction with the
/// satisfaction verdict if successful, emits a diagnostic and returns true
/// if an error occurred and satisfaction could not be determined.
///
/// \returns true if an error occurred, false otherwise.
bool CheckFunctionConstraints(const FunctionDecl *FD,
                              ConstraintSatisfaction &Satisfaction,
                              SourceLocation UsageLoc = SourceLocation());

/// \brief Ensure that the given template arguments satisfy the constraints
/// associated with the given template, emitting a diagnostic if they do not.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateArgs The converted, canonicalized template arguments.
///
/// \param TemplateIDRange The source range of the template id that
/// caused the constraints check.
///
/// \returns true if the constraints are not satisfied or could not be
/// checked for satisfaction, false if the constraints are satisfied.
bool EnsureTemplateArgumentListConstraints(
    TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs,
    SourceRange TemplateIDRange);

/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
/// \param First whether this is the first time an unsatisfied constraint is
/// diagnosed for this error.
void DiagnoseUnsatisfiedConstraint(const ConstraintSatisfaction &Satisfaction,
                                   bool First = true);

/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied.
void DiagnoseUnsatisfiedConstraint(
    const ASTConstraintSatisfaction &Satisfaction, bool First = true);

/// \brief Emit diagnostics explaining why a constraint expression was deemed
/// unsatisfied because it was ill-formed.
void DiagnoseUnsatisfiedIllFormedConstraint(SourceLocation DiagnosticLocation,
                                            StringRef Diagnostic);

void DiagnoseRedeclarationConstraintMismatch(SourceLocation Old,
                                             SourceLocation New);

// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
                                  ArrayRef<Expr *> Strings);

ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);

/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
                                bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);

/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression.
/// The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *" or C structure with attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);

ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
                                        Expr *IndexExpr,
                                        ObjCMethodDecl *getterMethod,
                                        ObjCMethodDecl *setterMethod);

ExprResult BuildObjCDictionaryLiteral(
    SourceRange SR, MutableArrayRef<ObjCDictionaryElement> Elements);

ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
                                     TypeSourceInfo *EncodedTypeInfo,
                                     SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
                                  CXXConversionDecl *Method,
                                  bool HadMultipleCandidates);

ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
                                     SourceLocation EncodeLoc,
                                     SourceLocation LParenLoc, ParsedType Ty,
                                     SourceLocation RParenLoc);

/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc,
                                       SourceLocation SelLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation RParenLoc,
                                       bool WarnMultipleSelectors);

/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo *ProtocolName,
                                       SourceLocation AtLoc,
                                       SourceLocation ProtoLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation ProtoIdLoc,
                                       SourceLocation RParenLoc);

//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc,
                                     Expr *LangStr, SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec,
                                      SourceLocation RBraceLoc);

//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
                        const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);

bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
                          SourceLocation ColonLoc,
                          const ParsedAttributesView &Attrs);

NamedDecl *ActOnCXXMemberDeclarator(
    Scope *S, AccessSpecifier AS, Declarator &D,
    MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth,
    const VirtSpecifiers &VS, InClassInitStyle InitStyle);

void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
                                            SourceLocation EqualLoc,
                                            Expr *Init);

MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS, SourceLocation IdLoc,
                                  SourceLocation LParenLoc,
                                  ArrayRef<Expr *> Args,
                                  SourceLocation RParenLoc,
                                  SourceLocation EllipsisLoc);

MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS, SourceLocation IdLoc,
                                  Expr *InitList,
                                  SourceLocation EllipsisLoc);

MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S,
                                  CXXScopeSpec &SS,
                                  IdentifierInfo *MemberOrBase,
                                  ParsedType TemplateTypeTy,
                                  const DeclSpec &DS, SourceLocation IdLoc,
                                  Expr *Init, SourceLocation EllipsisLoc);

MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init,
                                     SourceLocation IdLoc);

MemInitResult BuildBaseInitializer(QualType BaseType,
                                   TypeSourceInfo *BaseTInfo, Expr *Init,
                                   CXXRecordDecl
*ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// Mark destructors of virtual bases of this class referenced. In the Itanium /// C++ ABI, this is done when emitting a destructor for any non-abstract /// class. In the Microsoft C++ ABI, this is done any time a class's /// destructor is referenced. void MarkVirtualBaseDestructorsReferenced( SourceLocation Location, CXXRecordDecl *ClassDecl, llvm::SmallPtrSetImpl<const RecordType *> *DirectVirtualBases = nullptr); /// Do semantic checks to allow the complete destructor variant to be emitted /// when the destructor is defined in another translation unit. In the Itanium /// C++ ABI, destructor variants are emitted together. In the MS C++ ABI, they /// can be emitted in separate TUs. To emit the complete variant, run a subset /// of the checks performed when emitting a regular destructor. void CheckCompleteDestructorVariant(SourceLocation CurrentLocation, CXXDestructorDecl *Dtor); /// The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// Load any externally-stored vtable uses. void LoadExternalVTableUses(); /// Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD, bool ConstexprOnly = false); /// Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); /// Check class-level dllimport/dllexport attribute. The caller must /// ensure that referenceDLLExportedClassMethods is called some point later /// when all outer classes of Class are complete. 
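///
/// The attribute in question is the class-level annotation, e.g.
/// (illustrative only):
/// \code
///   class __declspec(dllexport) Widget {}; // members inherit the export
/// \endcode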
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
    CXXRecordDecl *Class, Attr *ClassAttr,
    ClassTemplateSpecializationDecl *BaseTemplateSpec,
    SourceLocation BaseLoc);

/// Add gsl::Pointer attribute to std::container::iterator
/// \param ND The declaration that introduces the name
/// std::container::iterator.
/// \param UnderlyingRecord The record named by ND.
void inferGslPointerAttribute(NamedDecl *ND, CXXRecordDecl *UnderlyingRecord);

/// Add [[gsl::Owner]] and [[gsl::Pointer]] attributes for std:: types.
void inferGslOwnerPointerAttribute(CXXRecordDecl *Record);

/// Add [[gsl::Pointer]] attributes for std:: types.
void inferGslPointerAttribute(TypedefNameDecl *TD);

void CheckCompletedCXXClass(Scope *S, CXXRecordDecl *Record);

/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);

void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
                                       Decl *TagDecl, SourceLocation LBrac,
                                       SourceLocation RBrac,
                                       const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass();

void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Decl *Template,
                                   llvm::function_ref<Scope *()> EnterScope);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
                              CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();

Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr, Expr *AssertMessageExpr,
                                   SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
                                   Expr *AssertExpr,
                                   StringLiteral *AssertMessageExpr,
                                   SourceLocation RParenLoc, bool Failed);

FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
                                SourceLocation FriendLoc,
                                TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
                          MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
                                   MultiTemplateParamsArg TemplateParams);

QualType CheckConstructorDeclarator(Declarator &D, QualType R,
                                    StorageClass &SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
                                   StorageClass &SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
                               StorageClass &SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
                                   StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);

void CheckExplicitlyDefaultedFunction(Scope *S, FunctionDecl *MD);

bool CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD,
                                           CXXSpecialMember CSM);
void CheckDelayedMemberExceptionSpecs();

bool CheckExplicitlyDefaultedComparison(Scope *S, FunctionDecl *MD,
                                        DefaultedComparisonKind DCK);
void DeclareImplicitEqualityComparison(CXXRecordDecl
*RD, FunctionDecl *Spaceship); void DefineDefaultedComparison(SourceLocation Loc, FunctionDecl *FD, DefaultedComparisonKind DCK); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, MutableArrayRef<CXXBaseSpecifier *> Bases); void ActOnBaseSpecifiers(Decl *ClassDecl, MutableArrayRef<CXXBaseSpecifier *> Bases); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base); bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbiguousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath, bool IgnoreAccess = false); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D, bool Inconsistent); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
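///
/// That is, it diagnoses code such as:
/// \code
///   struct B { virtual void f() final; };
///   struct D : B { void f(); }; // error: D::f overrides a 'final' function
/// \endcode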
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New, const CXXMethodDecl *Old); //===--------------------------------------------------------------------===// // C++ Access Control // enum AccessResult { AR_accessible, AR_inaccessible, AR_dependent, AR_delayed }; bool SetMemberAccessSpecifier(NamedDecl *MemberDecl, NamedDecl *PrevMemberDecl, AccessSpecifier LexicalAS); AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E, DeclAccessPair FoundDecl); AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E, DeclAccessPair FoundDecl); AccessResult CheckAllocationAccess(SourceLocation OperatorLoc, SourceRange PlacementRange, CXXRecordDecl *NamingClass, DeclAccessPair FoundDecl, bool Diagnose = true); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, bool IsCopyBindingRefToTemp = false); AccessResult CheckConstructorAccess(SourceLocation Loc, CXXConstructorDecl *D, DeclAccessPair FoundDecl, const InitializedEntity &Entity, const PartialDiagnostic &PDiag); AccessResult CheckDestructorAccess(SourceLocation Loc, CXXDestructorDecl *Dtor, const PartialDiagnostic &PDiag, QualType objectType = QualType()); AccessResult CheckFriendAccess(NamedDecl *D); AccessResult CheckMemberAccess(SourceLocation UseLoc, CXXRecordDecl *NamingClass, DeclAccessPair Found); AccessResult CheckStructuredBindingMemberAccess(SourceLocation UseLoc, CXXRecordDecl *DecomposedClass, DeclAccessPair Field); AccessResult CheckMemberOperatorAccess(SourceLocation Loc, Expr *ObjectExpr, Expr *ArgExpr, DeclAccessPair FoundDecl); AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr, DeclAccessPair FoundDecl); AccessResult CheckBaseClassAccess(SourceLocation AccessLoc, QualType Base, QualType Derived, const CXXBasePath &Path, unsigned DiagID, bool ForceCheck = false, bool ForceUnprivileged = false); void CheckLookupAccess(const LookupResult &R); bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass, QualType BaseType); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType, SourceLocation Loc, const PartialDiagnostic &Diag); bool isMemberAccessibleForDeletion(CXXRecordDecl *NamingClass, DeclAccessPair Found, QualType ObjectType) { return isMemberAccessibleForDeletion(NamingClass, Found, ObjectType, SourceLocation(), PDiag()); } void HandleDependentAccessCheck(const DependentDiagnostic &DD, const MultiLevelTemplateArgumentList &TemplateArgs); void PerformDependentDiagnostics(const DeclContext *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx); /// When true, access checking violations are treated as SFINAE /// failures rather than hard errors. bool AccessCheckingSFINAE; enum AbstractDiagSelID { AbstractNone = -1, AbstractReturnType, AbstractParamType, AbstractVariableType, AbstractFieldType, AbstractIvarType, AbstractSynthesizedIvarType, AbstractArrayType }; bool isAbstractType(SourceLocation Loc, QualType T); bool RequireNonAbstractType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); template <typename... 
Ts> bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID, const Ts &...Args) { BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...); return RequireNonAbstractType(Loc, T, Diagnoser); } void DiagnoseAbstractType(const CXXRecordDecl *RD); //===--------------------------------------------------------------------===// // C++ Overloaded Operators [C++ 13.5] // bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl); bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl); //===--------------------------------------------------------------------===// // C++ Templates [C++ 14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true, bool AllowDependent = true, bool AllowNonTemplateFunctions = false); /// Try to interpret the lookup result D as a template-name. /// /// \param D A declaration found by name lookup. /// \param AllowFunctionTemplates Whether function templates should be /// considered valid results. /// \param AllowDependent Whether unresolved using declarations (that might /// name templates) should be considered valid results. NamedDecl *getAsTemplateNameDecl(NamedDecl *D, bool AllowFunctionTemplates = true, bool AllowDependent = true); enum TemplateNameIsRequiredTag { TemplateNameIsRequired }; /// Whether and why a template name is required in this lookup. class RequiredTemplateKind { public: /// Template name is required if TemplateKWLoc is valid. RequiredTemplateKind(SourceLocation TemplateKWLoc = SourceLocation()) : TemplateKW(TemplateKWLoc) {} /// Template name is unconditionally required. RequiredTemplateKind(TemplateNameIsRequiredTag) : TemplateKW() {} SourceLocation getTemplateKeywordLoc() const { return TemplateKW.getValueOr(SourceLocation()); } bool hasTemplateKeyword() const { return getTemplateKeywordLoc().isValid(); } bool isRequired() const { return TemplateKW != SourceLocation(); } explicit operator bool() const { return isRequired(); } private: llvm::Optional<SourceLocation> TemplateKW; }; enum class AssumedTemplateKind { /// This is not assumed to be a template name. None, /// This is assumed to be a template name because lookup found nothing. FoundNothing, /// This is assumed to be a template name because lookup found one or more /// functions (but no function templates). FoundFunctions, }; bool LookupTemplateName( LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization, RequiredTemplateKind RequiredTemplate = SourceLocation(), AssumedTemplateKind *ATK = nullptr, bool AllowTypoCorrection = true); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization, bool Disambiguation = false); /// Try to resolve an undeclared template name as a type template. /// /// Sets II to the identifier corresponding to the template name, and updates /// Name to a corresponding (typo-corrected) type template name and TNK to /// the corresponding kind, if possible. 
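///
/// A typical trigger (the typo below is purely illustrative):
/// \code
///   std::vectr<int> V; // lookup finds nothing; 'vectr' may be corrected to
///                      // the type template 'std::vector'
/// \endcode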
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name, TemplateNameKind &TNK, SourceLocation NameLoc, IdentifierInfo *&II); bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name, SourceLocation NameLoc, bool Diagnose = true); /// Determine whether a particular identifier might be the name in a C++1z /// deduction-guide declaration. bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name, SourceLocation NameLoc, ParsedTemplateTy *Template = nullptr); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation, NamedDecl *Instantiation, bool InstantiatedFromMember, const NamedDecl *Pattern, const NamedDecl *PatternDef, TemplateSpecializationKind TSK, bool Complain = true); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); NamedDecl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg, bool HasTypeConstraint); bool ActOnTypeConstraint(const CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(NestedNameSpecifierLoc NS, DeclarationNameInfo NameInfo, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs, TemplateTypeParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); bool AttachTypeConstraint(AutoTypeLoc TL, NonTypeTemplateParmDecl *ConstrainedParameter, SourceLocation EllipsisLoc); QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI, SourceLocation Loc); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); NamedDecl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ArrayRef<NamedDecl *> Params, SourceLocation RAngleLoc, Expr *RequiresClause); /// The context in which we are checking a template parameter list. 
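/// For example (the mapping shown is illustrative):
/// \code
///   template<typename T> struct X {           // TPC_ClassTemplate
///     template<typename U> friend void f(U);  // TPC_FriendFunctionTemplate
///   };
/// \endcode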
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC, SkipBodyInfo *SkipBody = nullptr); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsMemberSpecialization, bool &Invalid, bool SuppressDiagnostic = false); DeclResult CheckClassTemplate( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists, SkipBodyInfo *SkipBody = nullptr); TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg, QualType NTTPType, SourceLocation Loc); /// Get a template argument mapping the given template parameter to itself, /// e.g. for X in \c template<int X>, this would return an expression template /// argument referencing X. TemplateArgumentLoc getIdentityTemplateArgumentLoc(NamedDecl *Param, SourceLocation Location); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false, bool IsClassName = false); /// Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); /// Get the specialization of the given variable template corresponding to /// the specified argument list, or a null-but-valid result if the arguments /// are dependent. DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); /// Form a reference to the specialization of the given variable template /// corresponding to the specified argument list, or a null-but-valid result /// if the arguments are dependent. 
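///
/// For instance (illustrative only):
/// \code
///   template<typename T> constexpr T pi = T(3.1415926535897932385L);
///   double D = pi<double>; // reference to the specialization pi<double>
/// \endcode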
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult CheckConceptTemplateId(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &ConceptNameInfo, NamedDecl *FoundDecl, ConceptDecl *NamedConcept, const TemplateArgumentListInfo *TemplateArgs); void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnTemplateName( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool AllowInjectedClassName = false); DeclResult ActOnClassTemplateSpecialization( Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, CXXScopeSpec &SS, TemplateIdAnnotation &TemplateId, const ParsedAttributesView &Attr, MultiTemplateParamsArg TemplateParameterLists, SkipBodyInfo *SkipBody = nullptr); bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc, TemplateDecl *PrimaryTemplate, unsigned NumExplicitArgs, ArrayRef<TemplateArgument> Args); void CheckTemplatePartialSpecialization( ClassTemplatePartialSpecializationDecl *Partial); void CheckTemplatePartialSpecialization( VarTemplatePartialSpecializationDecl *Partial); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization( FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous, bool QualifiedFriend = false); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation( Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, const ParsedAttributesView &Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// Specifies the context in which a particular template /// argument is being checked. 
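/// For example (illustrative only):
/// \code
///   template<typename T, int N> void f(T (&)[N]);
///   int A[3];
///   f<int>(A); // 'int' is CTAK_Specified; N = 3 is CTAK_DeducedFromArrayBound
/// \endcode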
enum CheckTemplateArgumentKind {
  /// The template argument was specified in the code or was
  /// instantiated with some deduced template arguments.
  CTAK_Specified,
  /// The template argument was deduced via template argument
  /// deduction.
  CTAK_Deduced,
  /// The template argument was deduced from an array bound
  /// via template argument deduction.
  CTAK_DeducedFromArrayBound
};

bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg,
                           NamedDecl *Template, SourceLocation TemplateLoc,
                           SourceLocation RAngleLoc,
                           unsigned ArgumentPackIndex,
                           SmallVectorImpl<TemplateArgument> &Converted,
                           CheckTemplateArgumentKind CTAK = CTAK_Specified);

/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \param ConstraintsNotSatisfied If provided, and an error occurred, will
/// receive true if the cause for the error is the associated constraints of
/// the template not being satisfied by the template arguments.
///
/// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template,
                               SourceLocation TemplateLoc,
                               TemplateArgumentListInfo &TemplateArgs,
                               bool PartialTemplateArgs,
                               SmallVectorImpl<TemplateArgument> &Converted,
                               bool UpdateArgsWithConversions = true,
                               bool *ConstraintsNotSatisfied = nullptr);

bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
                               TemplateArgumentLoc &Arg,
                               SmallVectorImpl<TemplateArgument> &Converted);

bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
                                 QualType InstantiatedParamType, Expr *Arg,
                                 TemplateArgument &Converted,
                                 CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateTemplateParmDecl *Param,
                                   TemplateParameterList *Params,
                                   TemplateArgumentLoc &Arg);

ExprResult BuildExpressionFromDeclTemplateArgument(
    const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc);
ExprResult BuildExpressionFromIntegralTemplateArgument(
    const TemplateArgument &Arg, SourceLocation Loc);

/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
  /// We are matching the template parameter lists of two templates
  /// that might be redeclarations.
  ///
  /// \code
  /// template<typename T> struct X;
  /// template<typename T> struct X;
  /// \endcode
  TPL_TemplateMatch,

  /// We are matching the template parameter lists of two template
  /// template parameters as part of matching the template parameter lists
  /// of two templates that might be redeclarations.
/// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. /// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateII The identifier used to name the template. /// \param TemplateIILoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, IdentifierInfo *TemplateII, SourceLocation TemplateIILoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, TypeSourceInfo **TSI, bool DeducedTSTContext); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc, bool DeducedTSTContext = true); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Concepts //===--------------------------------------------------------------------===// Decl *ActOnConceptDefinition( Scope *S, MultiTemplateParamsArg TemplateParameterLists, IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr); RequiresExprBodyDecl * ActOnStartRequiresExpr(SourceLocation RequiresKWLoc, ArrayRef<ParmVarDecl *> LocalParameters, Scope *BodyScope); void ActOnFinishRequiresExpr(); concepts::Requirement *ActOnSimpleRequirement(Expr *E); concepts::Requirement *ActOnTypeRequirement( SourceLocation TypenameKWLoc, CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo *TypeName, TemplateIdAnnotation *TemplateId); concepts::Requirement *ActOnCompoundRequirement(Expr *E, SourceLocation NoexceptLoc); concepts::Requirement * ActOnCompoundRequirement( Expr *E, SourceLocation NoexceptLoc, CXXScopeSpec &SS, TemplateIdAnnotation *TypeConstraint, unsigned Depth); concepts::Requirement *ActOnNestedRequirement(Expr *Constraint); concepts::ExprRequirement * BuildExprRequirement( Expr *E, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::ExprRequirement * BuildExprRequirement( concepts::Requirement::SubstitutionDiagnostic *ExprSubstDiag, bool IsSatisfied, SourceLocation NoexceptLoc, concepts::ExprRequirement::ReturnTypeRequirement ReturnTypeRequirement); concepts::TypeRequirement *BuildTypeRequirement(TypeSourceInfo *Type); concepts::TypeRequirement * BuildTypeRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); concepts::NestedRequirement *BuildNestedRequirement(Expr *E); concepts::NestedRequirement * BuildNestedRequirement( concepts::Requirement::SubstitutionDiagnostic *SubstDiag); ExprResult ActOnRequiresExpr(SourceLocation RequiresKWLoc, RequiresExprBodyDecl *Body, ArrayRef<ParmVarDecl *> LocalParameters, ArrayRef<concepts::Requirement *> Requirements, SourceLocation ClosingBraceLoc); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack 
/// might be permitted in this location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();

/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
  /// An arbitrary expression.
  UPPC_Expression = 0,
  /// The base type of a class type.
  UPPC_BaseType,
  /// The type of an arbitrary declaration.
  UPPC_DeclarationType,
  /// The type of a data member.
  UPPC_DataMemberType,
  /// The size of a bit-field.
  UPPC_BitFieldWidth,
  /// The expression in a static assertion.
  UPPC_StaticAssertExpression,
  /// The fixed underlying type of an enumeration.
  UPPC_FixedUnderlyingType,
  /// The enumerator value.
  UPPC_EnumeratorValue,
  /// A using declaration.
  UPPC_UsingDeclaration,
  /// A friend declaration.
  UPPC_FriendDeclaration,
  /// A declaration qualifier.
  UPPC_DeclarationQualifier,
  /// An initializer.
  UPPC_Initializer,
  /// A default argument.
  UPPC_DefaultArgument,
  /// The type of a non-type template parameter.
  UPPC_NonTypeTemplateParameterType,
  /// The type of an exception.
  UPPC_ExceptionType,
  /// Partial specialization.
  UPPC_PartialSpecialization,
  /// Microsoft __if_exists.
  UPPC_IfExists,
  /// Microsoft __if_not_exists.
  UPPC_IfNotExists,
  /// Lambda expression.
  UPPC_Lambda,
  /// Block expression.
  UPPC_Block,
  /// A type constraint.
  UPPC_TypeConstraint,
  /// A requirement in a requires-expression.
  UPPC_Requirement,
};

/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPacks(
    SourceLocation Loc, UnexpandedParameterPackContext UPPC,
    ArrayRef<UnexpandedParameterPack> Unexpanded);

/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
                                     UnexpandedParameterPackContext UPPC);

/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(
    Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression);

/// If the given requires-expression contains an unexpanded reference to one
/// of its own parameter packs, diagnose the error.
///
/// \param RE The requires-expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPackInRequiresExpr(RequiresExpr *RE);

/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
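///
/// An example of the diagnosed situation (illustrative only):
/// \code
///   template<typename ...Ts> void f() {
///     typename Ts::type X; // error: 'Ts' is unexpanded in 'Ts::'
///   }
/// \endcode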
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param NNS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. 
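/// For instance, the base-specifier 'Ts...' in (illustrative only):
/// \code
///   template<typename ...Ts> struct D : Ts... {};
/// \endcode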
/// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. /// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. 
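/// For example (illustrative), expanding 'std::tuple<Ts...>' with
/// Ts = {int, float} produces two arguments.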
/// /// Returns an empty Optional if the type can't be expanded. Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; /// Given a template argument that contains an unexpanded parameter pack, but /// which has already been substituted, attempt to determine the number of /// elements that will be produced once this argument is fully-expanded. /// /// This is intended for use when transforming 'sizeof...(Arg)' in order to /// avoid actually expanding the pack where possible. Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg); //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// /// Adjust the type \p ArgFunctionType to match the calling convention, /// noreturn, and optionally the exception specification of \p FunctionType. /// Deduction often wants to ignore these properties when matching function /// types. QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType, bool AdjustExceptionSpec = false); /// Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. enum TemplateDeductionResult { /// Template argument deduction was successful. TDK_Success = 0, /// The declaration was invalid; do nothing. TDK_Invalid, /// Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// Template argument deduction did not deduce a value for every /// expansion of an expanded template parameter pack. TDK_IncompletePack, /// Template argument deduction produced inconsistent /// deduced values for the given template parameter. 
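/// For example (an illustrative sketch with a hypothetical 'g'):
/// \code
///   template <typename T> void g(T, T);
///   // g(1, 2.5) deduces T = int and T = double, which is inconsistent
/// \endcode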
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// The deduced arguments did not satisfy the constraints associated
/// with the template.
TDK_ConstraintsNotSatisfied,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};

TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);

TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);

TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);

/// \brief A function argument from which we performed template argument
/// deduction for a call.
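/// For example (an illustrative sketch with a hypothetical 'f'):
/// \code
///   template <typename T> void f(T, T*);
///   int i = 0;
///   // f(i, &i) records each argument so the deduction can be re-checked
///   // against the substituted parameter types.
/// \endcode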
struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, bool DecomposedParam, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), DecomposedParam(DecomposedParam), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) {} QualType OriginalParamType; bool DecomposedParam; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction( FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr, bool PartialOverloading = false, llvm::function_ref<bool()> CheckNonDependent = []{ return false; }); TemplateDeductionResult DeduceTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool PartialOverloading, llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool IsAddressOfFunction = false); /// Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Completely replace the \c auto in \p TypeWithAuto by /// \p Replacement. This does not retain any \c auto type sugar. QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement); TypeSourceInfo *ReplaceAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result, Optional<unsigned> DependentDeductionDepth = None, bool IgnoreConstraints = false); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); /// Declare implicit deduction guides for a class template if we've /// not already done so. 
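/// For example (illustrative), class template argument deduction in
/// \code
///   std::pair p(1, 2.0); // uses pair's implicitly declared deduction guides
/// \endcode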
void DeclareImplicitDeductionGuides(TemplateDecl *Template, SourceLocation Loc); QualType DeduceTemplateSpecializationFromInitializer( TypeSourceInfo *TInfo, const InitializedEntity &Entity, const InitializationKind &Kind, MultiExprArg Init); QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name, QualType Type, TypeSourceInfo *TSI, SourceRange Range, bool DirectInit, Expr *Init); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate( FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2, bool Reversed = false); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T, sema::TemplateDeductionInfo &Info); bool isTemplateTemplateParameterAtLeastAsSpecializedAs( TemplateParameterList *PParam, TemplateDecl *AArg, SourceLocation Loc); void MarkUsedTemplateParameters(const Expr *E, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// A context in which code is being synthesized (where a source location /// alone is not sufficient to identify the context). This covers template /// instantiation and various forms of implicitly-generated functions. struct CodeSynthesisContext { /// The kind of template instantiation we are performing enum SynthesisKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template parameter whose argument is /// being instantiated, the Template is the template, and the /// TemplateArgs/NumTemplateArguments provide the template arguments as /// specified. 
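/// For example (illustrative):
/// \code
///   template <typename T, typename U = T*> struct X {};
///   X<int> x; // instantiates the default argument 'T*' as 'int*'
/// \endcode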
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are instantiating a requirement of a requires expression.
RequirementInstantiation,
/// We are checking the satisfaction of a nested requirement of a requires
/// expression.
NestedRequirementConstraintsCheck,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are declaring an implicit 'operator==' for a defaulted
/// 'operator<=>'.
DeclaringImplicitEqualityComparison,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
// We are checking the constraints associated with a constrained entity or
// the constraint expression of a concept. This includes the checks that
// atomic constraints have the type 'bool' and that they can be constant
// evaluated.
ConstraintsCheck,
// We are substituting template arguments into a constraint expression.
ConstraintSubstitution,
// We are normalizing a constraint expression.
ConstraintNormalization,
// We are substituting into the parameter mapping of an atomic constraint
// during normalization.
ParameterMappingSubstitution,
/// We are rewriting a comparison operator in terms of an operator<=>.
RewritingOperatorAsSpaceship,
/// We are initializing a structured binding.
InitializingStructuredBinding,
/// We are marking a class as __dllexport.
MarkingClassDllexported,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;

/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;

/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;

/// The entity that is being synthesized.
Decl *Entity;

/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;

/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;

// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;

/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};

ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}

/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;

/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;

CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}

/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};

/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;

/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;

/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;

/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;

/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;

/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();

/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;

/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (see \c SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;

/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;

/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema?
// It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;

/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;

/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;

/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;

public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}

~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};

friend class ArgumentPackSubstitutionRAII;

/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates to true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());

struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());

/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());

/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
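/// For example (illustrative, with a hypothetical 'f'), the substitution
/// performed for a call such as
/// \code
///   f<int>(0); // explicit arguments are substituted before deducing the rest
/// \endcode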
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, FunctionTemplateDecl *FunctionTemplate, ArrayRef<TemplateArgument> TemplateArgs, CodeSynthesisContext::SynthesisKind Kind, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template declaration. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a class template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ClassTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating as part of template /// argument deduction for a variable template partial /// specialization. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, VarTemplatePartialSpecializationDecl *PartialSpec, ArrayRef<TemplateArgument> TemplateArgs, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange = SourceRange()); /// Note that we are instantiating a default argument for a function /// parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ParmVarDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange = SourceRange()); /// Note that we are substituting prior template arguments into a /// non-type parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, NonTypeTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are substituting prior template arguments into a /// template template parameter. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, NamedDecl *Template, TemplateTemplateParmDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); /// Note that we are checking the default template argument /// against the template parameter for a given template-id. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, TemplateDecl *Template, NamedDecl *Param, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintsCheck {}; /// \brief Note that we are checking the constraints associated with some /// constrained entity (a concept declaration or a template with associated /// constraints). InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintsCheck, NamedDecl *Template, ArrayRef<TemplateArgument> TemplateArgs, SourceRange InstantiationRange); struct ConstraintSubstitution {}; /// \brief Note that we are checking a constraint expression associated /// with a template declaration or as part of the satisfaction check of a /// concept. InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation, ConstraintSubstitution, NamedDecl *Template, sema::TemplateDeductionInfo &DeductionInfo, SourceRange InstantiationRange); struct ConstraintNormalization {}; /// \brief Note that we are normalizing a constraint expression. 
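/// For example (illustrative), 'C<T> && D<T>' is decomposed into its atomic
/// constraints before satisfaction can be checked.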
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ConstraintNormalization, NamedDecl *Template,
SourceRange InstantiationRange);

struct ParameterMappingSubstitution {};
/// \brief Note that we are substituting into the parameter mapping of an
/// atomic constraint during constraint normalization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParameterMappingSubstitution, NamedDecl *Template,
SourceRange InstantiationRange);

/// \brief Note that we are substituting template arguments into a part of
/// a requirement of a requires expression.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::Requirement *Req,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());

/// \brief Note that we are checking the satisfaction of the constraint
/// expression inside of a nested requirement.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
concepts::NestedRequirement *Req, ConstraintsCheck,
SourceRange InstantiationRange = SourceRange());

/// Note that we have finished instantiating this template.
void Clear();

~InstantiatingTemplate() { Clear(); }

/// Determines whether we have exceeded the maximum number of
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }

/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }

private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);

InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);

InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate& operator=(const InstantiatingTemplate&) = delete;
};

void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();

/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}

void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();

/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
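/// For example (illustrative), the operand of 'decltype' below is named but
/// never evaluated:
/// \code
///   decltype(g()) x; // hypothetical 'g'; the call 'g()' is unevaluated
/// \endcode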
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}

/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;

public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}

~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}

/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};

/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};

/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;

/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;

/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;

/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
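/// For example (an illustrative sketch with a hypothetical 'twice'):
/// \code
///   template <typename T> T twice(T v) { return v + v; }
///   int n = twice(21); // 'twice<int>' is queued for implicit instantiation
/// \endcode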
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;

class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;

SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}

void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}

~GlobalEagerInstantiationScope() {
if (!Enabled) return;

// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);

// Restore the set of pending implicit instantiations.
if (S.TUKind != TU_Prefix || !S.LangOpts.PCHInstantiateTemplates) {
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
} else {
// Template instantiations in the PCH may be delayed until the TU.
S.PendingInstantiations.swap(SavedPendingInstantiations);
S.PendingInstantiations.insert(S.PendingInstantiations.end(),
SavedPendingInstantiations.begin(),
SavedPendingInstantiations.end());
}
}

private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};

/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}

void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }

~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}

private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};

/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;

public:
/// Set the ExtParameterInfo for the parameter at the given index.
///
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);

if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}

/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
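/// For example (a sketch; 'Info' and 'NumParams' are assumed to exist):
/// \code
///   ExtParameterInfoBuilder Builder;
///   Builder.set(1, Info); // slot 0 is filled with a default-constructed entry
///   EPI.ExtParameterInfos = Builder.getPointerOrNull(NumParams);
/// \endcode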
const FunctionProtoType::ExtParameterInfo * getPointerOrNull(unsigned numParams) { if (!HasInteresting) return nullptr; Infos.resize(numParams); return Infos.data(); } }; void PerformPendingInstantiations(bool LocalOnly = false); TypeSourceInfo *SubstType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, bool AllowDeducedTST = false); QualType SubstType(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstType(TypeLoc TL, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity); TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T, const MultiLevelTemplateArgumentList &TemplateArgs, SourceLocation Loc, DeclarationName Entity, CXXRecordDecl *ThisContext, Qualifiers ThisTypeQuals); void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto, const MultiLevelTemplateArgumentList &Args); bool SubstExceptionSpec(SourceLocation Loc, FunctionProtoType::ExceptionSpecInfo &ESI, SmallVectorImpl<QualType> &ExceptionStorage, const MultiLevelTemplateArgumentList &Args); ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, int indexAdjustment, Optional<unsigned> NumExpansions, bool ExpectParameterPack); bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params, const FunctionProtoType::ExtParameterInfo *ExtParamInfos, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<QualType> &ParamTypes, SmallVectorImpl<ParmVarDecl *> *OutParams, ExtParameterInfoBuilder &ParamInfos); ExprResult SubstExpr(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the given template arguments into a list of /// expressions, expanding pack expansions if required. /// /// \param Exprs The list of expressions to substitute into. /// /// \param IsCall Whether this is some form of call, in which case /// default arguments will be dropped. /// /// \param TemplateArgs The set of template arguments to substitute. /// /// \param Outputs Will receive all of the substituted arguments. /// /// \returns true if an error occurred, false otherwise. bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall, const MultiLevelTemplateArgumentList &TemplateArgs, SmallVectorImpl<Expr *> &Outputs); StmtResult SubstStmt(Stmt *S, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateParameterList * SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); bool SubstTemplateArguments(ArrayRef<TemplateArgumentLoc> Args, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateArgumentListInfo &Outputs); Decl *SubstDecl(Decl *D, DeclContext *Owner, const MultiLevelTemplateArgumentList &TemplateArgs); /// Substitute the name and return type of a defaulted 'operator<=>' to form /// an implicit 'operator=='. 
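/// For example (illustrative):
/// \code
///   struct S { auto operator<=>(const S&) const = default; };
///   // an implicit 'operator==' for S is formed from the defaulted spaceship
/// \endcode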
FunctionDecl *SubstSpaceshipAsEqualEqual(CXXRecordDecl *RD, FunctionDecl *Spaceship); ExprResult SubstInitializer(Expr *E, const MultiLevelTemplateArgumentList &TemplateArgs, bool CXXDirectInit); bool SubstBaseSpecifiers(CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateClass(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK, bool Complain = true); bool InstantiateEnum(SourceLocation PointOfInstantiation, EnumDecl *Instantiation, EnumDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); bool InstantiateInClassInitializer( SourceLocation PointOfInstantiation, FieldDecl *Instantiation, FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs); struct LateInstantiatedAttribute { const Attr *TmplAttr; LocalInstantiationScope *Scope; Decl *NewDecl; LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S, Decl *D) : TmplAttr(A), Scope(S), NewDecl(D) { } }; typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec; void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); void InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs, const Decl *Pattern, Decl *Inst, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *OuterMostScope = nullptr); bool usesPartialOrExplicitSpecialization( SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec); bool InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK, bool Complain = true); void InstantiateClassMembers(SourceLocation PointOfInstantiation, CXXRecordDecl *Instantiation, const MultiLevelTemplateArgumentList &TemplateArgs, TemplateSpecializationKind TSK); void InstantiateClassTemplateSpecializationMembers( SourceLocation PointOfInstantiation, ClassTemplateSpecializationDecl *ClassTemplateSpec, TemplateSpecializationKind TSK); NestedNameSpecifierLoc SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS, const MultiLevelTemplateArgumentList &TemplateArgs); DeclarationNameInfo SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo, const MultiLevelTemplateArgumentList &TemplateArgs); TemplateName SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name, SourceLocation Loc, const MultiLevelTemplateArgumentList &TemplateArgs); bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs, TemplateArgumentListInfo &Result, const MultiLevelTemplateArgumentList &TemplateArgs); bool InstantiateDefaultArgument(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); void InstantiateExceptionSpec(SourceLocation PointOfInstantiation, FunctionDecl *Function); bool CheckInstantiatedFunctionTemplateConstraints( SourceLocation PointOfInstantiation, FunctionDecl *Decl, ArrayRef<TemplateArgument> TemplateArgs, ConstraintSatisfaction &Satisfaction); FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD, const TemplateArgumentList *Args, SourceLocation Loc); void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation, FunctionDecl *Function, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); 
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation( VarTemplateDecl *VarTemplate, VarDecl *FromVar, const TemplateArgumentList &TemplateArgList, const TemplateArgumentListInfo &TemplateArgsInfo, SmallVectorImpl<TemplateArgument> &Converted, SourceLocation PointOfInstantiation, void *InsertPos, LateInstantiatedAttrVec *LateAttrs = nullptr, LocalInstantiationScope *StartingScope = nullptr); VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl( VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl, const MultiLevelTemplateArgumentList &TemplateArgs); void BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs, LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner, LocalInstantiationScope *StartingScope, bool InstantiatingVarTemplate = false, VarTemplateSpecializationDecl *PrevVTSD = nullptr); void InstantiateVariableInitializer( VarDecl *Var, VarDecl *OldVar, const MultiLevelTemplateArgumentList &TemplateArgs); void InstantiateVariableDefinition(SourceLocation PointOfInstantiation, VarDecl *Var, bool Recursive = false, bool DefinitionRequired = false, bool AtEndOfTU = false); void InstantiateMemInitializers(CXXConstructorDecl *New, const CXXConstructorDecl *Tmpl, const MultiLevelTemplateArgumentList &TemplateArgs); NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D, const MultiLevelTemplateArgumentList &TemplateArgs, bool FindingInstantiatedContext = false); DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC, const MultiLevelTemplateArgumentList &TemplateArgs); // Objective-C declarations. enum ObjCContainerKind { OCK_None = -1, OCK_Interface = 0, OCK_Protocol, OCK_Category, OCK_ClassExtension, OCK_Implementation, OCK_CategoryImplementation }; ObjCContainerKind getObjCContainerKind() const; DeclResult actOnObjCTypeParam(Scope *S, ObjCTypeParamVariance variance, SourceLocation varianceLoc, unsigned index, IdentifierInfo *paramName, SourceLocation paramLoc, SourceLocation colonLoc, ParsedType typeBound); ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc, ArrayRef<Decl *> typeParams, SourceLocation rAngleLoc); void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList); Decl *ActOnStartClassInterface( Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName, SourceLocation ClassLoc, ObjCTypeParamList *typeParamList, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange, Decl *const *ProtoRefs, unsigned NumProtoRefs, const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList); void ActOnSuperClassOfClassInterface(Scope *S, SourceLocation AtInterfaceLoc, ObjCInterfaceDecl *IDecl, IdentifierInfo *ClassName, SourceLocation ClassLoc, IdentifierInfo *SuperName, SourceLocation SuperLoc, ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange); void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs, SmallVectorImpl<SourceLocation> &ProtocolLocs, IdentifierInfo *SuperName, SourceLocation SuperLoc); Decl *ActOnCompatibilityAlias( SourceLocation AtCompatibilityAliasLoc, IdentifierInfo *AliasName, SourceLocation AliasLocation, IdentifierInfo *ClassName, SourceLocation ClassLocation); bool CheckForwardProtocolDeclarationForCircularDependency( IdentifierInfo *PName, SourceLocation &PLoc, SourceLocation PrevLoc, const ObjCList<ObjCProtocolDecl> &PList); Decl *ActOnStartProtocolInterface( SourceLocation 
AtProtoInterfaceLoc, IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
Decl *const *ProtoRefNames, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);

Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *SuperClassname, SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName, SourceLocation ClassLoc,
IdentifierInfo *CatName, SourceLocation CatLoc,
const ParsedAttributesView &AttrList);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList, SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists, unsigned NumElts);

DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);

void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);

/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S, ParsedType baseType, SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs, SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc, SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc, SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols, SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);

/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc, ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs, SourceLocation rAngleLoc);

/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S, SourceLocation Loc, ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc, ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);

/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);

/// Build an Objective-C object pointer type.
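/// For example (illustrative), 'NSArray<NSString *> *' combines a base type,
/// type arguments, and an optional protocol list.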
QualType BuildObjCObjectType(QualType BaseType, SourceLocation Loc, SourceLocation TypeArgsLAngleLoc, ArrayRef<TypeSourceInfo *> TypeArgs, SourceLocation TypeArgsRAngleLoc, SourceLocation ProtocolLAngleLoc, ArrayRef<ObjCProtocolDecl *> Protocols, ArrayRef<SourceLocation> ProtocolLocs, SourceLocation ProtocolRAngleLoc, bool FailOnError = false); /// Ensure attributes are consistent with type. /// \param [in, out] Attributes The attributes to check; they will /// be modified to be consistent with \p PropertyTy. void CheckObjCPropertyAttributes(Decl *PropertyPtrTy, SourceLocation Loc, unsigned &Attributes, bool propertyInPrimaryClass); /// Process the specified property declaration and create decls for the /// setters and getters as needed. /// \param property The property declaration being processed void ProcessPropertyDecl(ObjCPropertyDecl *property); void DiagnosePropertyMismatch(ObjCPropertyDecl *Property, ObjCPropertyDecl *SuperProperty, const IdentifierInfo *Name, bool OverridingProtocolProperty); void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT, ObjCInterfaceDecl *ID); Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd, ArrayRef<Decl *> allMethods = None, ArrayRef<DeclGroupPtrTy> allTUVars = None); Decl *ActOnProperty(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, ObjCDeclSpec &ODS, Selector GetterSel, Selector SetterSel, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); Decl *ActOnPropertyImplDecl(Scope *S, SourceLocation AtLoc, SourceLocation PropertyLoc, bool ImplKind, IdentifierInfo *PropertyId, IdentifierInfo *PropertyIvar, SourceLocation PropertyIvarLoc, ObjCPropertyQueryKind QueryKind); enum ObjCSpecialMethodKind { OSMK_None, OSMK_Alloc, OSMK_New, OSMK_Copy, OSMK_RetainingInit, OSMK_NonRetainingInit }; struct ObjCArgInfo { IdentifierInfo *Name; SourceLocation NameLoc; // The Type is null if no type was specified, and the DeclSpec is invalid // in this case. ParsedType Type; ObjCDeclSpec DeclSpec; /// ArgAttrs - Attribute list for this argument. ParsedAttributesView ArgAttrs; }; Decl *ActOnMethodDeclaration( Scope *S, SourceLocation BeginLoc, // location of the + or -. SourceLocation EndLoc, // location of the ; or {. tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType, ArrayRef<SourceLocation> SelectorLocs, Selector Sel, // optional arguments. The number of types/arguments is obtained // from the Sel.getNumArgs(). ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind, bool isVariadic, bool MethodDefinition); ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel, const ObjCObjectPointerType *OPT, bool IsInstance); ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty, bool IsInstance); bool CheckARCMethodDecl(ObjCMethodDecl *method); bool inferObjCARCLifetime(ValueDecl *decl); void deduceOpenCLAddressSpace(ValueDecl *decl); ExprResult HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT, Expr *BaseExpr, SourceLocation OpLoc, DeclarationName MemberName, SourceLocation MemberLoc, SourceLocation SuperLoc, QualType SuperType, bool Super); ExprResult ActOnClassPropertyRefExpr(IdentifierInfo &receiverName, IdentifierInfo &propertyName, SourceLocation receiverNameLoc, SourceLocation propertyNameLoc); ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc); /// Describes the kind of message expression indicated by a message /// send that starts with an identifier. 
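/// For example (illustrative):
/// \code
///   [super init];       // ObjCSuperMessage
///   [someObject count]; // ObjCInstanceMessage
///   [NSArray array];    // ObjCClassMessage
/// \endcode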
enum ObjCMessageKind { /// The message is sent to 'super'. ObjCSuperMessage, /// The message is an instance message. ObjCInstanceMessage, /// The message is a class message, and the identifier is a type /// name. ObjCClassMessage }; ObjCMessageKind getObjCMessageKind(Scope *S, IdentifierInfo *Name, SourceLocation NameLoc, bool IsSuper, bool HasTrailingDot, ParsedType &ReceiverType); ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildClassMessageImplicit(QualType ReceiverType, bool isSuperReceiver, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnClassMessage(Scope *S, ParsedType Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildInstanceMessage(Expr *Receiver, QualType ReceiverType, SourceLocation SuperLoc, Selector Sel, ObjCMethodDecl *Method, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args, bool isImplicit = false); ExprResult BuildInstanceMessageImplicit(Expr *Receiver, QualType ReceiverType, SourceLocation Loc, Selector Sel, ObjCMethodDecl *Method, MultiExprArg Args); ExprResult ActOnInstanceMessage(Scope *S, Expr *Receiver, Selector Sel, SourceLocation LBracLoc, ArrayRef<SourceLocation> SelectorLocs, SourceLocation RBracLoc, MultiExprArg Args); ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, TypeSourceInfo *TSInfo, Expr *SubExpr); ExprResult ActOnObjCBridgedCast(Scope *S, SourceLocation LParenLoc, ObjCBridgeCastKind Kind, SourceLocation BridgeKeywordLoc, ParsedType Type, SourceLocation RParenLoc, Expr *SubExpr); void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr); void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr); bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr, CastKind &Kind); bool checkObjCBridgeRelatedComponents(SourceLocation Loc, QualType DestType, QualType SrcType, ObjCInterfaceDecl *&RelatedClass, ObjCMethodDecl *&ClassMethod, ObjCMethodDecl *&InstanceMethod, TypedefNameDecl *&TDNDecl, bool CfToNs, bool Diagnose = true); bool CheckObjCBridgeRelatedConversions(SourceLocation Loc, QualType DestType, QualType SrcType, Expr *&SrcExpr, bool Diagnose = true); bool CheckConversionToObjCLiteral(QualType DstType, Expr *&SrcExpr, bool Diagnose = true); bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall); /// Check whether the given new method is a valid override of the /// given overridden method, and set any properties that should be inherited. void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod, const ObjCMethodDecl *Overridden); /// Describes the compatibility of a result type with its method. enum ResultTypeCompatibilityKind { RTC_Compatible, RTC_Incompatible, RTC_Unknown }; /// Check whether the declared result type of the given Objective-C /// method declaration is compatible with the method's class. 
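/// For example (illustrative), an 'init' method declared to return
/// 'instancetype' remains compatible in every subclass that inherits it.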
  /// Check whether the declared result type of the given Objective-C
  /// method declaration is compatible with the method's class.
  ResultTypeCompatibilityKind
  checkRelatedResultTypeCompatibility(const ObjCMethodDecl *Method,
                                      const ObjCInterfaceDecl *CurrentClass);

  void CheckObjCMethodDirectOverrides(ObjCMethodDecl *method,
                                      ObjCMethodDecl *overridden);

  void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                                ObjCInterfaceDecl *CurrentClass,
                                ResultTypeCompatibilityKind RTC);

  enum PragmaOptionsAlignKind {
    POAK_Native,  // #pragma options align=native
    POAK_Natural, // #pragma options align=natural
    POAK_Packed,  // #pragma options align=packed
    POAK_Power,   // #pragma options align=power
    POAK_Mac68k,  // #pragma options align=mac68k
    POAK_Reset    // #pragma options align=reset
  };

  /// ActOnPragmaClangSection - Called on well formed \#pragma clang section
  void ActOnPragmaClangSection(SourceLocation PragmaLoc,
                               PragmaClangSectionAction Action,
                               PragmaClangSectionKind SecKind,
                               StringRef SecName);

  /// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
  void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                               SourceLocation PragmaLoc);

  /// ActOnPragmaPack - Called on well formed \#pragma pack(...).
  void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
                       StringRef SlotLabel, Expr *Alignment);

  enum class PragmaPackDiagnoseKind {
    NonDefaultStateAtInclude,
    ChangedStateAtExit
  };

  void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
                                    SourceLocation IncludeLoc);
  void DiagnoseUnterminatedPragmaPack();

  /// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
  void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

  /// ActOnPragmaMSComment - Called on well formed
  /// \#pragma comment(kind, "arg").
  void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
                            StringRef Arg);

  /// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
  /// pointers_to_members(representation method[, general purpose
  /// representation]).
  void ActOnPragmaMSPointersToMembers(
      LangOptions::PragmaMSPointersToMembersKind Kind,
      SourceLocation PragmaLoc);

  /// Called on well formed \#pragma vtordisp().
  void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
                             SourceLocation PragmaLoc, MSVtorDispMode Value);

  enum PragmaSectionKind {
    PSK_DataSeg,
    PSK_BSSSeg,
    PSK_ConstSeg,
    PSK_CodeSeg,
  };

  bool UnifySection(StringRef SectionName, int SectionFlags,
                    DeclaratorDecl *TheDecl);
  bool UnifySection(StringRef SectionName, int SectionFlags,
                    SourceLocation PragmaSectionLocation);

  /// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
  void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                        PragmaMsStackAction Action,
                        llvm::StringRef StackSlotLabel,
                        StringLiteral *SegmentName, llvm::StringRef PragmaName);

  /// Called on well formed \#pragma section().
  void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags,
                            StringLiteral *SegmentName);

  /// Called on well-formed \#pragma init_seg().
  void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
                            StringLiteral *SegmentName);

  /// Called on #pragma clang __debug dump II
  void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);

  /// ActOnPragmaDetectMismatch - Call on well-formed \#pragma detect_mismatch
  void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
                                 StringRef Value);
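  // A minimal sketch (hypothetical parser-side caller) of how the pragma
  // handler might lower '#pragma pack' into the stack-action interface
  // above, assuming the usual PragmaMsStackAction values:
  //
  //   // #pragma pack(push, 8)
  //   Actions.ActOnPragmaPack(PragmaLoc, Sema::PSK_Push_Set,
  //                           /*SlotLabel=*/"", AlignmentExpr8);
  //   // #pragma pack(pop)
  //   Actions.ActOnPragmaPack(PragmaLoc, Sema::PSK_Pop, /*SlotLabel=*/"",
  //                           /*Alignment=*/nullptr);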
  /// Are precise floating point semantics currently enabled?
  bool isPreciseFPEnabled() {
    return !CurFPFeatures.getAllowFPReassociate() &&
           !CurFPFeatures.getNoSignedZero() &&
           !CurFPFeatures.getAllowReciprocal() &&
           !CurFPFeatures.getAllowApproxFunc();
  }

  /// ActOnPragmaFloatControl - Call on well-formed \#pragma float_control
  void ActOnPragmaFloatControl(SourceLocation Loc, PragmaMsStackAction Action,
                               PragmaFloatControlKind Value);

  /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
  void ActOnPragmaUnused(const Token &Identifier, Scope *curScope,
                         SourceLocation PragmaLoc);

  /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
  void ActOnPragmaVisibility(const IdentifierInfo *VisType,
                             SourceLocation PragmaLoc);

  NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
                                 SourceLocation Loc);
  void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);

  /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
  void ActOnPragmaWeakID(IdentifierInfo *WeakName, SourceLocation PragmaLoc,
                         SourceLocation WeakNameLoc);

  /// ActOnPragmaRedefineExtname - Called on well formed
  /// \#pragma redefine_extname oldname newname.
  void ActOnPragmaRedefineExtname(IdentifierInfo *WeakName,
                                  IdentifierInfo *AliasName,
                                  SourceLocation PragmaLoc,
                                  SourceLocation WeakNameLoc,
                                  SourceLocation AliasNameLoc);

  /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
  void ActOnPragmaWeakAlias(IdentifierInfo *WeakName, IdentifierInfo *AliasName,
                            SourceLocation PragmaLoc,
                            SourceLocation WeakNameLoc,
                            SourceLocation AliasNameLoc);

  /// ActOnPragmaFPContract - Called on well formed
  /// \#pragma {STDC,OPENCL} FP_CONTRACT and
  /// \#pragma clang fp contract
  void ActOnPragmaFPContract(SourceLocation Loc, LangOptions::FPModeKind FPC);

  /// Called on well formed
  /// \#pragma clang fp reassociate
  void ActOnPragmaFPReassociate(SourceLocation Loc, bool IsEnabled);

  /// ActOnPragmaFEnvAccess - Called on well formed
  /// \#pragma STDC FENV_ACCESS
  void ActOnPragmaFEnvAccess(SourceLocation Loc, bool IsEnabled);

  /// Called to set rounding mode for floating point operations.
  void setRoundingMode(SourceLocation Loc, llvm::RoundingMode);

  /// Called to set exception behavior for floating point operations.
  void setExceptionMode(SourceLocation Loc, LangOptions::FPExceptionModeKind);

  /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
  /// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
  void AddAlignmentAttributesForRecord(RecordDecl *RD);

  /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
  void AddMsStructLayoutForRecord(RecordDecl *RD);

  /// FreePackedContext - Deallocate and null out PackContext.
  void FreePackedContext();

  /// PushNamespaceVisibilityAttr - Note that we've entered a
  /// namespace with a visibility attribute.
  void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
                                   SourceLocation Loc);

  /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
  /// add an appropriate visibility attribute.
  void AddPushedVisibilityAttribute(Decl *RD);

  /// PopPragmaVisibility - Pop the top element of the visibility stack; used
  /// for '\#pragma GCC visibility' and visibility attributes on namespaces.
  void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);

  /// FreeVisContext - Deallocate and null out VisContext.
  void FreeVisContext();
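  // Rough sketch of how the visibility stack above is typically driven (the
  // exact caller is in the pragma handlers; this is illustrative only):
  // '#pragma GCC visibility push(hidden)' reaches ActOnPragmaVisibility with
  // the "hidden" identifier, 'pop' reaches it with a null VisType; while the
  // stack is non-empty, newly created declarations pick the value up through
  // AddPushedVisibilityAttribute(D), and PopPragmaVisibility unwinds the
  // stack at the matching pop or namespace end.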
  /// AddCFAuditedAttribute - Check whether we're currently within
  /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
  /// the appropriate attribute.
  void AddCFAuditedAttribute(Decl *D);

  void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
                                     SourceLocation PragmaLoc,
                                     attr::ParsedSubjectMatchRuleSet Rules);
  void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
                                     const IdentifierInfo *Namespace);

  /// Called on well-formed '\#pragma clang attribute pop'.
  void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
                               const IdentifierInfo *Namespace);

  /// Adds the attributes that have been specified using the
  /// '\#pragma clang attribute push' directives to the given declaration.
  void AddPragmaAttributes(Scope *S, Decl *D);

  void DiagnoseUnterminatedPragmaAttribute();

  /// Called on well formed \#pragma clang optimize.
  void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);

  /// Get the location for the currently active "\#pragma clang optimize
  /// off". If this location is invalid, then the state of the pragma is "on".
  SourceLocation getOptimizeOffPragmaLocation() const {
    return OptimizeOffPragmaLocation;
  }

  /// Only called on function definitions; if there is a pragma in scope
  /// with the effect of a range-based optnone, consider marking the function
  /// with attribute optnone.
  void AddRangeBasedOptnone(FunctionDecl *FD);

  /// Adds the 'optnone' attribute to the function declaration if there
  /// are no conflicts; Loc represents the location causing the 'optnone'
  /// attribute to be added (usually because of a pragma).
  void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);

  /// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
  void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                      bool IsPackExpansion);
  void AddAlignedAttr(Decl *D, const AttributeCommonInfo &CI, TypeSourceInfo *T,
                      bool IsPackExpansion);

  /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
  /// declaration.
  void AddAssumeAlignedAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E,
                            Expr *OE);

  /// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
  /// declaration.
  void AddAllocAlignAttr(Decl *D, const AttributeCommonInfo &CI,
                         Expr *ParamExpr);

  /// AddAlignValueAttr - Adds an align_value attribute to a particular
  /// declaration.
  void AddAlignValueAttr(Decl *D, const AttributeCommonInfo &CI, Expr *E);

  /// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
  /// declaration.
  void AddLaunchBoundsAttr(Decl *D, const AttributeCommonInfo &CI,
                           Expr *MaxThreads, Expr *MinBlocks);

  /// AddModeAttr - Adds a mode attribute to a particular declaration.
  void AddModeAttr(Decl *D, const AttributeCommonInfo &CI, IdentifierInfo *Name,
                   bool InInstantiation = false);

  void AddParameterABIAttr(Decl *D, const AttributeCommonInfo &CI,
                           ParameterABI ABI);

  enum class RetainOwnershipKind { NS, CF, OS };

  void AddXConsumedAttr(Decl *D, const AttributeCommonInfo &CI,
                        RetainOwnershipKind K, bool IsTemplateInstantiation);

  /// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
  /// attribute to a particular declaration.
  void addAMDGPUFlatWorkGroupSizeAttr(Decl *D, const AttributeCommonInfo &CI,
                                      Expr *Min, Expr *Max);
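  // A minimal sketch for the attribute helpers above (hypothetical
  // attribute-processing site, not a fixed call path): handling
  // '__attribute__((aligned(16)))' on a declaration D would funnel into the
  // expression form of AddAlignedAttr,
  //
  //   Expr *E = /* IntegerLiteral 16, built by the attribute parser */;
  //   S.AddAlignedAttr(D, AttrInfo, E, /*IsPackExpansion=*/false);
  //
  // whereas 'alignas(type-id)' would use the TypeSourceInfo overload.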
  /// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
  /// particular declaration.
  void addAMDGPUWavesPerEUAttr(Decl *D, const AttributeCommonInfo &CI,
                               Expr *Min, Expr *Max);

  bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);

  //===--------------------------------------------------------------------===//
  // C++ Coroutines TS
  //
  bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
                               StringRef Keyword);
  ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
  ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
  StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);

  ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                      bool IsImplicit = false);
  ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
                                        UnresolvedLookupExpr *Lookup);
  ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
  StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
                               bool IsImplicit = false);
  StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
  bool buildCoroutineParameterMoves(SourceLocation Loc);
  VarDecl *buildCoroutinePromise(SourceLocation Loc);
  void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
  ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
                                           SourceLocation FuncLoc);

  /// Check that the expression co_await promise.final_suspend() is not
  /// potentially-throwing.
  bool checkFinalSuspendNoThrow(const Stmt *FinalSuspend);

  //===--------------------------------------------------------------------===//
  // OpenCL extensions.
  //
private:
  std::string CurrOpenCLExtension;

  /// Extensions required by an OpenCL type.
  llvm::DenseMap<const Type *, std::set<std::string>> OpenCLTypeExtMap;

  /// Extensions required by an OpenCL declaration.
  llvm::DenseMap<const Decl *, std::set<std::string>> OpenCLDeclExtMap;

public:
  llvm::StringRef getCurrentOpenCLExtension() const {
    return CurrOpenCLExtension;
  }

  /// Check if a function declaration \p FD associates with any
  /// extensions present in OpenCLDeclExtMap and if so return the
  /// extension(s) name(s).
  std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);

  /// Check if a function type \p FT associates with any
  /// extensions present in OpenCLTypeExtMap and if so return the
  /// extension(s) name(s).
  std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);

  /// Find an extension in an appropriate extension map and return its name.
  template <typename T, typename MapT>
  std::string getOpenCLExtensionsFromExtMap(T *FT, MapT &Map);

  void setCurrentOpenCLExtension(llvm::StringRef Ext) {
    CurrOpenCLExtension = std::string(Ext);
  }

  /// Set OpenCL extensions for a type which can only be used when these
  /// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
  /// \param Exts A space separated list of OpenCL extensions.
  void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);

  /// Set OpenCL extensions for a declaration which can only be
  /// used when these OpenCL extensions are enabled. If \p Exts is empty, do
  /// nothing.
  /// \param Exts A space separated list of OpenCL extensions.
  void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);

  /// Set current OpenCL extensions for a type which can only be used
  /// when these OpenCL extensions are enabled. If current OpenCL extension is
  /// empty, do nothing.
  void setCurrentOpenCLExtensionForType(QualType T);
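  // Illustrative flow for the extension maps above (the surrounding
  // declaration-processing code is assumed, not spelled out here): when a
  // declaration D is parsed while an extension is the current one,
  // setOpenCLExtensionForDecl(D, "cl_khr_fp64") records the requirement in
  // OpenCLDeclExtMap; a later use of D while that extension is disabled can
  // then be rejected via isOpenCLDisabledDecl(D) or diagnosed through
  // checkOpenCLDisabledDecl(*ND, *E).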
  /// Set current OpenCL extensions for a declaration which
  /// can only be used when these OpenCL extensions are enabled. If current
  /// OpenCL extension is empty, do nothing.
  void setCurrentOpenCLExtensionForDecl(Decl *FD);

  bool isOpenCLDisabledDecl(Decl *FD);

  /// Check if type \p T corresponding to declaration specifier \p DS
  /// is disabled due to required OpenCL extensions being disabled. If so,
  /// emit diagnostics.
  /// \return true if type is disabled.
  bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);

  /// Check if declaration \p D used by expression \p E
  /// is disabled due to required OpenCL extensions being disabled. If so,
  /// emit diagnostics.
  /// \return true if type is disabled.
  bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);

  //===--------------------------------------------------------------------===//
  // OpenMP directives and clauses.
  //
private:
  void *VarDataSharingAttributesStack;

  /// Source locations of the nested '#pragma omp declare target' directives.
  SmallVector<SourceLocation, 4> DeclareTargetNesting;

  /// Initialization of data-sharing attributes stack.
  void InitDataSharingAttributesStack();
  void DestroyDataSharingAttributesStack();

  ExprResult
  VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
                                        bool StrictlyPositive = true);

  /// Returns OpenMP nesting level for current directive.
  unsigned getOpenMPNestingLevel() const;

  /// Adjusts the function scopes index for the target-based regions.
  void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
                                    unsigned Level) const;

  /// Returns the number of scopes associated with the construct on the given
  /// OpenMP level.
  int getNumberOfConstructScopes(unsigned Level) const;

  /// Push new OpenMP function region for non-capturing function.
  void pushOpenMPFunctionRegion();

  /// Pop OpenMP function region for non-capturing function.
  void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);

  /// Checks if a type or a declaration is disabled due to the owning extension
  /// being disabled, and emits diagnostic messages if it is disabled.
  /// \param D type or declaration to be checked.
  /// \param DiagLoc source location for the diagnostic message.
  /// \param DiagInfo information to be emitted for the diagnostic message.
  /// \param SrcRange source range of the declaration.
  /// \param Map maps type or declaration to the extensions.
  /// \param Selector selects diagnostic message: 0 for type and 1 for
  /// declaration.
  /// \return true if the type or declaration is disabled.
  template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
  bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
                                     MapT &Map, unsigned Selector = 0,
                                     SourceRange SrcRange = SourceRange());

  /// Helper to keep information about the current `omp begin/end declare
  /// variant` nesting.
  struct OMPDeclareVariantScope {
    /// The associated OpenMP context selector.
    OMPTraitInfo *TI;

    /// The associated OpenMP context selector mangling.
    std::string NameSuffix;

    OMPDeclareVariantScope(OMPTraitInfo &TI);
  };

  /// The current `omp begin/end declare variant` scopes.
  SmallVector<OMPDeclareVariantScope, 4> OMPDeclareVariantScopes;

  /// The declarator \p D defines a function in the scope \p S which is nested
  /// in an `omp begin/end declare variant` scope. In this method we create a
  /// declaration for \p D and rename \p D according to the OpenMP context
  /// selector of the surrounding scope.
  FunctionDecl *
  ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope(Scope *S,
                                                            Declarator &D);
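  // A minimal sketch of the declare-variant nesting protocol (hypothetical
  // parser-side caller): every '#pragma omp begin declare variant match(...)'
  // pushes one OMPDeclareVariantScope and every matching 'end declare
  // variant' pops it.
  //
  //   Actions.ActOnOpenMPBeginDeclareVariant(Loc, TI); // push a scope
  //   // ... function definitions inside the region are renamed via
  //   // ActOnStartOfFunctionDefinitionInOpenMPDeclareVariantScope ...
  //   Actions.ActOnOpenMPEndDeclareVariant();          // pop the scope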
  /// Register \p FD as specialization of \p BaseFD in the current `omp
  /// begin/end declare variant` scope.
  void ActOnFinishedFunctionDefinitionInOpenMPDeclareVariantScope(
      FunctionDecl *FD, FunctionDecl *BaseFD);

public:
  /// Are we inside an `omp begin/end declare variant` scope at the moment?
  bool isInOpenMPDeclareVariantScope() {
    return !OMPDeclareVariantScopes.empty();
  }

  /// Given the potential call expression \p Call, determine if there is a
  /// specialization via the OpenMP declare variant mechanism available. If
  /// there is, return the specialized call expression, otherwise return the
  /// original \p Call.
  ExprResult ActOnOpenMPCall(ExprResult Call, Scope *Scope,
                             SourceLocation LParenLoc, MultiExprArg ArgExprs,
                             SourceLocation RParenLoc, Expr *ExecConfig);

  /// Handle a `omp begin declare variant`.
  void ActOnOpenMPBeginDeclareVariant(SourceLocation Loc, OMPTraitInfo &TI);

  /// Handle a `omp end declare variant`.
  void ActOnOpenMPEndDeclareVariant();

  /// Checks if the variant/multiversion functions are compatible.
  bool areMultiversionVariantFunctionsCompatible(
      const FunctionDecl *OldFD, const FunctionDecl *NewFD,
      const PartialDiagnostic &NoProtoDiagID,
      const PartialDiagnosticAt &NoteCausedDiagIDAt,
      const PartialDiagnosticAt &NoSupportDiagIDAt,
      const PartialDiagnosticAt &DiffDiagIDAt, bool TemplatesSupported,
      bool ConstexprSupported, bool CLinkageMayDiffer);

  /// Tries to capture a lambda's captured variables in the OpenMP region
  /// before the original lambda itself is captured.
  void tryCaptureOpenMPLambdas(ValueDecl *V);

  /// Return true if the provided declaration \a D should be captured by
  /// reference.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  /// \param OpenMPCaptureLevel Capture level within an OpenMP construct.
  bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level,
                             unsigned OpenMPCaptureLevel) const;

  /// Check if the specified variable is used in one of the private
  /// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
  /// constructs.
  VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
                                unsigned StopAt = 0);
  ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
                                   ExprObjectKind OK, SourceLocation Loc);

  /// If the current region is a loop-based region, mark the start of the loop
  /// construct.
  void startOpenMPLoop();

  /// If the current region is a range loop-based region, mark the start of the
  /// loop construct.
  void startOpenMPCXXRangeFor();

  /// Check if the specified variable is used in 'private' clause.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  OpenMPClauseKind isOpenMPPrivateDecl(ValueDecl *D, unsigned Level,
                                       unsigned CapLevel) const;

  /// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
  /// for \p FD based on DSA for the provided corresponding captured
  /// declaration \p D.
  void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);

  /// Check if the specified variable is captured by 'target' directive.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level,
                                  unsigned CaptureLevel) const;

  /// Check if the specified global variable must be captured by outer capture
  /// regions.
  /// \param Level Relative level of nested OpenMP construct for which the
  /// check is performed.
  bool isOpenMPGlobalCapturedDecl(ValueDecl *D, unsigned Level,
                                  unsigned CaptureLevel) const;

  ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
                                                    Expr *Op);
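  // Illustrative use of the capture queries above (hypothetical caller that
  // builds the captured record for a 'target' region): each referenced
  // variable is classified roughly as
  //
  //   if (S.isOpenMPCapturedByRef(VD, Level, CaptureLevel))
  //     /* capture by reference: pass the address into the outlined fn */;
  //   else
  //     /* capture by copy: firstprivate-like value capture */;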
  /// Called on start of new data sharing attribute block.
  void StartOpenMPDSABlock(OpenMPDirectiveKind K,
                           const DeclarationNameInfo &DirName, Scope *CurScope,
                           SourceLocation Loc);

  /// Start analysis of clauses.
  void StartOpenMPClause(OpenMPClauseKind K);

  /// End analysis of clauses.
  void EndOpenMPClause();

  /// Called on end of data sharing attribute block.
  void EndOpenMPDSABlock(Stmt *CurDirective);

  /// Check if the current region is an OpenMP loop region and if it is,
  /// mark loop control variable, used in \p Init for loop initialization, as
  /// private by default.
  /// \param Init First part of the for loop.
  void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);

  // OpenMP directives and clauses.

  /// Called on correct id-expression from the '#pragma omp
  /// threadprivate'.
  ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                     const DeclarationNameInfo &Id,
                                     OpenMPDirectiveKind Kind);

  /// Called on well-formed '#pragma omp threadprivate'.
  DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(SourceLocation Loc,
                                                   ArrayRef<Expr *> VarList);

  /// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
  OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
                                                  ArrayRef<Expr *> VarList);

  /// Called on well-formed '#pragma omp allocate'.
  DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
                                              ArrayRef<Expr *> VarList,
                                              ArrayRef<OMPClause *> Clauses,
                                              DeclContext *Owner = nullptr);

  /// Called on well-formed '#pragma omp requires'.
  DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
                                              ArrayRef<OMPClause *> ClauseList);

  /// Check restrictions on the Requires directive.
  OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
                                        ArrayRef<OMPClause *> Clauses);

  /// Check if the specified type is allowed to be used in 'omp declare
  /// reduction' construct.
  QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
                                           TypeResult ParsedType);

  /// Called on start of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
      Scope *S, DeclContext *DC, DeclarationName Name,
      ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
      AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);

  /// Initialize declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);

  /// Finish current declare reduction construct combiner.
  void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);

  /// Initialize declare reduction construct initializer.
  /// \return omp_priv variable.
  VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);

  /// Finish current declare reduction construct initializer.
  void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
                                                 VarDecl *OmpPrivParm);

  /// Called at the end of '#pragma omp declare reduction'.
  DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
      Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);

  /// Check variable declaration in 'omp declare mapper' construct.
  TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);

  /// Check if the specified type is allowed to be used in 'omp declare
  /// mapper' construct.
  QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
                                        TypeResult ParsedType);
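  // Sketch of the expected call sequence around the declare-reduction entry
  // points above while parsing
  // '#pragma omp declare reduction (ident : type : combiner) initializer(...)'
  // (parser-side pseudo-code; error paths omitted):
  //
  //   DeclGroupPtrTy DRD = Actions.ActOnOpenMPDeclareReductionDirectiveStart(
  //       S, DC, Name, ReductionTypes, AS);
  //   Actions.ActOnOpenMPDeclareReductionCombinerStart(S, D);
  //   Expr *Combiner = /* parse the combiner expression */;
  //   Actions.ActOnOpenMPDeclareReductionCombinerEnd(D, Combiner);
  //   VarDecl *OmpPriv =
  //       Actions.ActOnOpenMPDeclareReductionInitializerStart(S, D);
  //   Expr *Init = /* parse the initializer expression */;
  //   Actions.ActOnOpenMPDeclareReductionInitializerEnd(D, Init, OmpPriv);
  //   return Actions.ActOnOpenMPDeclareReductionDirectiveEnd(S, DRD, IsValid);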
  /// Called on start of '#pragma omp declare mapper'.
  DeclGroupPtrTy ActOnOpenMPDeclareMapperDirective(
      Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
      SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
      Expr *MapperVarRef, ArrayRef<OMPClause *> Clauses,
      Decl *PrevDeclInScope = nullptr);

  /// Build the mapper variable of '#pragma omp declare mapper'.
  ExprResult ActOnOpenMPDeclareMapperDirectiveVarDecl(Scope *S,
                                                      QualType MapperType,
                                                      SourceLocation StartLoc,
                                                      DeclarationName VN);
  bool isOpenMPDeclareMapperVarDeclAllowed(const VarDecl *VD) const;
  const ValueDecl *getOpenMPDeclareMapperVarName() const;

  /// Called on the start of a declare target region, i.e.
  /// '#pragma omp declare target'.
  bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);

  /// Called at the end of a declare target region, i.e.
  /// '#pragma omp end declare target'.
  void ActOnFinishOpenMPDeclareTargetDirective();

  /// Searches for the provided declaration name for OpenMP declare target
  /// directive.
  NamedDecl *
  lookupOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
                                const DeclarationNameInfo &Id,
                                NamedDeclSetType &SameDirectiveDecls);

  /// Called on correct id-expression from the '#pragma omp declare target'.
  void ActOnOpenMPDeclareTargetName(NamedDecl *ND, SourceLocation Loc,
                                    OMPDeclareTargetDeclAttr::MapTypeTy MT,
                                    OMPDeclareTargetDeclAttr::DevTypeTy DT);

  /// Check declaration inside target region.
  void
  checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
                                   SourceLocation IdLoc = SourceLocation());

  /// Finishes analysis of the deferred functions calls that may be declared as
  /// host/nohost during device/host compilation.
  void finalizeOpenMPDelayedAnalysis(const FunctionDecl *Caller,
                                     const FunctionDecl *Callee,
                                     SourceLocation Loc);

  /// Return true inside OpenMP declare target region.
  bool isInOpenMPDeclareTargetContext() const {
    return !DeclareTargetNesting.empty();
  }

  /// Return true inside OpenMP target region.
  bool isInOpenMPTargetExecutionDirective() const;

  /// Return the number of captured regions created for an OpenMP directive.
  static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);

  /// Initialization of captured region for OpenMP region.
  void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);

  /// End of OpenMP region.
  ///
  /// \param S Statement associated with the current OpenMP region.
  /// \param Clauses List of clauses for the current OpenMP region.
  ///
  /// \returns Statement for finished OpenMP region.
  StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);

  StmtResult ActOnOpenMPExecutableDirective(
      OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
      OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
      Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp parallel' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);

  using VarsWithInheritedDSAType =
      llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;

  /// Called on well-formed '\#pragma omp simd' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                           SourceLocation StartLoc, SourceLocation EndLoc,
                           VarsWithInheritedDSAType &VarsWithImplicitDSA);
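  // Sketch of how an executable directive is assembled from the region
  // entry points above (hypothetical parser-side flow):
  //
  //   Actions.ActOnOpenMPRegionStart(OMPD_parallel, CurScope);
  //   StmtResult Body = /* parse the associated statement */;
  //   StmtResult Region = Actions.ActOnOpenMPRegionEnd(Body, Clauses);
  //   StmtResult Dir = Actions.ActOnOpenMPExecutableDirective(
  //       OMPD_parallel, DirName, /*CancelRegion=*/OMPD_unknown, Clauses,
  //       Region.get(), StartLoc, EndLoc);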
  /// Called on well-formed '\#pragma omp for' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                          SourceLocation StartLoc, SourceLocation EndLoc,
                          VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp for simd' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                              SourceLocation StartLoc, SourceLocation EndLoc,
                              VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp sections' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp section' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp single' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp master' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp critical' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
                                          ArrayRef<OMPClause *> Clauses,
                                          Stmt *AStmt, SourceLocation StartLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp parallel for' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp parallel for simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp parallel master' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelMasterDirective(ArrayRef<OMPClause *> Clauses,
                                                Stmt *AStmt,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp parallel sections' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
                                                  Stmt *AStmt,
                                                  SourceLocation StartLoc,
                                                  SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp task' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
                                      Stmt *AStmt, SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp taskyield'.
  StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
                                           SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp barrier'.
  StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp taskwait'.
  StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp taskgroup'.
  StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
                                           Stmt *AStmt, SourceLocation StartLoc,
                                           SourceLocation EndLoc);
  /// Called on well-formed '\#pragma omp flush'.
  StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
                                       SourceLocation StartLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp depobj'.
  StmtResult ActOnOpenMPDepobjDirective(ArrayRef<OMPClause *> Clauses,
                                        SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp scan'.
  StmtResult ActOnOpenMPScanDirective(ArrayRef<OMPClause *> Clauses,
                                      SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp ordered' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
                                         Stmt *AStmt, SourceLocation StartLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp atomic' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp target' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
                                        Stmt *AStmt, SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp target data' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
                                            Stmt *AStmt,
                                            SourceLocation StartLoc,
                                            SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp target enter data' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
                                                 SourceLocation StartLoc,
                                                 SourceLocation EndLoc,
                                                 Stmt *AStmt);

  /// Called on well-formed '\#pragma omp target exit data' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc,
                                                Stmt *AStmt);

  /// Called on well-formed '\#pragma omp target parallel' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
                                                Stmt *AStmt,
                                                SourceLocation StartLoc,
                                                SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp target parallel for' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                       Stmt *AStmt, SourceLocation StartLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp cancellation point'.
  StmtResult
  ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);

  /// Called on well-formed '\#pragma omp cancel'.
  StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
                                        SourceLocation StartLoc,
                                        SourceLocation EndLoc,
                                        OpenMPDirectiveKind CancelRegion);

  /// Called on well-formed '\#pragma omp taskloop' after parsing of the
  /// associated statement.
  StmtResult
  ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                               SourceLocation StartLoc, SourceLocation EndLoc,
                               VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp taskloop simd' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTaskLoopSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp master taskloop' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPMasterTaskLoopDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp master taskloop simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPMasterTaskLoopSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp parallel master taskloop' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelMasterTaskLoopDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp parallel master taskloop simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPParallelMasterTaskLoopSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp distribute' after parsing
  /// of the associated statement.
  StmtResult
  ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                                 SourceLocation StartLoc, SourceLocation EndLoc,
                                 VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target update'.
  StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
                                              SourceLocation StartLoc,
                                              SourceLocation EndLoc,
                                              Stmt *AStmt);

  /// Called on well-formed '\#pragma omp distribute parallel for' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp distribute parallel for simd'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target parallel for simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target simd' after parsing of
  /// the associated statement.
  StmtResult
  ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
                                 SourceLocation StartLoc, SourceLocation EndLoc,
                                 VarsWithInheritedDSAType &VarsWithImplicitDSA);
  /// Called on well-formed '\#pragma omp teams distribute' after parsing of
  /// the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp teams distribute simd' after parsing
  /// of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp teams distribute parallel for simd'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp teams distribute parallel for'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target teams' after parsing of the
  /// associated statement.
  StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
                                             Stmt *AStmt,
                                             SourceLocation StartLoc,
                                             SourceLocation EndLoc);

  /// Called on well-formed '\#pragma omp target teams distribute' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target teams distribute parallel for'
  /// after parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target teams distribute parallel for
  /// simd' after parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Called on well-formed '\#pragma omp target teams distribute simd' after
  /// parsing of the associated statement.
  StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
      ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
      SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);

  /// Checks correctness of linear modifiers.
  bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
                                 SourceLocation LinLoc);

  /// Checks that the specified declaration matches requirements for the linear
  /// decls.
  bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
                             OpenMPLinearClauseKind LinKind, QualType Type,
                             bool IsDeclareSimd = false);
  /// Called on well-formed '\#pragma omp declare simd' after parsing of
  /// the associated method/function.
  DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
      DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
      Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
      ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
      ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);

  /// Checks '\#pragma omp declare variant' variant function and original
  /// functions after parsing of the associated method/function.
  /// \param DG Function declaration to which the declare variant directive is
  /// applied.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p DG.
  /// \param TI The trait info object representing the match clause.
  /// \returns None if the function/variant function are not compatible with
  /// the pragma; otherwise, the pair of original function and variant ref
  /// expression.
  Optional<std::pair<FunctionDecl *, Expr *>>
  checkOpenMPDeclareVariantFunction(DeclGroupPtrTy DG, Expr *VariantRef,
                                    OMPTraitInfo &TI, SourceRange SR);

  /// Called on well-formed '\#pragma omp declare variant' after parsing of
  /// the associated method/function.
  /// \param FD Function declaration to which the declare variant directive is
  /// applied.
  /// \param VariantRef Expression that references the variant function, which
  /// must be used instead of the original one, specified in \p FD.
  /// \param TI The context traits associated with the function variant.
  void ActOnOpenMPDeclareVariantDirective(FunctionDecl *FD, Expr *VariantRef,
                                          OMPTraitInfo &TI, SourceRange SR);

  OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed 'allocator' clause.
  OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed 'if' clause.
  OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
                                 Expr *Condition, SourceLocation StartLoc,
                                 SourceLocation LParenLoc,
                                 SourceLocation NameModifierLoc,
                                 SourceLocation ColonLoc,
                                 SourceLocation EndLoc);

  /// Called on well-formed 'final' clause.
  OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);

  /// Called on well-formed 'num_threads' clause.
  OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
                                         SourceLocation StartLoc,
                                         SourceLocation LParenLoc,
                                         SourceLocation EndLoc);

  /// Called on well-formed 'safelen' clause.
  OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'simdlen' clause.
  OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'collapse' clause.
  OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed 'ordered' clause.
  OMPClause *
  ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
                           SourceLocation LParenLoc = SourceLocation(),
                           Expr *NumForLoops = nullptr);

  /// Called on well-formed 'grainsize' clause.
  OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);
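  // A minimal sketch for the single-expression clauses above (hypothetical
  // parser-side caller): after parsing 'num_threads(N)',
  //
  //   OMPClause *C = Actions.ActOnOpenMPNumThreadsClause(
  //       NumThreadsExpr, StartLoc, LParenLoc, EndLoc);
  //
  // clause kinds without a dedicated hook can instead go through the generic
  // ActOnOpenMPSingleExprClause(OMPC_..., Expr, StartLoc, LParenLoc, EndLoc).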
  /// Called on well-formed 'num_tasks' clause.
  OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed 'hint' clause.
  OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
                                   SourceLocation LParenLoc,
                                   SourceLocation EndLoc);

  /// Called on well-formed 'detach' clause.
  OMPClause *ActOnOpenMPDetachClause(Expr *Evt, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument,
                                     SourceLocation ArgumentLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'default' clause.
  OMPClause *ActOnOpenMPDefaultClause(llvm::omp::DefaultKind Kind,
                                      SourceLocation KindLoc,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'proc_bind' clause.
  OMPClause *ActOnOpenMPProcBindClause(llvm::omp::ProcBindKind Kind,
                                       SourceLocation KindLoc,
                                       SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed 'order' clause.
  OMPClause *ActOnOpenMPOrderClause(OpenMPOrderClauseKind Kind,
                                    SourceLocation KindLoc,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);

  /// Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(OpenMPDependClauseKind Kind,
                                     SourceLocation KindLoc,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  OMPClause *ActOnOpenMPSingleExprWithArgClause(
      OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
      SourceLocation EndLoc);

  /// Called on well-formed 'schedule' clause.
  OMPClause *ActOnOpenMPScheduleClause(
      OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
      OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

  OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
                               SourceLocation EndLoc);

  /// Called on well-formed 'nowait' clause.
  OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'untied' clause.
  OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'mergeable' clause.
  OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed 'read' clause.
  OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

  /// Called on well-formed 'write' clause.
  OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
                                    SourceLocation EndLoc);

  /// Called on well-formed 'update' clause.
  OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'capture' clause.
  OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'seq_cst' clause.
  OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'acq_rel' clause.
  OMPClause *ActOnOpenMPAcqRelClause(SourceLocation StartLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'acquire' clause.
  OMPClause *ActOnOpenMPAcquireClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);
  /// Called on well-formed 'release' clause.
  OMPClause *ActOnOpenMPReleaseClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'relaxed' clause.
  OMPClause *ActOnOpenMPRelaxedClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'destroy' clause.
  OMPClause *ActOnOpenMPDestroyClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'threads' clause.
  OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'simd' clause.
  OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
                                   SourceLocation EndLoc);

  /// Called on well-formed 'nogroup' clause.
  OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'unified_address' clause.
  OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);

  /// Called on well-formed 'unified_shared_memory' clause.
  OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
                                                  SourceLocation EndLoc);

  /// Called on well-formed 'reverse_offload' clause.
  OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
                                             SourceLocation EndLoc);

  /// Called on well-formed 'dynamic_allocators' clause.
  OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
                                                SourceLocation EndLoc);

  /// Called on well-formed 'atomic_default_mem_order' clause.
  OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
      OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation EndLoc);

  OMPClause *ActOnOpenMPVarListClause(
      OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *DepModOrTailExpr,
      const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
      CXXScopeSpec &ReductionOrMapperIdScopeSpec,
      DeclarationNameInfo &ReductionOrMapperId, int ExtraModifier,
      ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
      ArrayRef<SourceLocation> MapTypeModifiersLoc, bool IsMapTypeImplicit,
      SourceLocation ExtraModifierLoc,
      ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
      ArrayRef<SourceLocation> MotionModifiersLoc);

  /// Called on well-formed 'inclusive' clause.
  OMPClause *ActOnOpenMPInclusiveClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed 'exclusive' clause.
  OMPClause *ActOnOpenMPExclusiveClause(ArrayRef<Expr *> VarList,
                                        SourceLocation StartLoc,
                                        SourceLocation LParenLoc,
                                        SourceLocation EndLoc);

  /// Called on well-formed 'allocate' clause.
  OMPClause *
  ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
                            SourceLocation StartLoc, SourceLocation ColonLoc,
                            SourceLocation LParenLoc, SourceLocation EndLoc);

  /// Called on well-formed 'private' clause.
  OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
                                      SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'firstprivate' clause.
  OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
                                           SourceLocation StartLoc,
                                           SourceLocation LParenLoc,
                                           SourceLocation EndLoc);

  /// Called on well-formed 'lastprivate' clause.
  OMPClause *ActOnOpenMPLastprivateClause(
      ArrayRef<Expr *> VarList, OpenMPLastprivateModifier LPKind,
      SourceLocation LPKindLoc, SourceLocation ColonLoc,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation EndLoc);

  /// Called on well-formed 'shared' clause.
  OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);
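  // Illustrative sketch for the variable-list clauses above (hypothetical
  // caller): 'private(a, b)' becomes
  //
  //   OMPClause *C = Actions.ActOnOpenMPPrivateClause(
  //       {RefA, RefB}, StartLoc, LParenLoc, EndLoc);
  //
  // while clause kinds parsed through a common path go through the generic
  // ActOnOpenMPVarListClause entry point, which dispatches on
  // OpenMPClauseKind.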
  /// Called on well-formed 'reduction' clause.
  OMPClause *ActOnOpenMPReductionClause(
      ArrayRef<Expr *> VarList, OpenMPReductionClauseModifier Modifier,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation ModifierLoc, SourceLocation ColonLoc,
      SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);

  /// Called on well-formed 'task_reduction' clause.
  OMPClause *ActOnOpenMPTaskReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc,
      SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);

  /// Called on well-formed 'in_reduction' clause.
  OMPClause *ActOnOpenMPInReductionClause(
      ArrayRef<Expr *> VarList, SourceLocation StartLoc,
      SourceLocation LParenLoc, SourceLocation ColonLoc,
      SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec,
      const DeclarationNameInfo &ReductionId,
      ArrayRef<Expr *> UnresolvedReductions = llvm::None);

  /// Called on well-formed 'linear' clause.
  OMPClause *ActOnOpenMPLinearClause(
      ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc,
      SourceLocation LParenLoc, OpenMPLinearClauseKind LinKind,
      SourceLocation LinLoc, SourceLocation ColonLoc, SourceLocation EndLoc);

  /// Called on well-formed 'aligned' clause.
  OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
                                      Expr *Alignment, SourceLocation StartLoc,
                                      SourceLocation LParenLoc,
                                      SourceLocation ColonLoc,
                                      SourceLocation EndLoc);

  /// Called on well-formed 'copyin' clause.
  OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
                                     SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'copyprivate' clause.
  OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed 'flush' pseudo clause.
  OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
                                    SourceLocation StartLoc,
                                    SourceLocation LParenLoc,
                                    SourceLocation EndLoc);

  /// Called on well-formed 'depobj' pseudo clause.
  OMPClause *ActOnOpenMPDepobjClause(Expr *Depobj, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'depend' clause.
  OMPClause *
  ActOnOpenMPDependClause(Expr *DepModifier, OpenMPDependClauseKind DepKind,
                          SourceLocation DepLoc, SourceLocation ColonLoc,
                          ArrayRef<Expr *> VarList, SourceLocation StartLoc,
                          SourceLocation LParenLoc, SourceLocation EndLoc);

  /// Called on well-formed 'device' clause.
  OMPClause *ActOnOpenMPDeviceClause(OpenMPDeviceClauseModifier Modifier,
                                     Expr *Device, SourceLocation StartLoc,
                                     SourceLocation LParenLoc,
                                     SourceLocation ModifierLoc,
                                     SourceLocation EndLoc);

  /// Called on well-formed 'map' clause.
  OMPClause *
  ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
                       ArrayRef<SourceLocation> MapTypeModifiersLoc,
                       CXXScopeSpec &MapperIdScopeSpec,
                       DeclarationNameInfo &MapperId,
                       OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
                       SourceLocation MapLoc, SourceLocation ColonLoc,
                       ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                       ArrayRef<Expr *> UnresolvedMappers = llvm::None);

  /// Called on well-formed 'num_teams' clause.
  OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);
  /// Called on well-formed 'thread_limit' clause.
  OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);

  /// Called on well-formed 'priority' clause.
  OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation EndLoc);

  /// Called on well-formed 'dist_schedule' clause.
  OMPClause *ActOnOpenMPDistScheduleClause(
      OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
      SourceLocation StartLoc, SourceLocation LParenLoc,
      SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);

  /// Called on well-formed 'defaultmap' clause.
  OMPClause *ActOnOpenMPDefaultmapClause(
      OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
      SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
      SourceLocation KindLoc, SourceLocation EndLoc);

  /// Called on well-formed 'to' clause.
  OMPClause *
  ActOnOpenMPToClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                      ArrayRef<SourceLocation> MotionModifiersLoc,
                      CXXScopeSpec &MapperIdScopeSpec,
                      DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
                      ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                      ArrayRef<Expr *> UnresolvedMappers = llvm::None);

  /// Called on well-formed 'from' clause.
  OMPClause *
  ActOnOpenMPFromClause(ArrayRef<OpenMPMotionModifierKind> MotionModifiers,
                        ArrayRef<SourceLocation> MotionModifiersLoc,
                        CXXScopeSpec &MapperIdScopeSpec,
                        DeclarationNameInfo &MapperId, SourceLocation ColonLoc,
                        ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
                        ArrayRef<Expr *> UnresolvedMappers = llvm::None);

  /// Called on well-formed 'use_device_ptr' clause.
  OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
                                           const OMPVarListLocTy &Locs);

  /// Called on well-formed 'use_device_addr' clause.
  OMPClause *ActOnOpenMPUseDeviceAddrClause(ArrayRef<Expr *> VarList,
                                            const OMPVarListLocTy &Locs);

  /// Called on well-formed 'is_device_ptr' clause.
  OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
                                          const OMPVarListLocTy &Locs);

  /// Called on well-formed 'nontemporal' clause.
  OMPClause *ActOnOpenMPNontemporalClause(ArrayRef<Expr *> VarList,
                                          SourceLocation StartLoc,
                                          SourceLocation LParenLoc,
                                          SourceLocation EndLoc);

  /// Data for list of allocators.
  struct UsesAllocatorsData {
    /// Allocator.
    Expr *Allocator = nullptr;
    /// Allocator traits.
    Expr *AllocatorTraits = nullptr;
    /// Locations of '(' and ')' symbols.
    SourceLocation LParenLoc, RParenLoc;
  };

  /// Called on well-formed 'uses_allocators' clause.
  OMPClause *ActOnOpenMPUsesAllocatorClause(SourceLocation StartLoc,
                                            SourceLocation LParenLoc,
                                            SourceLocation EndLoc,
                                            ArrayRef<UsesAllocatorsData> Data);

  /// Called on well-formed 'affinity' clause.
  OMPClause *ActOnOpenMPAffinityClause(SourceLocation StartLoc,
                                       SourceLocation LParenLoc,
                                       SourceLocation ColonLoc,
                                       SourceLocation EndLoc, Expr *Modifier,
                                       ArrayRef<Expr *> Locators);

  /// The kind of conversion being performed.
  enum CheckedConversionKind {
    /// An implicit conversion.
    CCK_ImplicitConversion,
    /// A C-style cast.
    CCK_CStyleCast,
    /// A functional-style cast.
    CCK_FunctionalCast,
    /// A cast other than a C-style cast.
    CCK_OtherCast,
    /// A conversion for an operand of a builtin overloaded operator.
    CCK_ForBuiltinOverloadedOp
  };

  static bool isCast(CheckedConversionKind CCK) {
    return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
           CCK == CCK_OtherCast;
  }
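  // For illustration: of the CheckedConversionKind values above, only the
  // three explicit-cast kinds classify as casts, e.g.
  //
  //   isCast(CCK_CStyleCast)          // true
  //   isCast(CCK_ImplicitConversion)  // false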
/// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of a unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E, bool Diagnose = true); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This function is a no-op if the operand has a function type // or an array type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); /// If \p E is a prvalue denoting an unmaterialized temporary, materialize /// it as an xvalue. In C++98, the result will still be a prvalue, because /// we don't have xvalues there. ExprResult TemporaryMaterializationConversion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion. enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type.
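  // (A sketch of what this guards against: passing a non-POD argument such as
  // a std::string to a C variadic function like printf is not a valid call,
  // so rather than silently miscompiling, a runtime trap is emitted.)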
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); /// Context in which we're performing a usual arithmetic conversion. enum ArithConvKind { /// An arithmetic operation. ACK_Arithmetic, /// A bitwise operation. ACK_BitwiseOp, /// A comparison. ACK_Comparison, /// A conditional (?:) operator. ACK_Conditional, /// A compound assignment expression. ACK_CompAssign, }; // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, ArithConvKind ACK); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatibleFunctionPointer - The assignment is between two function /// pointer types that are not compatible, but we accept them as an /// extension. IncompatibleFunctionPointer, /// IncompatiblePointerSign - The assignment is between two pointer types /// which point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerAddressSpaceMismatch - The assignment /// changes address spaces in nested pointer types which is not allowed. /// For instance, converting __private int ** to __generic int ** is /// illegal even though __private could be converted to __generic. IncompatibleNestedPointerAddressSpaceMismatch, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this.
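  /// (For example, an assignment like 'void (^b)(void) = 42;' falls in this
  /// bucket and is rejected.)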
IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright; it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and optionally prepare for a conversion of /// the RHS to the LHS type. The conversion is prepared for if ConvertRHS /// is true. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind, bool ConvertRHS = true); /// Check assignment constraints for an assignment of RHS to LHSType. /// /// \param LHSType The destination type for the assignment. /// \param RHS The source expression for the assignment. /// \param Diagnose If \c true, diagnostics may be produced when checking /// for assignability. If a diagnostic is produced, \p RHS will be /// set to ExprError(). Note that this function may still return /// without producing a diagnostic, even for an invalid assignment. /// \param DiagnoseCFAudited If \c true, the target is a function parameter /// in an audited Core Foundation API and does not need to be checked /// for ARC retain issues. /// \param ConvertRHS If \c true, \p RHS will be updated to model the /// conversions necessary to perform the assignment. If \c false, /// \p Diagnose must also be \c false. AssignConvertType CheckSingleAssignmentConstraints( QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false, bool ConvertRHS = true); // If the lhs type is a transparent union, check whether we // can initialize the transparent union with the given expression.
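  // For example (a sketch of the GCC extension this models; 'U' is an
  // illustrative name):
  //   typedef union {
  //     int *ip;
  //     float *fp;
  //   } U __attribute__((transparent_union));
  // An 'int *' argument passed to a 'U' parameter initializes the union
  // through its 'ip' member, with no explicit union construction required.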
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); ExprResult PerformQualificationConversion( Expr *E, QualType Ty, ExprValueKind VK = VK_RValue, CheckedConversionKind CCK = CCK_ImplicitConversion); /// the following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). /// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc, bool IsCompAssign = false); void CheckPtrComparisonWithNullChar(ExprResult &E, ExprResult &NullE); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
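  // For example (a sketch): plain 'f = 1.5' with 'float f' passes a null
  // QualType, while 'f += d' with 'double d' passes the type in which the
  // compound computation is performed (here 'double').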
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType CheckGNUVectorConditionalTypes(ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool ConvertArgs = true); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool ConvertArgs = true) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool AllowBothBool, bool AllowBoolConversion); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, BinaryOperatorKind Opc); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); /// Type checking for matrix binary operators. QualType CheckMatrixElementwiseOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType CheckMatrixMultiplyOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible - The two types are reference-compatible. Ref_Compatible }; // Fake up a scoped enumeration that still contextually converts to bool. struct ReferenceConversionsScope { /// The conversions that would be performed on an lvalue of type T2 when /// binding a reference of type T1 to it, as determined when evaluating /// whether T1 is reference-compatible with T2. 
enum ReferenceConversions { Qualification = 0x1, NestedQualification = 0x2, Function = 0x4, DerivedToBase = 0x8, ObjC = 0x10, ObjCLifetime = 0x20, LLVM_MARK_AS_BITMASK_ENUM(/*LargestValue=*/ObjCLifetime) }; }; using ReferenceConversions = ReferenceConversionsScope::ReferenceConversions; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, ReferenceConversions *Conv = nullptr); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// Force an expression with unknown-type to an expression of the /// given type. ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); /// Prepare `SplattedExpr` for a vector splat operation, adding /// implicit casts if necessary. ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error }; /// Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds for ARC and Weak. ARCConversionResult CheckObjCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool Diagnose = true, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types.
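  /// (For example, for a send such as '[receiver setValue:x]', this checks
  /// 'x' against the parameter of the resolved 'setValue:' method and
  /// computes the result type of the send.)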
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); class ConditionResult { Decl *ConditionVar; FullExprArg Condition; bool Invalid; bool HasKnownValue; bool KnownValue; friend class Sema; ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition, bool IsConstexpr) : ConditionVar(ConditionVar), Condition(Condition), Invalid(false), HasKnownValue(IsConstexpr && Condition.get() && !Condition.get()->isValueDependent()), KnownValue(HasKnownValue && !!Condition.get()->EvaluateKnownConstInt(S.Context)) {} explicit ConditionResult(bool Invalid) : ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid), HasKnownValue(false), KnownValue(false) {} public: ConditionResult() : ConditionResult(false) {} bool isInvalid() const { return Invalid; } std::pair<VarDecl *, Expr *> get() const { return std::make_pair(cast_or_null<VarDecl>(ConditionVar), Condition.get()); } llvm::Optional<bool> getKnownValue() const { if (!HasKnownValue) return None; return KnownValue; } }; static ConditionResult ConditionError() { return ConditionResult(true); } enum class ConditionKind { Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'. ConstexprIf, ///< A constant boolean condition from 'if constexpr'. Switch ///< An integral condition for a 'switch' statement. }; ConditionResult ActOnCondition(Scope *S, SourceLocation Loc, Expr *SubExpr, ConditionKind CK); ConditionResult ActOnConditionVariable(Decl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, ConditionKind CK); ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. /// \return true iff there were any errors ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E, bool IsConstexpr = false); /// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression /// found in an explicit(bool) specifier. ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E); /// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier. /// Returns true if the explicit specifier is now resolved.
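  /// (For example, a sketch: given 'template <typename T> struct S {
  /// explicit(sizeof(T) == 1) S(T); };', the specifier stays unresolved
  /// until 'T' is known and the condition can be evaluated.)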
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual SemaDiagnosticBuilder diagnoseNotICEType(Sema &S, SourceLocation Loc, QualType T); virtual SemaDiagnosticBuilder diagnoseNotICE(Sema &S, SourceLocation Loc) = 0; virtual SemaDiagnosticBuilder diagnoseFold(Sema &S, SourceLocation Loc); virtual ~VerifyICEDiagnoser() {} }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0. ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); private: unsigned ForceCUDAHostDeviceDepth = 0; public: /// Increments our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. So long as this count is greater /// than zero, all functions encountered will be __host__ __device__. void PushForceCUDAHostDevice(); /// Decrements our count of the number of times we've seen a pragma forcing /// functions to be __host__ __device__. Returns false if the count is 0 /// before decrementing, so you can emit an error. bool PopForceCUDAHostDevice(); /// Diagnostics that are emitted only if we discover that the given function /// must be codegen'ed. Because handling these correctly adds overhead to /// compilation, this is currently only enabled for CUDA compilations. llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>, std::vector<PartialDiagnosticAt>> DeviceDeferredDiags; /// A pair of a canonical FunctionDecl and a SourceLocation. When used as the /// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc { CanonicalDeclPtr<FunctionDecl> FD; SourceLocation Loc; }; /// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a /// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the /// same deferred diag twice. llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags; /// An inverse call graph, mapping known-emitted functions to one of their /// known-emitted callers (plus the location of the call). /// /// Functions that we can tell a priori must be emitted aren't added to this /// map. llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>, /* Caller = */ FunctionDeclAndLoc> DeviceKnownEmittedFns; /// Diagnostic builder for CUDA/OpenMP device errors which may or may not be /// deferred. /// /// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch) /// which are not allowed to appear inside __device__ functions and are /// allowed to appear in __host__ __device__ functions only if the host+device /// function is never codegen'ed. /// /// To handle this, we use the notion of "deferred diagnostics", where we /// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed. /// /// This class lets you emit either a regular diagnostic, a deferred /// diagnostic, or no diagnostic at all, according to an argument you pass to /// its constructor, thus simplifying the process of creating these "maybe /// deferred" diagnostics. class DeviceDiagBuilder { public: enum Kind { /// Emit no diagnostics. K_Nop, /// Emit the diagnostic immediately (i.e., behave like Sema::Diag()). K_Immediate, /// Emit the diagnostic immediately, and, if it's a warning or error, also /// emit a call stack showing how this function can be reached by an a /// priori known-emitted function. K_ImmediateWithCallStack, /// Create a deferred diagnostic, which is emitted only if the function /// it's attached to is codegen'ed. Also emit a call stack as with /// K_ImmediateWithCallStack. K_Deferred }; DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID, FunctionDecl *Fn, Sema &S); DeviceDiagBuilder(DeviceDiagBuilder &&D); DeviceDiagBuilder(const DeviceDiagBuilder &) = default; ~DeviceDiagBuilder(); /// Convertible to bool: True if we immediately emitted an error, false if /// we didn't emit an error or we created a deferred error. /// /// Example usage: /// /// if (DeviceDiagBuilder(...) << foo << bar) /// return ExprError(); /// /// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably /// want to use these instead of creating a DeviceDiagBuilder yourself. operator bool() const { return ImmediateDiag.hasValue(); } template <typename T> friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag, const T &Value) { if (Diag.ImmediateDiag.hasValue()) *Diag.ImmediateDiag << Value; else if (Diag.PartialDiagId.hasValue()) Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second << Value; return Diag; } private: Sema &S; SourceLocation Loc; unsigned DiagID; FunctionDecl *Fn; bool ShowCallStack; // Invariant: At most one of these Optionals has a value. // FIXME: Switch these to a Variant once that exists. llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag; llvm::Optional<unsigned> PartialDiagId; }; /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as device code". /// /// - If CurContext is a __host__ function, does not emit any diagnostics. /// - If CurContext is a __device__ or __global__ function, emits the /// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for /// the device, creates a diagnostic which is emitted if and when we realize /// that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in CUDA device code. /// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget()) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current context /// is "used as host code". /// /// Same as CUDADiagIfDeviceCode, with "host" and "device" switched. DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the device, emits the diagnostics immediately. /// - If CurContext is a non-`declare target` function and we are compiling /// for the device, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID); /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as host code". /// /// - If CurContext is a `declare target` function or it is known that the /// function is emitted for the host, emits the diagnostics immediately. /// - If CurContext is a non-host function, just ignore it. /// /// Example usage: /// /// // Variable-length arrays are not allowed in NVPTX device code. /// if (diagIfOpenMPHostCode(Loc, diag::err_vla_unsupported)) /// return ExprError(); /// // Otherwise, continue parsing as normal. DeviceDiagBuilder diagIfOpenMPHostCode(SourceLocation Loc, unsigned DiagID); DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID); /// Check if the declaration \p D is allowed to be used in expressions for /// the offloading devices. void checkDeviceDecl(const ValueDecl *D, SourceLocation Loc); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; /// Determines whether the given function is a CUDA device/host/kernel/etc. /// function. /// /// Use this rather than examining the function's attributes yourself -- you /// will get it wrong. Returns CFT_Host if D is null. CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D, bool IgnoreImplicitHDAttr = false); CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs); /// Gets the CUDA target for the current context. CUDAFunctionTarget CurrentCUDATarget() { return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext)); } static bool isCUDAImplicitHostDeviceFunction(const FunctionDecl *D); // CUDA function call preference. Must be ordered numerically from // worst to best. enum CUDAFunctionPreference { CFP_Never, // Invalid caller/callee combination. CFP_WrongSide, // Calls from host-device to host or device // function that do not match current compilation // mode. CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device // function matching current compilation mode. CFP_Native, // host-to-host or device-to-device calls. }; /// Identifies relative preference of a given Caller/Callee /// combination, based on their host/device attributes. /// \param Caller function which needs address of \p Callee. /// nullptr in case of global context. /// \param Callee target function /// /// \returns preference value for particular Caller/Callee combination. CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Determines whether Caller may invoke Callee, based on their CUDA /// host/device attributes. Returns false if the call is not allowed. /// /// Note: Will return true for CFP_WrongSide calls. These may appear in /// semantically correct CUDA programs, but only if they're never codegen'ed. bool IsAllowedCUDACall(const FunctionDecl *Caller, const FunctionDecl *Callee) { return IdentifyCUDAPreference(Caller, Callee) != CFP_Never; } /// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD, /// depending on FD and the current compilation settings. void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD, const LookupResult &Previous); /// May add implicit CUDAConstantAttr attribute to VD, depending on VD /// and current compilation settings. void MaybeAddCUDAConstantAttr(VarDecl *VD); public: /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// (CFP_Never), emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to /// be emitted if and when the caller is codegen'ed, and returns true. /// /// Will only create deferred diagnostics for a given SourceLocation once, /// so you can safely call this multiple times without generating duplicate /// deferred errors. /// /// - Otherwise, returns true without emitting any diagnostics. bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee); void CUDACheckLambdaCapture(CXXMethodDecl *D, const sema::Capture &Capture); /// Set __device__ or __host__ __device__ attributes on the given lambda /// operator() method. /// /// CUDA lambdas are host+device functions by default unless they have an /// explicit __host__ or __device__ attribute. void CUDASetLambdaAttrs(CXXMethodDecl *Method); /// Finds a function in \p Matches with the highest calling priority /// from the \p Caller context and erases all functions with lower /// calling priority. void EraseUnwantedCUDAMatches( const FunctionDecl *Caller, SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches); /// Given an implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \return true if \p CD can be considered empty according to CUDA /// (E.2.3.1 in CUDA 7.5 Programming guide). bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD); bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD); // \brief Checks that initializers of \p VD satisfy CUDA restrictions. In // case of error emits appropriate diagnostic and invalidates \p VD. // // \details CUDA allows only empty constructors as initializers for global // variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all // __shared__ variables whether they are local or not (they all are implicitly // static in CUDA). One exception is that CUDA allows constant initializers // for __constant__ and __device__ variables. void checkAllowedCUDAInitializer(VarDecl *VD); /// Check whether NewFD is a valid overload for CUDA. Emits /// diagnostics and invalidates NewFD if not. void checkCUDATargetOverload(FunctionDecl *NewFD, const LookupResult &Previous); /// Copies target attributes from the template TD to the function FD. void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD); /// Returns the name of the launch configuration function. This is the name /// of the function that will be called to configure the kernel call, with the /// parameters specified via <<<>>>. std::string getCudaConfigureFuncName() const; /// \name Code completion //@{ /// Describes the context in which code completion occurs. enum ParserCompletionContext { /// Code completion occurs at top-level or namespace context. PCC_Namespace, /// Code completion occurs within a class, struct, or union. PCC_Class, /// Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// Code completion occurs within an Objective-C implementation or /// category implementation. PCC_ObjCImplementation, /// Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// Code completion occurs following one or more template /// headers. PCC_Template, /// Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// Code completion occurs within an expression. PCC_Expression, /// Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// Code completion occurs where only a type is permitted. PCC_Type, /// Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteExpression(Scope *S, QualType PreferredType, bool IsParenthesized = false); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase, SourceLocation OpLoc, bool IsArrow, bool IsBaseExprStatement, QualType PreferredType); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS, QualType PreferredType); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D, const VirtSpecifiers *VS = nullptr); void CodeCompleteBracketDeclarator(Scope *S); void CodeCompleteCase(Scope *S); /// Reports signatures for a call to CodeCompleteConsumer and returns the /// preferred type for the current argument. Returned type can be null. QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type, SourceLocation Loc, ArrayRef<Expr *> Args, SourceLocation OpenParLoc); QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl, CXXScopeSpec SS, ParsedType TemplateTypeTy, ArrayRef<Expr *> ArgExprs, IdentifierInfo *II, SourceLocation OpenParLoc); void CodeCompleteInitializer(Scope *S, Decl *D); /// Trigger code completion for a record of \p BaseType. \p InitExprs are /// expressions in the initializer list seen so far and \p D is the current /// Designation being parsed. 
void CodeCompleteDesignator(const QualType BaseType, llvm::ArrayRef<Expr *> InitExprs, const Designation &D); void CodeCompleteAfterIf(Scope *S, bool IsBracedThen); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext, bool IsUsingDeclaration, QualType BaseType, QualType PreferredType); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteAfterFunctionEquals(Declarator &D); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences( ArrayRef<IdentifierLocPair> Protocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName, SourceLocation ClassNameLoc, bool IsBaseExprStatement); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled); void CodeCompleteNaturalLanguage(); void CodeCompleteAvailabilityPlatformName(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto, const Expr *ThisArg, ArrayRef<const Expr *> Args, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckOSLogFormatStringArg(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckTSBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckSVEBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckCDEBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMCoprocessorImmediate(const TargetInfo &TI, const Expr *CoprocArg, bool WantCDE); bool CheckARMBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckBPFBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinCpu(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall); bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArguments(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinTileArgumentsRange(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinTileRangeAndDuplicate(CallExpr *TheCall, ArrayRef<int> ArgNums); bool CheckX86BuiltinFunctionCall(const 
TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckPPCBuiltinFunctionCall(const TargetInfo &TI, unsigned BuiltinID, CallExpr *TheCall); bool CheckAMDGCNBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); bool SemaBuiltinComplex(CallExpr *TheCall); bool SemaBuiltinVSX(CallExpr *TheCall); bool SemaBuiltinOSLogFormat(CallExpr *TheCall); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); bool SemaBuiltinSetjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult, bool IsDelete); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High, bool RangeIsError = true); bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum, unsigned Multiple); bool SemaBuiltinConstantArgPower2(CallExpr *TheCall, int ArgNum); bool SemaBuiltinConstantArgShiftedByte(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinConstantArgShiftedByteOrXXFF(CallExpr *TheCall, int ArgNum, unsigned ArgBits); bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall, int ArgNum, unsigned ExpectedFieldNum, bool AllowName); bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall); // Matrix builtin handling. 
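  // (These type-check calls to the matrix-type extension builtins, e.g. a
  // sketch: '__builtin_matrix_transpose(M)' for a matrix_type value M, or
  // '__builtin_matrix_column_major_load(Ptr, Rows, Cols, Stride)'.)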
ExprResult SemaBuiltinMatrixTranspose(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorLoad(CallExpr *TheCall, ExprResult CallResult); ExprResult SemaBuiltinMatrixColumnMajorStore(CallExpr *TheCall, ExprResult CallResult); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_FreeBSDKPrintf, FST_OSTrace, FST_OSLog, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); bool FormatStringHasSArg(const StringLiteral *FExpr); static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); public: void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS); private: void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(const Expr *E); /// Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// Check if there is a field shadowing. void CheckShadowInheritedFields(const SourceLocation &Loc, DeclarationName FieldName, const CXXRecordDecl *RD, bool DeclIsField = true); /// Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); /// Check whether receiver is mutable ObjC container which /// attempts to add itself into the container void CheckObjCCircularContainer(ObjCMessageExpr *Message); void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE); void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc, bool DeleteWasArrayForm); public: /// Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. 
unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// Perform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const ArrayRef<const Expr *> ExprArgs, SourceLocation CallSiteLoc); /// Check if we are taking the address of a packed field /// as this may be a problem if the pointer value is dereferenced. void CheckAddressOfPackedMember(Expr *rhs); /// The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; /// Nullability type specifiers. IdentifierInfo *Ident__Nonnull = nullptr; IdentifierInfo *Ident__Nullable = nullptr; IdentifierInfo *Ident__Null_unspecified = nullptr; IdentifierInfo *Ident_NSError = nullptr; /// The handler for the FileChanged preprocessor events. /// /// Used for diagnostics that implement custom semantic analysis for #include /// directives, like -Wpragma-pack. sema::SemaPPCallbacks *SemaPPCallbackHandler; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTDeclReader; friend class ASTWriter; public: /// Retrieve the keyword associated with the given nullability kind. IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability); /// The struct behind the CFErrorRef pointer. RecordDecl *CFError = nullptr; bool isCFError(RecordDecl *D); /// Retrieve the identifier "NSError". IdentifierInfo *getNSErrorIdent(); /// Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSManglingNumber() const { return CurScope->incrementMSManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } /// Determine the number of levels of enclosing template parameters. This is /// only usable while parsing. Note that this does not include dependent /// contexts in which no template parameters have yet been declared, such as /// in a terse function template or generic lambda before the first 'auto' is /// encountered. unsigned getTemplateDepth(Scope *S) const; /// To be used for checking whether the arguments being passed to a /// function exceed the number of parameters expected for it.
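  /// For example (a sketch, derived from the implementation below): for
  /// 'void f(int, int)', TooManyArguments(2, 3) is true and
  /// TooManyArguments(2, 2) is false, while during code completion just
  /// after a comma, TooManyArguments(2, 2, /*PartialOverloading=*/true) is
  /// true because the comma implies an extra argument in flight.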
static bool TooManyArguments(size_t NumParams, size_t NumArgs, bool PartialOverloading = false) { // We check whether we're just after a comma in code-completion. if (NumArgs > 0 && PartialOverloading) return NumArgs + 1 > NumParams; // If so, we view as an extra argument. return NumArgs > NumParams; } // Emitting members of dllexported classes is delayed until the class // (including field initializers) is fully parsed. SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses; SmallVector<CXXMethodDecl*, 4> DelayedDllExportMemberFunctions; private: int ParsingClassDepth = 0; class SavePendingParsedClassStateRAII { public: SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); } ~SavePendingParsedClassStateRAII() { assert(S.DelayedOverridingExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); assert(S.DelayedEquivalentExceptionSpecChecks.empty() && "there shouldn't be any pending delayed exception spec checks"); swapSavedState(); } private: Sema &S; decltype(DelayedOverridingExceptionSpecChecks) SavedOverridingExceptionSpecChecks; decltype(DelayedEquivalentExceptionSpecChecks) SavedEquivalentExceptionSpecChecks; void swapSavedState() { SavedOverridingExceptionSpecChecks.swap( S.DelayedOverridingExceptionSpecChecks); SavedEquivalentExceptionSpecChecks.swap( S.DelayedEquivalentExceptionSpecChecks); } }; /// Helper class that collects misaligned member designations and /// their location info for delayed diagnostics. struct MisalignedMember { Expr *E; RecordDecl *RD; ValueDecl *MD; CharUnits Alignment; MisalignedMember() : E(), RD(), MD(), Alignment() {} MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment) : E(E), RD(RD), MD(MD), Alignment(Alignment) {} explicit MisalignedMember(Expr *E) : MisalignedMember(E, nullptr, nullptr, CharUnits()) {} bool operator==(const MisalignedMember &m) { return this->E == m.E; } }; /// Small set of gathered accesses to potentially misaligned members /// due to the packed attribute. SmallVector<MisalignedMember, 4> MisalignedMembers; /// Adds an expression to the set of gathered misaligned members. void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD, CharUnits Alignment); public: /// Diagnoses the current set of gathered accesses. This typically /// happens at full expression level. The set is cleared after emitting the /// diagnostics. void DiagnoseMisalignedMembers(); /// This function checks if the expression is in the sef of potentially /// misaligned members and it is converted to some pointer type T with lower /// or equal alignment requirements. If so it removes it. This is used when /// we do not want to diagnose such misaligned access (e.g. in conversions to /// void*). void DiscardMisalignedMemberAddress(const Type *T, Expr *E); /// This function calls Action when it determines that E designates a /// misaligned member due to the packed attribute. This is used to emit /// local diagnostics like in reference binding. void RefersToMemberWithReducedAlignment( Expr *E, llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)> Action); /// Describes the reason a calling convention specification was ignored, used /// for diagnostics. enum class CallingConventionIgnoredReason { ForThisTarget = 0, VariadicFunction, ConstructorDestructor, BuiltinFunction }; /// Creates a DeviceDiagBuilder that emits the diagnostic if the current /// context is "used as device code". 
/// /// - If CurLexicalContext is a kernel function or it is known that the /// function will be emitted for the device, emits the diagnostics /// immediately. /// - If CurLexicalContext is a function and we are compiling /// for the device, but we don't know that this function will be codegen'ed /// for devive yet, creates a diagnostic which is emitted if and when we /// realize that the function will be codegen'ed. /// /// Example usage: /// /// Diagnose __float128 type usage only from SYCL device code if the current /// target doesn't support it /// if (!S.Context.getTargetInfo().hasFloat128Type() && /// S.getLangOpts().SYCLIsDevice) /// SYCLDiagIfDeviceCode(Loc, diag::err_type_unsupported) << "__float128"; DeviceDiagBuilder SYCLDiagIfDeviceCode(SourceLocation Loc, unsigned DiagID); /// Check whether we're allowed to call Callee from the current context. /// /// - If the call is never allowed in a semantically-correct program /// emits an error and returns false. /// /// - If the call is allowed in semantically-correct programs, but only if /// it's never codegen'ed, creates a deferred diagnostic to be emitted if /// and when the caller is codegen'ed, and returns true. /// /// - Otherwise, returns true without emitting any diagnostics. /// /// Adds Callee to DeviceCallGraph if we don't know if its caller will be /// codegen'ed yet. bool checkSYCLDeviceFunction(SourceLocation Loc, FunctionDecl *Callee); }; /// RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; bool Entered = true; public: EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other, bool ShouldEnter = true) : Actions(Actions), Entered(ShouldEnter) { if (Entered) Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, ExprContext); } EnterExpressionEvaluationContext( Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext = Sema::ExpressionEvaluationContextRecord::EK_Other) : Actions(Actions) { Actions.PushExpressionEvaluationContext( NewContext, Sema::ReuseLambdaContextDecl, ExprContext); } enum InitListTag { InitList }; EnterExpressionEvaluationContext(Sema &Actions, InitListTag, bool ShouldEnter = true) : Actions(Actions), Entered(false) { // In C++11 onwards, narrowing checks are performed on the contents of // braced-init-lists, even when they occur within unevaluated operands. // Therefore we still need to instantiate constexpr functions used in such // a context. if (ShouldEnter && Actions.isUnevaluatedContext() && Actions.getLangOpts().CPlusPlus11) { Actions.PushExpressionEvaluationContext( Sema::ExpressionEvaluationContext::UnevaluatedList); Entered = true; } } ~EnterExpressionEvaluationContext() { if (Entered) Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// The template function declaration to be late parsed. 
  Decl *D;
};

} // end namespace clang

namespace llvm {

// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
  using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
  using FDBaseInfo =
      DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;

  static FunctionDeclAndLoc getEmptyKey() {
    return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
  }

  static FunctionDeclAndLoc getTombstoneKey() {
    return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
  }

  static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
    return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
                        FDL.Loc.getRawEncoding());
  }

  static bool isEqual(const FunctionDeclAndLoc &LHS,
                      const FunctionDeclAndLoc &RHS) {
    return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
  }
};

} // namespace llvm

#endif
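// Illustrative sketch (editor's note, not part of the header): with the
// DenseMapInfo specialization above, a canonical FunctionDecl plus a
// SourceLocation can key an llvm::DenseMap directly. The map and variable
// names below are hypothetical.
//
//   llvm::DenseMap<clang::Sema::FunctionDeclAndLoc, unsigned> SeenCallSites;
//   ++SeenCallSites[{CanonicalFD, CallLoc}]; // combined via hash_combine above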
ex1-1.c
#include <stdio.h>
#include <omp.h>

int main(void)
{
    #pragma omp parallel
    {
        int tcount = omp_get_num_threads();
        int tid = omp_get_thread_num();
        printf("Hello openmp from thread = %d/%d\n", tid, tcount);
    }
    return 0;
}
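/* A minimal build-and-run sketch (assumes gcc with OpenMP support; the flag
 * and environment variable below are standard OpenMP usage, not something
 * specific to this file):
 *
 *   gcc -fopenmp ex1-1.c -o ex1-1
 *   OMP_NUM_THREADS=4 ./ex1-1
 *
 * One line is printed per thread, in nondeterministic order, e.g.
 *   Hello openmp from thread = 2/4
 */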
GB_cumsum.c
//------------------------------------------------------------------------------
// GB_cumsum: cumulative sum of an array
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Compute the cumulative sum of an array count[0:n], of size n+1
// in pseudo-MATLAB notation:

//      k = sum (count [0:n-1] != 0) ;

//      count = cumsum ([0 count[0:n-1]]) ;

// That is, count [j] on input is overwritten with the value of
// sum (count [0..j-1]).  count [n] is implicitly zero on input.
// On output, count [n] is the total sum.

#include "GB.h"

GB_PUBLIC   // accessed by the MATLAB tests in GraphBLAS/Test only
void GB_cumsum                  // cumulative sum of an array
(
    int64_t *GB_RESTRICT count,     // size n+1, input/output
    const int64_t n,
    int64_t *GB_RESTRICT kresult,   // return k, if needed by the caller
    int nthreads
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (count != NULL) ;
    ASSERT (n >= 0) ;

    //--------------------------------------------------------------------------
    // determine # of threads to use
    //--------------------------------------------------------------------------

    #if !defined ( _OPENMP )
    nthreads = 1 ;
    #endif

    if (nthreads > 1)
    {
        nthreads = GB_IMIN (nthreads, n / 1024) ;
        nthreads = GB_IMAX (nthreads, 1) ;
    }

    //--------------------------------------------------------------------------
    // count = cumsum ([0 count[0:n-1]]) ;
    //--------------------------------------------------------------------------

    if (kresult == NULL)
    {
        if (nthreads <= 2)
        {

            //------------------------------------------------------------------
            // cumsum with one thread
            //------------------------------------------------------------------

            int64_t s = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                int64_t c = count [i] ;
                count [i] = s ;
                s += c ;
            }
            count [n] = s ;

        }
        else
        {

            //------------------------------------------------------------------
            // cumsum with multiple threads
            //------------------------------------------------------------------

            // allocate workspace
            int64_t *ws = NULL ;
            GB_MALLOC_MEMORY (ws, nthreads, sizeof (int64_t)) ;
            if (ws == NULL)
            {
                // out of memory; use a single thread instead
                GB_cumsum (count, n, NULL, 1) ;
                return ;
            }

            #pragma omp parallel num_threads(nthreads)
            {
                // each thread sums up its own part
                int tid = GB_OPENMP_THREAD_ID ;
                int64_t istart, iend ;
                GB_PARTITION (istart, iend, n, tid, nthreads) ;
                int64_t s = 0 ;
                for (int64_t i = istart ; i < iend ; i++)
                {
                    s += count [i] ;
                }
                ws [tid] = s ;

                #pragma omp barrier

                // each thread computes the cumsum of its own part
                s = 0 ;
                for (int i = 0 ; i < tid ; i++)
                {
                    s += ws [i] ;
                }
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    count [i] = s ;
                    s += c ;
                }
                if (iend == n)
                {
                    count [n] = s ;
                }
            }

            // free workspace
            GB_FREE_MEMORY (ws, nthreads, sizeof (int64_t)) ;
        }

    }
    else
    {

        if (nthreads <= 2)
        {

            //------------------------------------------------------------------
            // cumsum with one thread, also compute k
            //------------------------------------------------------------------

            int64_t k = 0 ;
            int64_t s = 0 ;
            for (int64_t i = 0 ; i < n ; i++)
            {
                int64_t c = count [i] ;
                if (c != 0) k++ ;
                count [i] = s ;
                s += c ;
            }
            count [n] = s ;
            (*kresult) = k ;

        }
        else
        {
            //------------------------------------------------------------------
            // cumsum with multiple threads, also compute k
            //------------------------------------------------------------------

            int64_t *ws = NULL ;
            GB_MALLOC_MEMORY (ws, 2*nthreads, sizeof (int64_t)) ;
            if (ws == NULL)
            {
                // out of memory; use a single thread instead
                GB_cumsum (count, n, kresult, 1) ;
                return ;
            }
            int64_t *wk = ws + nthreads ;

            #pragma omp parallel num_threads(nthreads)
            {
                // each thread sums up its own part
                int tid = GB_OPENMP_THREAD_ID ;
                int64_t istart, iend ;
                GB_PARTITION (istart, iend, n, tid, nthreads) ;
                int64_t k = 0 ;
                int64_t s = 0 ;
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    if (c != 0) k++ ;
                    s += c ;
                }
                ws [tid] = s ;
                wk [tid] = k ;

                #pragma omp barrier

                // each thread computes the cumsum of its own part
                s = 0 ;
                for (int i = 0 ; i < tid ; i++)
                {
                    s += ws [i] ;
                }
                for (int64_t i = istart ; i < iend ; i++)
                {
                    int64_t c = count [i] ;
                    count [i] = s ;
                    s += c ;
                }
                if (iend == n)
                {
                    count [n] = s ;
                }
            }

            int64_t k = 0 ;
            for (int tid = 0 ; tid < nthreads ; tid++)
            {
                k += wk [tid] ;
            }
            (*kresult) = k ;

            // free workspace
            GB_FREE_MEMORY (ws, 2*nthreads, sizeof (int64_t)) ;
        }
    }
}
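/* Editor's note: a self-contained sketch of the two-pass parallel pattern
 * used above, written with plain OpenMP instead of the internal GB_* macros
 * (the partitioning and the fixed 64-slot workspace here are assumptions of
 * this sketch, not the library's own scheme). Pass 1: each thread sums its
 * slice; pass 2: after a barrier, each thread rewrites its slice using the
 * exclusive prefix of the earlier threads' totals.
 */
#include <stdio.h>
#include <stdint.h>
#include <omp.h>

static void cumsum_demo(int64_t *count, int64_t n, int nthreads)
{
    int64_t ws[64];                          /* per-thread partial sums */
    if (nthreads > 64) nthreads = 64;
    #pragma omp parallel num_threads(nthreads)
    {
        int tid = omp_get_thread_num();
        int nt = omp_get_num_threads();      /* may be fewer than requested */
        int64_t istart = n * tid / nt;
        int64_t iend   = n * (tid + 1) / nt;
        int64_t s = 0;
        for (int64_t i = istart; i < iend; i++) s += count[i];
        ws[tid] = s;                         /* pass 1: slice totals */
        #pragma omp barrier
        s = 0;
        for (int t = 0; t < tid; t++) s += ws[t];
        for (int64_t i = istart; i < iend; i++)
        {
            int64_t c = count[i];            /* pass 2: exclusive prefix sum */
            count[i] = s;
            s += c;
        }
        if (iend == n) count[n] = s;         /* last thread writes the total */
    }
}

int main(void)
{
    int64_t count[9] = {1, 2, 3, 4, 5, 6, 7, 8, 0};  /* size n+1, n = 8 */
    cumsum_demo(count, 8, 4);
    for (int i = 0; i <= 8; i++) printf("%lld ", (long long) count[i]);
    printf("\n");                            /* prints: 0 1 3 6 10 15 21 28 36 */
    return 0;
}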
gi_robins_sliding_regular_grid.h
/* * * Copyright (C) 2018 Attila Gyulassy <[email protected]> * All rights reserved. * * This software may be modified and distributed under the terms * of the BSD license. See the LICENSE file for details. */ #ifndef BAST_ROBINS_NOALLOC_H #define BAST_ROBINS_NOALLOC_H #include "gi_labeling.h" #include "gi_discrete_gradient_labeling.h" //#include "gi_topological_simplicial_complex.h" #include "gi_topological_regular_grid_3x3x3.h" #include "gi_bifiltration_pairing.h" #include "gi_regular_grid_trilinear_function.h" #include "gi_max_vertex_labeling.h" //#define SANITY_CHECKS namespace GInt { //#define DEBUGPARALLEL //#define DEBUGPARALLEL template<class GridType, class GridFuncType, class MeshType, class MaxVLType, class GradType> class SlidingWindowRobinsNoalloc { protected: GridType* mGrid; GridFuncType* mFunc; MeshType* mMesh; MaxVLType* mMaxVL; DenseLabeling<char>* mResLabel; GradType* mGrad; MyRobinsNoalloc<MeshType, MaxVLType, GradType, 5, 4>* mStandardRobins; INDEX_TYPE m_data_27_offsets[27]; #ifdef DEBUGPARALLEL int* db_counter; #endif struct MESH_CONTEXT { Explicit3x3x3SmallRegularGrid* small_mesh; RegularGrid3D* small_grid; RegularGridTrilinearFunction* small_grid_func; RegularGridMaxMinVertexLabeling3D<Explicit3x3x3SmallRegularGrid, RegularGridTrilinearFunction>* small_mesh_maxmin_labeling; INDEX_TYPE small_mesh_id_to_big_mesh_id[125]; FLOATTYPE small_grid_values[27]; }; struct small_INDEX_vector { INDEX_TYPE vec[27]; int size; void push_back(INDEX_TYPE val) { vec[size] = val; ++size; } small_INDEX_vector() : size(0) {} const INDEX_TYPE& operator[](int i) const { return vec[i]; } INDEX_TYPE& operator[](int i) { return vec[i]; } }; struct cell_pairing { int num_missing; INDEX_TYPE pair; bool paired; cell_pairing() { //printf("SHOULD NEVER CALL\n"); pair = -1; paired = false; num_missing = 0; } }; struct myStaticMap { INDEX_TYPE sm_ids_in_lstar[27]; int in_lstar[125]; cell_pairing cell_pairings[125]; int size; void push_back(INDEX_TYPE small_mesh_id) { sm_ids_in_lstar[size] = small_mesh_id; in_lstar[small_mesh_id] = 1; ++size; } // initialize all in_star to 0 myStaticMap() : size(0), in_lstar{} {} int is_in_lstar(INDEX_TYPE id) const { return in_lstar[id] != 0; } }; void printStaticMapState(myStaticMap& m) { for (int i = 0; i < m.size; i++) { INDEX_TYPE id = m.sm_ids_in_lstar[i]; int inlstar = m.in_lstar[id]; cell_pairing& cp = m.cell_pairings[id]; printf("%lld -> %lld, nm=%d, inlst=%d, pinlst=%d, p=%d, ref=%d\n", id, cp.pair, cp.num_missing, inlstar, m.in_lstar[cp.pair], cp.paired, m.cell_pairings[cp.pair].pair == id); } } //std::queue<INDEX_TYPE> readytogo; void decrementCofacets(const MESH_CONTEXT& mc, INDEX_TYPE id, myStaticMap& small_mesh_cell_pairings) const { typename Explicit3x3x3SmallRegularGrid::CofacetsIterator cfit(mc.small_mesh); for (cfit.begin(id); cfit.valid(); cfit.advance()) { INDEX_TYPE cid = cfit.value(); if (small_mesh_cell_pairings.is_in_lstar(cid)) small_mesh_cell_pairings.cell_pairings[cid].num_missing--; } } bool is_steeper(const MESH_CONTEXT& mc, INDEX_TYPE vn_1, INDEX_TYPE vn_2) const { return mc.small_mesh_maxmin_labeling->Before(vn_1, vn_2); } public: INDEX_TYPE lowest_vertex(const MESH_CONTEXT& mc, INDEX_TYPE cid) const { return mc.small_mesh->VertexNumberFromCellID(mc.small_mesh_maxmin_labeling->Cell2LowestVertex(cid)); } protected: INDEX_TYPE PickLowestCandidate(const MESH_CONTEXT& mc, small_INDEX_vector& cands, myStaticMap& small_mesh_cell_pairings) const { if (cands.size == 1) return cands[0]; INDEX_TYPE curr_lowest_id = cands[0]; INDEX_TYPE lv_vid = 
                lowest_vertex(mc, curr_lowest_id);
            for (int i = 1; i < cands.size; i++) {
                INDEX_TYPE olv_vid = lowest_vertex(mc, cands[i]);
                if (mc.small_mesh_maxmin_labeling->Before(olv_vid, lv_vid)) {
                    lv_vid = olv_vid;
                    curr_lowest_id = cands[i];
                }
            }
            return curr_lowest_id;
        }

        void HomotopyExpand(const MESH_CONTEXT& mc, small_INDEX_vector& lstar_cell_sm_ids, myStaticMap& small_mesh_cell_pairings) const {
            // first push all small mesh ids into the cell pairings map
            small_INDEX_vector list_of_d_cells_sm_ids[4];
            for (int i = 0; i < lstar_cell_sm_ids.size; i++) {
                INDEX_TYPE small_mesh_id = lstar_cell_sm_ids[i];
                small_mesh_cell_pairings.push_back(small_mesh_id);
            }
            // count number of facets that are in the lower star
            for (int i = 0; i < small_mesh_cell_pairings.size; i++) {
                INDEX_TYPE small_mesh_cell_id = small_mesh_cell_pairings.sm_ids_in_lstar[i];
                list_of_d_cells_sm_ids[mc.small_mesh->dimension(small_mesh_cell_id)].push_back(small_mesh_cell_id);
                typename Explicit3x3x3SmallRegularGrid::FacetsIterator small_mesh_facets_iterator(mc.small_mesh);
                for (small_mesh_facets_iterator.begin(small_mesh_cell_id); small_mesh_facets_iterator.valid(); small_mesh_facets_iterator.advance()) {
                    INDEX_TYPE small_mesh_facet_id = small_mesh_facets_iterator.value();
                    if (small_mesh_cell_pairings.is_in_lstar(small_mesh_facet_id))
                        small_mesh_cell_pairings.cell_pairings[small_mesh_cell_id].num_missing++;
                }
            }

            // if there is a vertex we should pick steepest descent
            if (list_of_d_cells_sm_ids[0].size > 0) {
                // we should only have one
#ifdef SANITY_CHECKS
                if (list_of_d_cells_sm_ids[0].size > 1) {
                    printf("ERROR: too many vertices %d\n", list_of_d_cells_sm_ids[0].size);
                    printf("\n");
                }
#endif
                // the id of the vertex is simply the first element of the list
                INDEX_TYPE sm_vertex_id = list_of_d_cells_sm_ids[0][0];

                // if there are no edges, then make the vertex critical, else pair with an edge
                if (list_of_d_cells_sm_ids[1].size == 0) {
                    // make vertex critical
                    small_mesh_cell_pairings.cell_pairings[sm_vertex_id].pair = sm_vertex_id;
                    small_mesh_cell_pairings.cell_pairings[sm_vertex_id].paired = true;
                    decrementCofacets(mc, sm_vertex_id, small_mesh_cell_pairings);
                }
                else {
                    // to pair with the steepest down edge, we want to look through the list
                    INDEX_TYPE sm_lowest_edge_id;
                    // if there is only one edge, it's easy, pick that!
if (list_of_d_cells_sm_ids[1].size == 1) { // just pair with only option sm_lowest_edge_id = list_of_d_cells_sm_ids[1][0]; } else { // find minimal edge sm_lowest_edge_id = list_of_d_cells_sm_ids[1][0]; // set to first INDEX_TYPE temp_lowest_vertex_vn = mc.small_mesh->VertexNumberFromCellID(mc.small_mesh_maxmin_labeling->Cell2LowestVertex(sm_lowest_edge_id)); #ifdef SANITY_CHECKS if (temp_lowest_vertex_vn == sm_vertex_id) { printf("ERROR: how the heck can the lowest vertex of an edge be its lstar thingy\n"); } #endif for (int i = 1; i < list_of_d_cells_sm_ids[1].size; i++) { INDEX_TYPE other_edge_id = list_of_d_cells_sm_ids[1][i]; INDEX_TYPE temp_other_vertex_vn = mc.small_mesh->VertexNumberFromCellID(mc.small_mesh_maxmin_labeling->Cell2LowestVertex(other_edge_id)); if (is_steeper(mc, temp_other_vertex_vn, temp_lowest_vertex_vn)) { sm_lowest_edge_id = other_edge_id; temp_lowest_vertex_vn = temp_other_vertex_vn; } } } // pair in direction of steepest descent small_mesh_cell_pairings.cell_pairings[sm_vertex_id].pair = sm_lowest_edge_id; small_mesh_cell_pairings.cell_pairings[sm_vertex_id].paired = true; small_mesh_cell_pairings.cell_pairings[sm_lowest_edge_id].pair = sm_vertex_id; small_mesh_cell_pairings.cell_pairings[sm_lowest_edge_id].paired = true; decrementCofacets(mc, sm_vertex_id, small_mesh_cell_pairings); decrementCofacets(mc, sm_lowest_edge_id, small_mesh_cell_pairings); } } for (int i = 0; i < 4; i++) { //while (!sorted.empty()) { // logic is we need to process every cell of dimension i // until all have been processed, first try to pair // if no pairing was successful, make one critical and repeat int num_processed = 0; int total_to_process = 0; for (int j = 0; j < list_of_d_cells_sm_ids[i].size; j++) { INDEX_TYPE i_cell_id = list_of_d_cells_sm_ids[i][j]; if (!small_mesh_cell_pairings.cell_pairings[i_cell_id].paired) total_to_process++; } while (num_processed < total_to_process) { int start_num_proc = num_processed; // try to pair as many as possible for (int j = 0; j < list_of_d_cells_sm_ids[i].size; j++) { INDEX_TYPE i_cell_id = list_of_d_cells_sm_ids[i][j]; if (small_mesh_cell_pairings.cell_pairings[i_cell_id].paired) continue; // already paired #ifdef DEBUG_PARALLEL if (small_mesh_cell_pairings.cell_pairings[i_cell_id].num_missing > 0) { printf("ERROR: should never get here1\n"); } #endif small_INDEX_vector candidates; typename Explicit3x3x3SmallRegularGrid::CofacetsIterator cfit(mc.small_mesh); for (cfit.begin(i_cell_id); cfit.valid(); cfit.advance()) { INDEX_TYPE cfid = cfit.value(); if (!small_mesh_cell_pairings.is_in_lstar(cfid)) continue; // not in our lower star #ifdef DEBUG_PARALLEL if (small_mesh_cell_pairings.cell_pairings[cfid].paired) { printf("ERROR: should never get here2\n"); } #endif if (small_mesh_cell_pairings.cell_pairings[cfid].num_missing == 1) { // pair lstar_cell_sm_ids candidates.push_back(cfid); } } //if (candidates.size() > 1) printf("got here candidates: %d\n", candidates.size()); if (candidates.size > 0) { INDEX_TYPE cfid = PickLowestCandidate(mc, candidates, small_mesh_cell_pairings); small_mesh_cell_pairings.cell_pairings[i_cell_id].pair = cfid; small_mesh_cell_pairings.cell_pairings[i_cell_id].paired = true; small_mesh_cell_pairings.cell_pairings[cfid].pair = i_cell_id; small_mesh_cell_pairings.cell_pairings[cfid].paired = true; decrementCofacets(mc, i_cell_id, small_mesh_cell_pairings); decrementCofacets(mc, cfid, small_mesh_cell_pairings); num_processed++; break; } } if (start_num_proc == num_processed) { // then no more pairs were possible 
                    small_INDEX_vector candidates;
                    for (int j = 0; j < list_of_d_cells_sm_ids[i].size; j++) {
                        INDEX_TYPE i_cell_id = list_of_d_cells_sm_ids[i][j];
                        if (small_mesh_cell_pairings.cell_pairings[i_cell_id].paired) continue; // already paired
                        // gather the unpaired cells; the lowest will be made critical
                        candidates.push_back(i_cell_id);
                    }
                    INDEX_TYPE id = PickLowestCandidate(mc, candidates, small_mesh_cell_pairings);
                    // make the lowest unpaired candidate critical
                    small_mesh_cell_pairings.cell_pairings[id].pair = id;
                    small_mesh_cell_pairings.cell_pairings[id].paired = true;
                    decrementCofacets(mc, id, small_mesh_cell_pairings);
                    num_processed++;
                }
            }
        }
        //printf("out:");
        //for (auto c : small_mesh_cell_pairings) {
        //    if (c.second.pair == c.second.id) printf(" (%d:%llu)", mc.small_mesh->dimension(c.second.id), c.second.pair);
        //    if (mc.small_mesh->dimension(c.second.id) < mc.small_mesh->dimension(c.second.pair))
        //        printf(" (%d:%llu->%d:%llu)", mc.small_mesh->dimension(c.second.id), c.second.id, mc.small_mesh->dimension(c.second.pair), c.second.pair);
        //}
        //printf("\n");
    }

    void init() {
        mStandardRobins = new MyRobinsNoalloc<MeshType, MaxVLType, GradType, 5, 4>(mMesh, mMaxVL, NULL, mGrad);
        // get data offsets for a 27 neighborhood
        INDEX_TYPE t_did111 = mGrid->Index3d(Vec3l(1, 1, 1));
        int t_pos = 0;
        for (int k = -1; k <= 1; k++) {
            for (int j = -1; j <= 1; j++) {
                for (int i = -1; i <= 1; i++) {
                    m_data_27_offsets[t_pos++] = mGrid->Index3d(Vec3l(i + 1, j + 1, k + 1)) - t_did111;
                }
            }
        }
    }

public:
    SlidingWindowRobinsNoalloc(
        GridType* grid,
        GridFuncType* grid_func,
        MeshType* mesh,
        MaxVLType* label1,
        GradType* grad) :
        mGrid(grid), mFunc(grid_func), mMesh(mesh), mMaxVL(label1), mResLabel(NULL), mGrad(grad)
    {
        init();
    }

    ~SlidingWindowRobinsNoalloc() {
    }

    void ComputeLowerStar(const MESH_CONTEXT& mc, INDEX_TYPE small_mesh_vertex_id) {
        small_INDEX_vector lower_star_list[1];

        // now add all lower star cells to the restriction sets
        typename Explicit3x3x3SmallRegularGrid::AdjacentCellsIterator star(mc.small_mesh);
        for (star.begin(small_mesh_vertex_id); star.valid(); star.advance()) {
            INDEX_TYPE small_mesh_vertex_neighbor = star.value();
            // discard a cell if its highest vertex is NOT the vertex, hence not part of lower star
            INDEX_TYPE highest_small_mesh_vertex_id = mc.small_mesh_maxmin_labeling->Cell2HighestVertex(small_mesh_vertex_neighbor);
            if (highest_small_mesh_vertex_id != small_mesh_vertex_id) continue; // not in lower star of f1

            lower_star_list[0].push_back(small_mesh_vertex_neighbor);
#ifdef DEBUGPARALLEL
#pragma omp critical
            {
                if (omp_get_thread_num() > 0) printf("here\n");
                db_counter[mc.small_mesh_id_to_big_mesh_id[small_mesh_vertex_neighbor]]++;
            }
#endif
        }

        // now do homotopy expansion on the lower star
        myStaticMap small_mesh_cell_pairings;
        HomotopyExpand(mc, lower_star_list[0], small_mesh_cell_pairings);
#ifdef SANITY_CHECKS
        printStaticMapState(small_mesh_cell_pairings);
        printf("\n");
#endif
        for (int j = 0; j < small_mesh_cell_pairings.size; j++) {
            INDEX_TYPE small_mesh_id = small_mesh_cell_pairings.sm_ids_in_lstar[j];
            INDEX_TYPE small_mesh_id_pair = small_mesh_cell_pairings.cell_pairings[small_mesh_id].pair;
            INDEX_TYPE big_mesh_id = mc.small_mesh_id_to_big_mesh_id[small_mesh_id];
            INDEX_TYPE big_mesh_id_pair = mc.small_mesh_id_to_big_mesh_id[small_mesh_id_pair];
            // now map the pairing back to the global (big) mesh
if (big_mesh_id == big_mesh_id_pair) { mGrad->setCritical(big_mesh_id, true); mGrad->setAssigned(big_mesh_id, 1); #ifdef DEBUGPARALLEL2 #pragma omp critical { db_counter[big_mesh_id]++; } #endif } else { // SANITY CHECKS #ifdef SANITY_CHECKS // check small mesh sanity if (mc.small_mesh_maxmin_labeling->Cell2HighestVertex(small_mesh_id) != mc.small_mesh_maxmin_labeling->Cell2HighestVertex(small_mesh_id_pair)) { printf("whoathere\n"); } if (mMaxVL->Cell2HighestVertex(big_mesh_id) != mMaxVL->Cell2HighestVertex(big_mesh_id_pair)) { printf("whoathere\n"); } #endif // END SANITY CHECKS mGrad->setPair(big_mesh_id, big_mesh_id_pair); mGrad->setPair(big_mesh_id_pair, big_mesh_id); mGrad->setAssigned(big_mesh_id, 1); mGrad->setAssigned(big_mesh_id_pair, 1); #ifdef DEBUGPARALLEL2 #pragma omp critical { db_counter[big_mesh_id]++; db_counter[big_mesh_id_pair]++; } #endif } } } void ComputePairing() { std::chrono::steady_clock::time_point now_time = std::chrono::steady_clock::now(); std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now(); // dimensions of the mesh Vec3l big_mesh_xyz = mMesh->XYZ(); //int lstars_count = 0; #ifdef DEBUGPARALLEL db_counter = new int[mMesh->numCells()]; memset(db_counter, 0, sizeof(int) * mMesh->numCells()); #endif // START PARALLEL WORK #pragma omp parallel { std::vector<INDEX_TYPE> topo_index_partition; int num_threads; num_threads = omp_get_num_threads(); ArrayIndexPartitioner::EvenChunkSplit(mMesh->numCells(), num_threads, topo_index_partition); int thread_num = omp_get_thread_num(); // these coordinates are INCLUSIVE - which means do start and end INDEX_TYPE thread_start_id = topo_index_partition[thread_num]; INDEX_TYPE thread_end_id = topo_index_partition[thread_num + 1] - 1; Vec3l start_coord, end_coord; mMesh->cellid2Coords(thread_start_id, start_coord); mMesh->cellid2Coords(thread_end_id, end_coord); // get inclusive coord //#pragma omp critical // { // printf("thread %d doing:\n\t", thread_num); // start_coord.PrintInt(); printf("\t"); // end_coord.PrintInt(); // } // iterate over all vertices MESH_CONTEXT mc; // gather the pointers rather than have to pass a million items // only need maxvl labeling and function values // and maybe reslabel mc.small_mesh = new Explicit3x3x3SmallRegularGrid(); mc.small_grid = new RegularGrid3D(Vec3l(3, 3, 3), Vec3b(0, 0, 0)); mc.small_grid_func = new RegularGridTrilinearFunction(mc.small_grid, mc.small_grid_values); // wrapper for our values // place to store our local copy of the max/min vertices for each cell mc.small_mesh_maxmin_labeling = new RegularGridMaxMinVertexLabeling3D<Explicit3x3x3SmallRegularGrid, RegularGridTrilinearFunction>(mc.small_mesh, mc.small_grid_func); mc.small_mesh_maxmin_labeling->HACK_init(); const INDEX_TYPE kernel_baseid = mc.small_mesh->coords2Cellid(Vec3l(2, 2, 2)); const INDEX_TYPE kernel_data_baseid = mc.small_grid->Index3d(Vec3l(1, 1, 1)); int kstart = start_coord[2]; if (kstart % 2 == 1) kstart--; // kstart cannot start on an odd number, if it is odd, start on prior? 
if (kstart == 0) kstart = 2; int kend = end_coord[2]; if (kend == big_mesh_xyz[2] - 1) kend = big_mesh_xyz[2] - 2; // NOW DO ALL INTERIOR VERTICES #ifdef DEBUGPARALLEL #pragma omp critical { printf("thread %d doing actual k: [%d:%d]\n", thread_num, kstart, kend); } #endif for (int k = kstart; k <= kend; k += 2) { // do parallel division of work const int d_k = k >> 1; // data k int jstart = 2; int jend = big_mesh_xyz[1] - 1; //if (k == kstart) { // jstart = start_coord[1]; //} //if (k == kend) { // jend = end_coord[1] - 2; //} for (int j = jstart; j < jend; j += 2) { const int d_j = j >> 1; // data j const INDEX_TYPE baseid_nox = mMesh->coords2Cellid(Vec3l(0, j, k)); const INDEX_TYPE data_baseid_nox = mGrid->Index3d(Vec3l(0, d_j, d_k)); int istart = 2; int iend = big_mesh_xyz[0] - 1; //if (k == kstart && j == start_coord[1]) { // istart = start_coord[0]; //} //if (k == kend && j == end_coord[1]) { // iend = end_coord[0] - 2; //} for (int i = istart; i < iend; i += 2) { const int d_i = i >> 1; // data i const INDEX_TYPE baseid = baseid_nox + i; if (baseid < thread_start_id || baseid > thread_end_id) continue; const INDEX_TYPE data_baseid = data_baseid_nox + d_i; #ifdef DEBUGPARALLEL #pragma omp critical { printf("thread %d doing actual %d,%d,%d\n", thread_num, i,j,k); } #endif #ifdef SANITY_CHECKS this->mStandardRobins->ComputeLowerStar(baseid); INDEX_TYPE pre_pair = mGrad->getPair(baseid); INDEX_TYPE pre_ppair = mGrad->getPair(pre_pair); BYTE_TYPE GRADS[27]; #endif // so for each vertex FIRST copy in the values // we can optimize this later to do less global lookups for (int pos = 0; pos < 27; pos++) { //int sd_nid = mc.small_mesh->get27NeighborOffset(pos) + kernel_baseid; INDEX_TYPE big_mesh_vertex_id = mMesh->get27NeighborOffset(pos) + baseid; INDEX_TYPE big_grid_vertex_data_id = m_data_27_offsets[pos] + data_baseid; INDEX_TYPE kernel_vertex_id = kernel_baseid + mc.small_mesh->get27NeighborOffset(pos); //INDEX_TYPE kernel_data_nid = kernel_data_baseid + m_data_27_offsets[pos]; // this should just = pos?? #ifdef SANITY_CHECKS if (this->mMaxVL->Cell2HighestVertex(big_mesh_vertex_id) == baseid) { GRADS[pos] = mGrad->getAsChar(big_mesh_vertex_id); mGrad->clearGrad(big_mesh_vertex_id); } #endif mc.small_mesh_id_to_big_mesh_id[kernel_vertex_id] = big_mesh_vertex_id; mc.small_mesh_maxmin_labeling->SetUncompressedMaxVal(kernel_vertex_id, this->mMaxVL->GetUncompressedMaxVal(big_mesh_vertex_id)); mc.small_mesh_maxmin_labeling->SetUncompressedMinVal(kernel_vertex_id, this->mMaxVL->GetUncompressedMinVal(big_mesh_vertex_id)); mc.small_grid_values[pos] = this->mFunc->SampleImage(big_grid_vertex_data_id); } ComputeLowerStar(mc, kernel_baseid); #ifdef SANITY_CHECKS for (int pos = 0; pos < 27; pos++) { //int sd_nid = mc.small_mesh->get27NeighborOffset(pos) + kernel_baseid; INDEX_TYPE big_mesh_vertex_id = mMesh->get27NeighborOffset(pos) + baseid; //INDEX_TYPE kernel_data_nid = kernel_data_baseid + m_data_27_offsets[pos]; // this should just = pos?? 
if (this->mMaxVL->Cell2HighestVertex(big_mesh_vertex_id) == baseid) { BYTE_TYPE comp = mGrad->getAsChar(big_mesh_vertex_id); if (comp != GRADS[pos]) { printf("Error %d != %d\n", comp, GRADS[pos]); } } } #endif #ifdef SANITY_CHECKS INDEX_TYPE post_pair = mGrad->getPair(baseid); lstars_count++; #endif } } } } printf("INTERIOR: new robins1 in %dms\n", std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - now_time).count()); now_time = std::chrono::steady_clock::now(); //lstars_count = 0; // DO Z Plane Boundaries #pragma omp parallel for for (int j = 0; j < big_mesh_xyz[1]; j += 2) { for (auto k : std::vector<INDEX_TYPE>({ 0, big_mesh_xyz[2] - 1 })) { // do parallel division of work for (int i = 0; i < big_mesh_xyz[0]; i += 2) { const INDEX_TYPE baseid = mMesh->coords2Cellid(Vec3l(i, j, k)); this->mStandardRobins->ComputeLowerStar(baseid); //lstars_count++; } } } //printf("did %d Z boundaries\n", lstars_count); //int tmp = lstars_count; //lstars_count = 0; // DO Y Plane Boundaries #pragma omp parallel for for (int k = 2; k < big_mesh_xyz[2] - 2; k += 2) { // smaller range since we did k = 0 and k = xyz[2]-1 for (auto j : std::vector<INDEX_TYPE>({ 0, big_mesh_xyz[1] - 1 })) { for (int i = 0; i < big_mesh_xyz[0]; i += 2) { const INDEX_TYPE baseid = mMesh->coords2Cellid(Vec3l(i, j, k)); this->mStandardRobins->ComputeLowerStar(baseid); //lstars_count++; } } } //printf("did %d Y boundaries\n", lstars_count); //int tmp2 = lstars_count; //lstars_count = 0; // DO X Plane Boundaries #pragma omp parallel for for (int k = 2; k < big_mesh_xyz[2] - 2; k += 2) { // smaller range since we did k = 0 and k = xyz[2]-1 for (int j = 2; j < big_mesh_xyz[1] - 2; j += 2) { // again smaller range for (auto i : std::vector<INDEX_TYPE>({ 0, big_mesh_xyz[0] - 1 })) { const INDEX_TYPE baseid = mMesh->coords2Cellid(Vec3l(i, j, k)); this->mStandardRobins->ComputeLowerStar(baseid); //lstars_count++; } } } //printf("did %d X boundaries\n", lstars_count); printf("BOUNDARY: new robins1 in %dms\n", std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - now_time).count()); now_time = std::chrono::steady_clock::now(); //printf("new robins1 %d lower stars in %dms\n", lstars_count, std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - now_time).count()); //now_time = std::chrono::steady_clock::now(); //lstars_count = 0; //for (int k = 2; k < big_mesh_xyz[2] - 3; k += 2) { // do parallel division of work // for (int j = 2; j < big_mesh_xyz[1] - 3; j += 2) { // for (int i = 2; i < big_mesh_xyz[0] - 3; i += 2) { // const INDEX_TYPE baseid = mMesh->coords2Cellid(Vec3l(i, j, k)); // INDEX_TYPE pre_pair = mGrad->getPair(baseid); // this->mStandardRobins->ComputeLowerStar(baseid); // lstars_count++; // INDEX_TYPE post_pair = mGrad->getPair(baseid); // if (pre_pair != post_pair) { // printf("asdasdf\n"); // } // } // } //} //printf("old robins1 %d lower stars in %dms\n", lstars_count, std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - now_time).count()); #ifdef DEBUGPARALLEL FILE* fout = fopen("test_out.raw", "wb"); fwrite(db_counter, sizeof(int), mMesh->numCells(), fout); fclose(fout); //printf("doing seen checks!\n"); //for (INDEX_TYPE i = 0; i < mMesh->numCells(); i++) { // // if (mMesh->boundaryValue(i) == 0 && db_counter[i] != 1) { // printf("index %lld seen %d times: ", i, db_counter[i]); // Vec3l c; // mMesh->cellid2Coords(i, c); // c.PrintInt(); // } //} printf("done seen checks\n"); #endif } 
// // std::vector<INDEX_TYPE> topo_index_partition; // int num_threads; //#pragma omp parallel // { //#pragma omp single // { // num_threads = omp_get_num_threads(); // ArrayIndexPartitioner::EvenChunkSplit(mMesh->numCells(), num_threads, topo_index_partition); // } // // int thread_num = omp_get_thread_num(); // // // iterate over all vertices // typename MeshType::DCellsIterator verts(mMesh, 0, topo_index_partition[thread_num], topo_index_partition[thread_num + 1]); // for (verts.begin(); verts.valid(); verts.advance()){ // INDEX_TYPE small_mesh_vertex_id = verts.value(); // // ComputeLowerStar(small_mesh_vertex_id); // // // } // } // } // //DenseLabeling<INDEX_TYPE>* GetLabeling() { return mPairs; } void ComputePairing_sliding() { std::chrono::steady_clock::time_point now_time = std::chrono::steady_clock::now(); std::chrono::steady_clock::time_point start_time = std::chrono::steady_clock::now(); // dimensions of the mesh Vec3l big_mesh_xyz = mMesh->XYZ(); //int lstars_count = 0; #ifdef DEBUGPARALLEL db_counter = new int[mMesh->numCells()]; memset(db_counter, 0, sizeof(int) * mMesh->numCells()); #endif // START PARALLEL WORK #pragma omp parallel { std::vector<INDEX_TYPE> topo_index_partition; int num_threads; num_threads = omp_get_num_threads(); ArrayIndexPartitioner::EvenChunkSplit(mMesh->numCells(), num_threads, topo_index_partition); int thread_num = omp_get_thread_num(); // these coordinates are INCLUSIVE - which means do start and end INDEX_TYPE thread_start_id = topo_index_partition[thread_num]; INDEX_TYPE thread_end_id = topo_index_partition[thread_num + 1] - 1; Vec3l start_coord, end_coord; mMesh->cellid2Coords(thread_start_id, start_coord); mMesh->cellid2Coords(thread_end_id, end_coord); // get inclusive coord //#pragma omp critical // { // printf("thread %d doing:\n\t", thread_num); // start_coord.PrintInt(); printf("\t"); // end_coord.PrintInt(); // } // iterate over all vertices MESH_CONTEXT mc; // gather the pointers rather than have to pass a million items // only need maxvl labeling and function values // and maybe reslabel mc.small_mesh = new Explicit3x3x3SmallRegularGrid(); mc.small_grid = new RegularGrid3D(Vec3l(3, 3, 3), Vec3b(0, 0, 0)); mc.small_grid_func = new RegularGridTrilinearFunction(mc.small_grid, mc.small_grid_values); // wrapper for our values // place to store our local copy of the max/min vertices for each cell mc.small_mesh_maxmin_labeling = new RegularGridMaxMinVertexLabeling3D<Explicit3x3x3SmallRegularGrid, RegularGridTrilinearFunction>(mc.small_mesh, mc.small_grid_func); mc.small_mesh_maxmin_labeling->HACK_init(); const INDEX_TYPE kernel_baseid = mc.small_mesh->coords2Cellid(Vec3l(2, 2, 2)); const INDEX_TYPE kernel_data_baseid = mc.small_grid->Index3d(Vec3l(1, 1, 1)); int kstart = start_coord[2]; if (kstart % 2 == 1) kstart--; // kstart cannot start on an odd number, if it is odd, start on prior? 
if (kstart == 0) kstart = 2; int kend = end_coord[2]; if (kend == big_mesh_xyz[2] - 1) kend = big_mesh_xyz[2] - 2; // NOW DO ALL INTERIOR VERTICES #ifdef DEBUGPARALLEL #pragma omp critical { printf("thread %d doing actual k: [%d:%d]\n", thread_num, kstart, kend); } #endif for (int k = kstart; k <= kend; k += 2) { // do parallel division of work const int d_k = k >> 1; // data k int jstart = 2; int jend = big_mesh_xyz[1] - 1; //if (k == kstart) { // jstart = start_coord[1]; //} //if (k == kend) { // jend = end_coord[1] - 2; //} for (int j = jstart; j < jend; j += 2) { const int d_j = j >> 1; // data j const INDEX_TYPE baseid_nox = mMesh->coords2Cellid(Vec3l(0, j, k)); const INDEX_TYPE data_baseid_nox = mGrid->Index3d(Vec3l(0, d_j, d_k)); int istart = 2; int iend = big_mesh_xyz[0] - 1; //if (k == kstart && j == start_coord[1]) { // istart = start_coord[0]; //} //if (k == kend && j == end_coord[1]) { // iend = end_coord[0] - 2; //} if (istart >= iend) continue; // DO FIRST WINDOW - COPY ALL ELEMENTS int i = istart; const int d_i_0 = i >> 1; // data i const INDEX_TYPE baseid_0 = baseid_nox + i; if (!(baseid_0 < thread_start_id || baseid_0 > thread_end_id)) { const INDEX_TYPE data_baseid_0 = data_baseid_nox + d_i_0; // so for each vertex FIRST copy in the values // we can optimize this later to do less global lookups for (int pos = 0; pos < 27; pos++) { //int sd_nid = mc.small_mesh->get27NeighborOffset(pos) + kernel_baseid; INDEX_TYPE big_mesh_vertex_id = mMesh->get27NeighborOffset(pos) + baseid_0; INDEX_TYPE big_grid_vertex_data_id = m_data_27_offsets[pos] + data_baseid_0; INDEX_TYPE kernel_vertex_id = kernel_baseid + mc.small_mesh->get27NeighborOffset(pos); //INDEX_TYPE kernel_data_nid = kernel_data_baseid + m_data_27_offsets[pos]; // this should just = pos?? mc.small_mesh_id_to_big_mesh_id[kernel_vertex_id] = big_mesh_vertex_id; mc.small_mesh_maxmin_labeling->SetUncompressedMaxVal(kernel_vertex_id, this->mMaxVL->GetUncompressedMaxVal(big_mesh_vertex_id)); mc.small_mesh_maxmin_labeling->SetUncompressedMinVal(kernel_vertex_id, this->mMaxVL->GetUncompressedMinVal(big_mesh_vertex_id)); mc.small_grid_values[pos] = this->mFunc->SampleImage(big_grid_vertex_data_id); } ComputeLowerStar(mc, kernel_baseid); } istart += 2; for (i = istart; i < iend; i += 2) { const int d_i = i >> 1; // data i const INDEX_TYPE baseid = baseid_nox + i; if (baseid < thread_start_id || baseid > thread_end_id) continue; const INDEX_TYPE data_baseid = data_baseid_nox + d_i; for (int pos = 0; pos < 27; pos += 3) { //int sd_nid = mc.small_mesh->get27NeighborOffset(pos) + kernel_baseid; //INDEX_TYPE kernel_data_nid = kernel_data_baseid + m_data_27_offsets[pos]; // this should just = pos?? 
INDEX_TYPE kernel_vertex_id_0 = kernel_baseid + mc.small_mesh->get27NeighborOffset(pos); INDEX_TYPE kernel_vertex_id_next = kernel_vertex_id_0 + 2; mc.small_mesh_id_to_big_mesh_id[kernel_vertex_id_0] = mc.small_mesh_id_to_big_mesh_id[kernel_vertex_id_next]; mc.small_mesh_maxmin_labeling->SetUncompressedMaxVal(kernel_vertex_id_0, mc.small_mesh_maxmin_labeling->GetUncompressedMaxVal(kernel_vertex_id_next)); mc.small_mesh_maxmin_labeling->SetUncompressedMinVal(kernel_vertex_id_0, mc.small_mesh_maxmin_labeling->GetUncompressedMinVal(kernel_vertex_id_next)); INDEX_TYPE kernel_vertex_id_1 = kernel_baseid + mc.small_mesh->get27NeighborOffset(pos + 1); INDEX_TYPE big_mesh_vertex_id_1 = mMesh->get27NeighborOffset(pos + 1) + baseid; mc.small_mesh_id_to_big_mesh_id[kernel_vertex_id_1] = big_mesh_vertex_id_1; mc.small_mesh_maxmin_labeling->SetUncompressedMaxVal(kernel_vertex_id_1, this->mMaxVL->GetUncompressedMaxVal(big_mesh_vertex_id_1)); mc.small_mesh_maxmin_labeling->SetUncompressedMinVal(kernel_vertex_id_1, this->mMaxVL->GetUncompressedMinVal(big_mesh_vertex_id_1)); INDEX_TYPE kernel_vertex_id_2 = kernel_baseid + mc.small_mesh->get27NeighborOffset(pos + 2); INDEX_TYPE big_mesh_vertex_id_2 = mMesh->get27NeighborOffset(pos + 2) + baseid; mc.small_mesh_id_to_big_mesh_id[kernel_vertex_id_2] = big_mesh_vertex_id_2; mc.small_mesh_maxmin_labeling->SetUncompressedMaxVal(kernel_vertex_id_2, this->mMaxVL->GetUncompressedMaxVal(big_mesh_vertex_id_2)); mc.small_mesh_maxmin_labeling->SetUncompressedMinVal(kernel_vertex_id_2, this->mMaxVL->GetUncompressedMinVal(big_mesh_vertex_id_2)); } mc.small_grid_values[0] = mc.small_grid_values[1]; mc.small_grid_values[1] = mc.small_grid_values[2]; mc.small_grid_values[2] = this->mFunc->SampleImage(m_data_27_offsets[2] + data_baseid); mc.small_grid_values[3] = mc.small_grid_values[4]; mc.small_grid_values[4] = mc.small_grid_values[5]; mc.small_grid_values[5] = this->mFunc->SampleImage(m_data_27_offsets[5] + data_baseid); mc.small_grid_values[6] = mc.small_grid_values[7]; mc.small_grid_values[7] = mc.small_grid_values[8]; mc.small_grid_values[8] = this->mFunc->SampleImage(m_data_27_offsets[8] + data_baseid); mc.small_grid_values[9] = mc.small_grid_values[10]; mc.small_grid_values[10] = mc.small_grid_values[11]; mc.small_grid_values[11] = this->mFunc->SampleImage(m_data_27_offsets[11] + data_baseid); mc.small_grid_values[12] = mc.small_grid_values[13]; mc.small_grid_values[13] = mc.small_grid_values[14]; mc.small_grid_values[14] = this->mFunc->SampleImage(m_data_27_offsets[14] + data_baseid); mc.small_grid_values[15] = mc.small_grid_values[16]; mc.small_grid_values[16] = mc.small_grid_values[17]; mc.small_grid_values[17] = this->mFunc->SampleImage(m_data_27_offsets[17] + data_baseid); mc.small_grid_values[18] = mc.small_grid_values[19]; mc.small_grid_values[19] = mc.small_grid_values[20]; mc.small_grid_values[20] = this->mFunc->SampleImage(m_data_27_offsets[20] + data_baseid); mc.small_grid_values[21] = mc.small_grid_values[22]; mc.small_grid_values[22] = mc.small_grid_values[23]; mc.small_grid_values[23] = this->mFunc->SampleImage(m_data_27_offsets[23] + data_baseid); mc.small_grid_values[24] = mc.small_grid_values[25]; mc.small_grid_values[25] = mc.small_grid_values[26]; mc.small_grid_values[26] = this->mFunc->SampleImage(m_data_27_offsets[26] + data_baseid); ComputeLowerStar(mc, kernel_baseid); } } } } printf("INTERIOR: new robins1 in %dms\n", std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - now_time).count()); now_time = 
std::chrono::steady_clock::now(); //lstars_count = 0; // DO Z Plane Boundaries #pragma omp parallel for for (int j = 0; j < big_mesh_xyz[1]; j += 2) { for (auto k : std::vector<INDEX_TYPE>({ 0, big_mesh_xyz[2] - 1 })) { // do parallel division of work for (int i = 0; i < big_mesh_xyz[0]; i += 2) { const INDEX_TYPE baseid = mMesh->coords2Cellid(Vec3l(i, j, k)); this->mStandardRobins->ComputeLowerStar(baseid); //lstars_count++; } } } //printf("did %d Z boundaries\n", lstars_count); //int tmp = lstars_count; //lstars_count = 0; // DO Y Plane Boundaries #pragma omp parallel for for (int k = 2; k < big_mesh_xyz[2] - 2; k += 2) { // smaller range since we did k = 0 and k = xyz[2]-1 for (auto j : std::vector<INDEX_TYPE>({ 0, big_mesh_xyz[1] - 1 })) { for (int i = 0; i < big_mesh_xyz[0]; i += 2) { const INDEX_TYPE baseid = mMesh->coords2Cellid(Vec3l(i, j, k)); this->mStandardRobins->ComputeLowerStar(baseid); //lstars_count++; } } } //printf("did %d Y boundaries\n", lstars_count); //int tmp2 = lstars_count; //lstars_count = 0; // DO X Plane Boundaries #pragma omp parallel for for (int k = 2; k < big_mesh_xyz[2] - 2; k += 2) { // smaller range since we did k = 0 and k = xyz[2]-1 for (int j = 2; j < big_mesh_xyz[1] - 2; j += 2) { // again smaller range for (auto i : std::vector<INDEX_TYPE>({ 0, big_mesh_xyz[0] - 1 })) { const INDEX_TYPE baseid = mMesh->coords2Cellid(Vec3l(i, j, k)); this->mStandardRobins->ComputeLowerStar(baseid); //lstars_count++; } } } //printf("did %d X boundaries\n", lstars_count); printf("BOUNDARY: new robins1 in %dms\n", std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - now_time).count()); now_time = std::chrono::steady_clock::now(); //printf("new robins1 %d lower stars in %dms\n", lstars_count, std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - now_time).count()); //now_time = std::chrono::steady_clock::now(); //lstars_count = 0; //for (int k = 2; k < big_mesh_xyz[2] - 3; k += 2) { // do parallel division of work // for (int j = 2; j < big_mesh_xyz[1] - 3; j += 2) { // for (int i = 2; i < big_mesh_xyz[0] - 3; i += 2) { // const INDEX_TYPE baseid = mMesh->coords2Cellid(Vec3l(i, j, k)); // INDEX_TYPE pre_pair = mGrad->getPair(baseid); // this->mStandardRobins->ComputeLowerStar(baseid); // lstars_count++; // INDEX_TYPE post_pair = mGrad->getPair(baseid); // if (pre_pair != post_pair) { // printf("asdasdf\n"); // } // } // } //} //printf("old robins1 %d lower stars in %dms\n", lstars_count, std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::steady_clock::now() - now_time).count()); #ifdef DEBUGPARALLEL FILE* fout = fopen("test_out.raw", "wb"); fwrite(db_counter, sizeof(int), mMesh->numCells(), fout); fclose(fout); //printf("doing seen checks!\n"); //for (INDEX_TYPE i = 0; i < mMesh->numCells(); i++) { // // if (mMesh->boundaryValue(i) == 0 && db_counter[i] != 1) { // printf("index %lld seen %d times: ", i, db_counter[i]); // Vec3l c; // mMesh->cellid2Coords(i, c); // c.PrintInt(); // } //} printf("done seen checks\n"); #endif } }; } #endif
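// Editor's note: a standalone 1-D sketch (names and values here are invented,
// not part of the library) of the value-reuse idea behind
// ComputePairing_sliding() above. When the 3x3x3 window advances one step in
// x, the two stale x-columns are shifted over and only the newly exposed
// column (9 of the 27 entries; 1 of 3 in this 1-D analogue) is fetched from
// the big grid, cutting global reads roughly by a factor of three.
#include <cstdio>

int main() {
    float big[6] = {10, 20, 30, 40, 50, 60}; // stand-in for one grid row
    float win[3] = {big[0], big[1], big[2]}; // first window: copy everything
    for (int x = 1; x + 2 < 6; ++x) {
        win[0] = win[1];                     // shift: reuse loaded values
        win[1] = win[2];
        win[2] = big[x + 2];                 // fetch only the new entry
        std::printf("window at x=%d: %g %g %g\n", x, win[0], win[1], win[2]);
    }
    return 0;
}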
utils.h
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

/*!
 * Copyright (c) 2015 by Contributors
 * \file utils.h
 * \brief Basic utility functions.
 */
#ifndef MXNET_COMMON_UTILS_H_
#define MXNET_COMMON_UTILS_H_

#include <dmlc/logging.h>
#include <dmlc/omp.h>
#include <nnvm/graph.h>
#include <mxnet/engine.h>
#include <mxnet/ndarray.h>
#include <mxnet/op_attr_types.h>
#include <mxnet/graph_attr_types.h>
#include <nnvm/graph_attr_types.h>

#include <memory>
#include <vector>
#include <type_traits>
#include <utility>
#include <random>
#include <string>
#include <thread>
#include <algorithm>
#include <functional>
#include <limits>

#include "../operator/mxnet_op.h"
#if MXNET_USE_MKLDNN == 1
#include "../operator/nn/mkldnn/mkldnn_base-inl.h"
#endif

#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
#include <windows.h>
#else
#include <unistd.h>
#endif

namespace mxnet {
namespace common {

#if defined(_WIN32) || defined(_WIN64) || defined(__WINDOWS__)
inline size_t current_process_id() { return ::GetCurrentProcessId(); }
#else
inline size_t current_process_id() { return getpid(); }
#endif

/*!
 * \brief IndPtr should be non-negative, in non-decreasing order, start with 0,
 *        and end with a value equal to the size of indices.
 */
struct csr_indptr_check {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr,
                                  const nnvm::dim_t end, const nnvm::dim_t idx_size) {
    if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] ||
        (i == 0 && indptr[i] != 0) ||
        (i == end - 1 && indptr[end] != idx_size))
      *out = kCSRIndPtrErr;
  }
};

/*!
 * \brief Indices should be non-negative, less than the number of columns
 *        and in ascending order per row.
 */
struct csr_idx_check {
  template<typename DType, typename IType, typename RType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const RType* indptr, const nnvm::dim_t ncols) {
    for (RType j = indptr[i]; j < indptr[i+1]; j++) {
      if (idx[j] >= ncols || idx[j] < 0 ||
          (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) {
        *out = kCSRIdxErr;
        break;
      }
    }
  }
};

/*!
 * \brief Indices of RSPNDArray should be non-negative,
 *        less than the size of first dimension and in ascending order
 */
struct rsp_idx_check {
  template<typename DType, typename IType>
  MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx,
                                  const nnvm::dim_t end, const nnvm::dim_t nrows) {
    if ((i < end && idx[i+1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows)
      *out = kRSPIdxErr;
  }
};

template<typename xpu>
void CheckFormatWrapper(const RunContext &rctx, const NDArray &input,
                        const TBlob &err_cpu, const bool full_check);

/*!
 * \brief Check the validity of CSRNDArray.
 * \param rctx Execution context.
 * \param input Input NDArray of CSRStorage.
 * \param err_cpu Error number on cpu.
* \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. */ template<typename xpu> void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kCSRStorage) << "CheckFormatCSRImpl is for CSRNDArray"; const mxnet::TShape shape = input.shape(); const mxnet::TShape idx_shape = input.aux_shape(csr::kIdx); const mxnet::TShape indptr_shape = input.aux_shape(csr::kIndPtr); const mxnet::TShape storage_shape = input.storage_shape(); if ((shape.ndim() != 2) || (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) || (indptr_shape[0] != shape[0] + 1) || (idx_shape[0] != storage_shape[0])) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kCSRShapeErr; }); return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), indptr_shape[0] - 1, idx_shape[0]); // no need to check indices if indices are empty if (idx_shape[0] != 0) { Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIdx).dptr<IType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]); } mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); }); } } /*! * \brief Check the validity of RowSparseNDArray. * \param rctx Execution context. * \param input Input NDArray of RowSparseStorage. * \param err_cpu Error number on cpu. * \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. 
*/ template<typename xpu> void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kRowSparseStorage) << "CheckFormatRSPImpl is for RSPNDArray"; const mxnet::TShape idx_shape = input.aux_shape(rowsparse::kIdx); if (idx_shape[0] != input.storage_shape()[0]) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kRSPShapeErr; }); return; } if (idx_shape[0] == 0) { return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0], val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(), idx_shape[0] - 1, input.shape()[0]); mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); } } template<typename xpu> void CheckFormatImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { int stype = input.storage_type(); if (stype == kCSRStorage) { CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kRowSparseStorage) { CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kDefaultStorage) { // no-op for default storage } else { LOG(FATAL) << "Unknown storage type " << stype; } } /*! \brief Pick rows specified by user input index array from a row sparse ndarray * and save them in the output sparse ndarray. */ template<typename xpu> void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s, const NDArray& input_nd, const TBlob& idx_data, const OpReqType req, NDArray* output_nd); /* \brief Casts tensor storage type to the new type. */ template<typename xpu> void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output); /*! \brief returns true if all storage types in `vstorage` are the same as target `stype`. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) { if (!vstorage.empty()) { for (const auto& i : vstorage) { if (i != stype) return false; } return true; } return false; } /*! \brief returns true if all storage types in `vstorage` are the same as target `stype1` * or `stype2'. Sets boolean if both found. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!vstorage.empty()) { uint8_t has = 0; for (const auto i : vstorage) { if (i == stype1) { has |= 1; } else if (i == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as target `stype`. false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() != stype) { return false; } } return true; } return false; } /*! 
\brief returns true if the storage types of arrays in `ndarrays` * are the same as targets `stype1` or `stype2`. false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!ndarrays.empty()) { uint8_t has = 0; for (const auto& nd : ndarrays) { const NDArrayStorageType stype = nd.storage_type(); if (stype == stype1) { has |= 1; } else if (stype == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if storage type of any array in `ndarrays` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() == stype) { return true; } } } return false; } /*! \brief returns true if any storage type `ndstype` in `ndstypes` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<int>& ndstypes, const NDArrayStorageType stype) { if (!ndstypes.empty()) { for (const auto& ndstype : ndstypes) { if (ndstype == stype) { return true; } } } return false; } /*! \brief get string representation of dispatch_mode */ inline std::string dispatch_mode_string(const DispatchMode x) { switch (x) { case DispatchMode::kFCompute: return "fcompute"; case DispatchMode::kFComputeEx: return "fcompute_ex"; case DispatchMode::kFComputeFallback: return "fcompute_fallback"; case DispatchMode::kVariable: return "variable"; case DispatchMode::kUndefined: return "undefined"; } return "unknown"; } /*! \brief get string representation of storage_type */ inline std::string stype_string(const int x) { switch (x) { case kDefaultStorage: return "default"; case kCSRStorage: return "csr"; case kRowSparseStorage: return "row_sparse"; } return "unknown"; } /*! \brief get string representation of device type */ inline std::string dev_type_string(const int dev_type) { switch (dev_type) { case Context::kCPU: return "cpu"; case Context::kGPU: return "gpu"; case Context::kCPUPinned: return "cpu_pinned"; case Context::kCPUShared: return "cpu_shared"; } return "unknown"; } /*! \brief get string representation of the operator stypes */ inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>& in_attrs, const std::vector<int>& out_attrs) { std::ostringstream os; os << "operator = " << attrs.op->name << "\ninput storage types = ["; for (const int attr : in_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "output storage types = ["; for (const int attr : out_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "params = {"; for (auto kv : attrs.dict) { os << "\"" << kv.first << "\" : " << kv.second << ", "; } os << "}\n" << "context.dev_mask = " << dev_type_string(dev_mask); return os.str(); } /*! 
\brief get string representation of the operator */
inline std::string operator_string(const nnvm::NodeAttrs& attrs,
                                   const OpContext& ctx,
                                   const std::vector<NDArray>& inputs,
                                   const std::vector<OpReqType>& req,
                                   const std::vector<NDArray>& outputs) {
  std::string result = "";
  std::vector<int> in_stypes;
  std::vector<int> out_stypes;
  in_stypes.reserve(inputs.size());
  out_stypes.reserve(outputs.size());
  auto xform = [](const NDArray& arr) -> int { return arr.storage_type(); };
  std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform);
  std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform);
  result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes);
  return result;
}

/*! \brief log message once. Intended for storage fallback warning messages. */
inline void LogOnce(const std::string& message) {
  typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore;
  auto log_store = LogStore::Get();
  if (log_store->find(message) == log_store->end()) {
    LOG(INFO) << message;
    log_store->insert(message);
  }
}

/*! \brief log storage fallback event */
inline void LogStorageFallback(const nnvm::NodeAttrs& attrs,
                               const int dev_mask,
                               const std::vector<int>* in_attrs,
                               const std::vector<int>* out_attrs) {
  static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true);
  if (!log) return;
  const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs);
  std::ostringstream os;
  const char* warning = "\nThe operator with default storage type will be dispatched "
      "for execution. You're seeing this warning message because the operator above is unable "
      "to process the given ndarrays with specified storage types, context and parameter. "
      "Temporary dense ndarrays are generated in order to execute the operator. "
      "This does not affect the correctness of the program. "
      "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to "
      "0 to suppress this warning.";
  os << "\nStorage type fallback detected:\n" << op_str << warning;
  LogOnce(os.str());
#if MXNET_USE_MKLDNN == 1
  if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. "
                                       "You can re-enable by setting MXNET_MKLDNN_ENABLED=1");
  if (GetMKLDNNCacheSize() != -1) common::LogOnce("MXNET_MKLDNN_CACHE_NUM is set. "
                                                  "Should only be set if "
                                                  "your model has variable input shapes, "
                                                  "as cache size may grow unbounded");
#endif
}

// heuristic to determine the number of threads per GPU
inline int GetNumThreadsPerGPU() {
  // This is the resource-efficient option.
  return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2);
}

// heuristic to get number of matching colors.
// this decides how much parallelism we can get in each GPU.
inline int GetExecNumMatchColor() {
  // This is the resource-efficient option.
  int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1);
  return std::min(num_match_color, GetNumThreadsPerGPU());
}

template<typename T, typename V>
V ParallelAccumulate(const T* a, const int n, V start) {
  V sum = start;
#pragma omp parallel for reduction(+:sum)
  for (int i = 0; i < n; ++i) {
    sum += a[i];
  }
  return sum;
}

/*!
 * \brief
 * Helper function for ParallelSort.
 * DO NOT call this function directly.
 * Use the interface ParallelSort instead.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare& comp) {
  if (len < grainsize) {
    std::sort(first, first+len, comp);
  } else {
    std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp);
    ParallelSortHelper(first+len/2, len - len/2, grainsize, comp);
    thr.join();
    std::inplace_merge(first, first+len/2, first+len, comp);
  }
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into the ascending order defined by
 * the comparator comp.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range is recursively divided in half, and a new thread is spawned to sort
 * one half while the current thread sorts the other.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt, typename Compare>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) {
  const auto num = std::distance(first, last);
  size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16));
  ParallelSortHelper(first, num, grainsize, comp);
}

/*!
 * \brief
 * Sort the elements in the range [first, last) into ascending order.
 * The elements are compared using the default < operator.
 * If the length of the range [first, last) is greater than a certain threshold,
 * the range is recursively divided in half, and a new thread is spawned to sort
 * one half while the current thread sorts the other.
 * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h
 */
template<typename RandomIt>
void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) {
  ParallelSort(first, last, num_threads,
               std::less<typename std::iterator_traits<RandomIt>::value_type>());
}

/*!
 * \brief Random Engine
 */
typedef std::mt19937 RANDOM_ENGINE;

/*!
 * \brief Helper functions.
 */
namespace helper {

/*!
 * \brief Helper for non-array type `T`.
 */
template <class T>
struct UniqueIf {
  /*!
   * \brief Type of `T`.
   */
  using SingleObject = std::unique_ptr<T>;
};

/*!
 * \brief Helper for an array of unknown bound `T`.
 */
template <class T>
struct UniqueIf<T[]> {
  /*!
   * \brief Type of `T`.
   */
  using UnknownBound = std::unique_ptr<T[]>;
};

/*!
 * \brief Helper for an array of known bound `T`.
 */
template <class T, size_t kSize>
struct UniqueIf<T[kSize]> {
  /*!
   * \brief Type of `T`.
   */
  using KnownBound = void;
};

}  // namespace helper

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs a non-array type `T`. The arguments `args` are passed to the
 * constructor of `T`. The function does not participate in the overload
 * resolution if `T` is an array type.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) {
  return std::unique_ptr<T>(new T(std::forward<Args>(args)...));
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param n The size of the array to construct.
 * \return `std``::``unique_ptr` of an instance of type `T`.
 *
 * Constructs an array of unknown bound `T`. The function does not participate
 * in the overload resolution unless `T` is an array of unknown bound.
 */
template <class T>
typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) {
  using U = typename std::remove_extent<T>::type;
  return std::unique_ptr<T>(new U[n]{});
}

/*!
 * \brief Constructs an object of type `T` and wraps it in a
 *        `std``::``unique_ptr`.
 * \param args List of arguments with which an instance of `T` will be
 *             constructed.
 *
 * Construction of arrays of known bound is disallowed.
 */
template <class T, class... Args>
typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete;

template<typename FCompType>
FCompType GetFCompute(const nnvm::Op* op, const std::string& name,
                      const Context& ctx) {
  static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>");
  static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>");
  if (ctx.dev_mask() == cpu::kDevMask) {
    return fcompute_cpu.get(op, nullptr);
  } else if (ctx.dev_mask() == gpu::kDevMask) {
    return fcompute_gpu.get(op, nullptr);
  } else {
    LOG(FATAL) << "Unknown device mask " << ctx.dev_mask();
    return nullptr;
  }
}

/*!
 * \brief Return the max integer value representable in the type `T` without loss of precision.
 */
template <typename T>
constexpr size_t MaxIntegerValue() {
  return std::is_integral<T>::value ?
    std::numeric_limits<T>::max() :
    size_t(2) << (std::numeric_limits<T>::digits - 1);
}

template <>
constexpr size_t MaxIntegerValue<mshadow::half::half_t>() {
  return size_t(2) << 10;
}

// Returns the number of bits needed to represent `a`, i.e. floor(log2(a)) + 1.
MSHADOW_XINLINE int ilog2ul(size_t a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}

MSHADOW_XINLINE int ilog2ui(unsigned int a) {
  int k = 1;
  while (a >>= 1) ++k;
  return k;
}

/*!
 * \brief Return an NDArray of all zeros.
 */
inline NDArray InitZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                         const Context &ctx, const int dtype) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    NDArray ret(shape, ctx, false, dtype);
    ret = 0;
    return ret;
  }
  // NDArray with non-default storage. Storage allocation is always delayed.
  return NDArray(stype, shape, ctx, true, dtype);
}

/*!
 * \brief Helper to add a NDArray of zeros to a std::vector.
 */
inline void EmplaceBackZeros(const NDArrayStorageType stype, const mxnet::TShape &shape,
                             const Context &ctx, const int dtype,
                             std::vector<NDArray> *vec) {
  // NDArray with default storage
  if (stype == kDefaultStorage) {
    vec->emplace_back(shape, ctx, false, dtype);
    vec->back() = 0;
  } else {
    // NDArray with non-default storage. Storage allocation is always delayed.
    vec->emplace_back(stype, shape, ctx, true, dtype);
  }
}

/*!
 * \brief parallelize copy by OpenMP.
 */
template<typename DType>
inline void ParallelCopy(DType* dst, const DType* src, index_t size) {
  static index_t copy_block_size = dmlc::GetEnv("MXNET_CPU_PARALLEL_COPY_SIZE", 200000);
  if (size >= copy_block_size) {
    #pragma omp parallel for num_threads(engine::OpenMP::Get()->GetRecommendedOMPThreadCount())
    for (index_t i = 0; i < size; ++i) {
      dst[i] = src[i];
    }
  } else {
    std::memcpy(dst, src, sizeof(DType) * size);
  }
}

/*!
 * \brief If numpy compatibility is turned off (default), the shapes passed in
 * by users follow the legacy shape definition:
 * 1. 0 ndim means the shape is completely unknown.
 * 2. 0 dim size means the dim size is unknown.
 * We need to convert those shapes to use the numpy shape definition:
 * 1. 0 ndim means it's a scalar tensor.
 * 2. -1 ndim means the shape is unknown.
 * 3. 0 dim size means no elements in that dimension.
 * 4. -1 dim size means the dimension's size is unknown.
 * so that operator's infer shape function can work in backend.
 * \param shape to be converted.
 * Note: It is possible that the shape to be converted is already
 * numpy compatible. For example, when a subgraph operator's infer
 * shape function is called from the infer shape pass of the whole
 * graph, its input/output shapes have been converted to numpy
 * compatible shapes.
 */
inline void ConvertToNumpyShape(mxnet::TShape* shape) {
  if (shape->ndim() == 0) {  // legacy shape ndim = 0 means unknown
    *shape = mxnet::TShape();  // unknown shape ndim = -1
  } else {
    for (int j = 0; j < shape->ndim(); ++j) {
      if ((*shape)[j] == 0) {  // legacy shape dim_size = 0 means unknown
        (*shape)[j] = -1;  // unknown dim size = -1
      }
    }
  }
}

inline void ConvertToNumpyShape(mxnet::ShapeVector* shapes) {
  for (size_t i = 0; i < shapes->size(); ++i) {
    ConvertToNumpyShape(&(shapes->at(i)));
  }
}

/*!
 * \brief This function is used to convert shapes returned by
 * the infer shape functions/pass to the legacy shape definition.
 */
inline void ConvertToLegacyShape(mxnet::TShape* shape) {
  if (!mxnet::ndim_is_known(*shape)) {
    *shape = mxnet::TShape(0, -1);
  } else {
    for (int j = 0; j < shape->ndim(); ++j) {
      if (!mxnet::dim_size_is_known(*shape, j)) {
        (*shape)[j] = 0;
      }
    }
  }
}

inline void ConvertToLegacyShape(mxnet::ShapeVector* shapes) {
  for (size_t i = 0; i < shapes->size(); ++i) {
    ConvertToLegacyShape(&(shapes->at(i)));
  }
}

}  // namespace common
}  // namespace mxnet
#endif  // MXNET_COMMON_UTILS_H_
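// A brief usage sketch for the helpers above. Illustrative only: it assumes
// this header is included and compiled inside MXNet; the function name
// `UtilsUsageExample` and all sizes are hypothetical.
#include <vector>

inline void UtilsUsageExample() {
  using namespace mxnet::common;
  // MakeUnique: non-array form and unknown-bound array form.
  auto obj = MakeUnique<std::vector<int>>(10, 0);  // unique_ptr<std::vector<int>>
  auto arr = MakeUnique<double[]>(128);            // unique_ptr<double[]>, zero-initialized
  // ParallelSort: recursively splits the range and sorts halves on extra
  // threads once the range exceeds the grainsize threshold.
  std::vector<int> v(1 << 20, 7);
  ParallelSort(v.begin(), v.end(), 4);
}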
GB_unaryop__minv_fp32_fp64.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_fp32_fp64 // op(A') function: GB_tran__minv_fp32_fp64 // C type: float // A type: double // cast: float cij = (float) aij // unaryop: cij = (1.0F)/aij #define GB_ATYPE \ double #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = (1.0F)/x ; // casting #define GB_CASTING(z, x) \ float z = (float) x ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (x, aij) ; \ GB_OP (GB_CX (pC), x) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_FP32 || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_fp32_fp64 ( float *restrict Cx, const double *restrict Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (int64_t p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_fp32_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t **Rowcounts, GBI_single_iterator Iter, const int64_t *restrict A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
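// Hand-expanded form of the Cx = op (cast (Ax)) loop above, with this file's
// GB_* macros substituted in. A standalone sketch for illustration only; it
// does not depend on GB.h and the function name is hypothetical.
#include <cstdint>

void minv_fp32_fp64_expanded (float *Cx, const double *Ax, int64_t anz, int nthreads)
{
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        double aij = Ax [p] ;       // GB_GETA (aij, Ax, p)
        float x = (float) aij ;     // GB_CASTING (x, aij)
        Cx [p] = (1.0F)/x ;         // GB_OP (GB_CX (p), x)
    }
}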
dcache.c
#include "papi.h" #include "caches.h" #include "timing_kernels.h" #include "dcache.h" #include <math.h> #define _SIZE_SAMPLES_ 40 extern char* eventname; int min_size, max_size; void d_cache_driver(char* papi_event_name, int max_iter, hw_desc_t *hw_desc, char* outdir, int latency_only, int mode, int show_progress) { int pattern = 3; int stride, f, cache_line; int status, test_cnt = 0; float ppb = 16; FILE *ofp_papi; char *sufx, *papiFileName; // Open file (pass handle to d_cache_test()). if(CACHE_READ_WRITE == mode){ sufx = strdup(".data.writes"); }else{ sufx = strdup(".data.reads"); } int l = strlen(outdir)+strlen(papi_event_name)+strlen(sufx); papiFileName = (char *)calloc( 1+l, sizeof(char) ); if (!papiFileName) { fprintf(stderr, "Unable to allocate memory. Skipping event %s.\n", papi_event_name); goto error0; } if (l != (sprintf(papiFileName, "%s%s%s", outdir, papi_event_name, sufx))) { fprintf(stderr, "sprintf error. Skipping event %s.\n", papi_event_name); goto error1; } if (NULL == (ofp_papi = fopen(papiFileName,"w"))) { fprintf(stderr, "Unable to open file %s. Skipping event %s.\n", papiFileName, papi_event_name); goto error1; } if( (NULL==hw_desc) || (0==hw_desc->dcache_line_size[0]) ) cache_line = 64; else cache_line = hw_desc->dcache_line_size[0]; // Print the core to which each thread is pinned. print_core_affinities(ofp_papi); // Go through each parameter variant. for(pattern = 3; pattern <= 4; ++pattern) { for(f = 1; f <= 2; f *= 2) { stride = cache_line*f; // PPB variation only makes sense if the pattern is not sequential. if(pattern != 4) { for(ppb = 64; ppb >= 16; ppb -= 48) { if( show_progress ) { printf("%3d%%\b\b\b\b",(100*test_cnt++)/6); fflush(stdout); } status = d_cache_test(pattern, max_iter, hw_desc, stride, ppb, papi_event_name, latency_only, mode, ofp_papi); if( status < 0 ) goto error2; } } else { if( show_progress ) { printf("%3d%%\b\b\b\b",(100*test_cnt++)/6); fflush(stdout); } status = d_cache_test(pattern, max_iter, hw_desc, stride, ppb, papi_event_name, latency_only, mode, ofp_papi); if( status < 0 ) goto error2; } } } error2: if( show_progress ) { size_t i; printf("100%%"); for(i=0; i<strlen("Total:100% Current test:100%"); i++) putchar('\b'); fflush(stdout); } // Close files and free memory. fclose(ofp_papi); error1: free(papiFileName); error0: free(sufx); return; } int d_cache_test(int pattern, int max_iter, hw_desc_t *hw_desc, int stride_in_bytes, float pages_per_block, char* papi_event_name, int latency_only, int mode, FILE* ofp){ int i,j,k; int *values; double ***rslts, *sorted_rslts; double ***counter, *sorted_counter; int status=0, guessCount, ONT; min_size = 2*1024/sizeof(uintptr_t); // 2KB max_size = 1024*1024*1024/sizeof(uintptr_t);// 1GB // The number of different sizes we will guess, trying to find the right size. guessCount = 0; if( (NULL==hw_desc) || (hw_desc->cache_levels<=0) ){ for(i=min_size; i<max_size; i*=2){ // += 4 for i, i*1.25, i*1.5, i*1.75 guessCount += 4; } }else{ guessCount = _SIZE_SAMPLES_; } // Get the number of threads. ONT = get_thread_count(); // Latency results from the benchmark. rslts = (double ***)malloc(max_iter*sizeof(double **)); for(i=0; i<max_iter; ++i){ rslts[i] = (double **)malloc(guessCount*sizeof(double*)); for(j=0; j<guessCount; ++j){ rslts[i][j] = (double *)malloc(ONT*sizeof(double)); } } sorted_rslts = (double *)malloc(max_iter*sizeof(double)); // Counter results from the benchmark. 
counter = (double ***)malloc(max_iter*sizeof(double **)); for(i=0; i<max_iter; ++i){ counter[i] = (double **)malloc(guessCount*sizeof(double*)); for(j=0; j<guessCount; ++j){ counter[i][j] = (double *)malloc(ONT*sizeof(double)); } } sorted_counter = (double *)malloc(max_iter*sizeof(double)); // List of buffer sizes which are used in the benchmark. values = (int *)malloc(guessCount*sizeof(int)); // Set the name of the event to be monitored during the benchmark. eventname = papi_event_name; for(i=0; i<max_iter; ++i){ status = varyBufferSizes(values, rslts[i], counter[i], hw_desc, stride_in_bytes, pages_per_block, pattern, latency_only, mode, ONT); if( status < 0 ) goto cleanup; } // Sort and print latency and counter results. fprintf(ofp, "# PTRN=%d, STRIDE=%d, PPB=%f, ThreadCount=%d\n", pattern, stride_in_bytes, pages_per_block, ONT); if(latency_only) { for(j=0; j<guessCount; ++j){ fprintf(ofp, "%d", values[j]); for(k=0; k<ONT; ++k){ for(i=0; i<max_iter; ++i){ sorted_rslts[i] = rslts[i][j][k]; } qsort(sorted_rslts, max_iter, sizeof(double), compar_lf); fprintf(ofp, " %.4lf", sorted_rslts[0]); } fprintf(ofp, "\n"); } } else { for(j=0; j<guessCount; ++j){ fprintf(ofp, "%d", values[j]); for(k=0; k<ONT; ++k){ for(i=0; i<max_iter; ++i){ sorted_counter[i] = counter[i][j][k]; } qsort(sorted_counter, max_iter, sizeof(double), compar_lf); fprintf(ofp, " %lf", sorted_counter[0]); } fprintf(ofp, "\n"); } } cleanup: for(i=0; i<max_iter; ++i){ for(j=0; j<guessCount; ++j){ free(rslts[i][j]); free(counter[i][j]); } free(rslts[i]); free(counter[i]); } free(rslts); free(counter); free(sorted_rslts); free(sorted_counter); free(values); return status; } int varyBufferSizes(int *values, double **rslts, double **counter, hw_desc_t *hw_desc, int stride_in_bytes, float pages_per_block, int pattern, int latency_only, int mode, int ONT){ int i, j, k, cnt; long active_buf_len; int allocErr = 0; run_output_t out; int stride = stride_in_bytes/sizeof(uintptr_t); uintptr_t rslt=42, *v[ONT], *ptr[ONT]; // Allocate memory for each thread to traverse. #pragma omp parallel private(i) reduction(+:rslt) default(shared) { int idx = omp_get_thread_num(); ptr[idx] = (uintptr_t *)malloc( (2*max_size+stride)*sizeof(uintptr_t) ); if( !ptr[idx] ){ fprintf(stderr, "Error: cannot allocate space for experiment.\n"); #pragma omp critical { allocErr = -1; } }else{ // align v to the stride. v[idx] = (uintptr_t *)(stride_in_bytes*(((uintptr_t)ptr[idx]+stride_in_bytes)/stride_in_bytes)); // touch every page at least a few times for(i=0; i<2*max_size; i+=512){ rslt += v[idx][i]; } } } if(allocErr != 0) { goto error; } // Make a cold run out = probeBufferSize(16*stride, stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT); if(out.status != 0) goto error; // Run the actual experiment if( (NULL==hw_desc) || (hw_desc->cache_levels<=0) ){ cnt = 0; // If we don't know the cache sizes, space the measurements between two default values. 
for(active_buf_len=min_size; active_buf_len<max_size; active_buf_len*=2){ out = probeBufferSize(active_buf_len, stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT); if(out.status != 0) goto error; for(k = 0; k < ONT; ++k) { rslts[cnt][k] = out.dt[k]; counter[cnt][k] = out.counter[k]; } values[cnt++] = ONT*sizeof(uintptr_t)*active_buf_len; out = probeBufferSize((int)((double)active_buf_len*1.25), stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT); if(out.status != 0) goto error; for(k = 0; k < ONT; ++k) { rslts[cnt][k] = out.dt[k]; counter[cnt][k] = out.counter[k]; } values[cnt++] = ONT*sizeof(uintptr_t)*((int)((double)active_buf_len*1.25)); out = probeBufferSize((int)((double)active_buf_len*1.5), stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT); if(out.status != 0) goto error; for(k = 0; k < ONT; ++k) { rslts[cnt][k] = out.dt[k]; counter[cnt][k] = out.counter[k]; } values[cnt++] = ONT*sizeof(uintptr_t)*((int)((double)active_buf_len*1.5)); out = probeBufferSize((int)((double)active_buf_len*1.75), stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT); if(out.status != 0) goto error; for(k = 0; k < ONT; ++k) { rslts[cnt][k] = out.dt[k]; counter[cnt][k] = out.counter[k]; } values[cnt++] = ONT*sizeof(uintptr_t)*((int)((double)active_buf_len*1.75)); } }else{ int llc; double f, small_size, large_size, curr_size; // If we know the cache sizes, space the measurements between a buffer size equal to L1/8 // and a buffer size that all threads cumulatively will exceed the LLC by a factor of 8. // The rationale is that the L1 is typically private, while the LLC is shared among all cores. llc = hw_desc->dcache_size[hw_desc->cache_levels-1]; small_size = hw_desc->dcache_size[0]/8; large_size = (double)llc; large_size = 8*large_size/ONT; // Choose a factor "f" to grow the buffer size by, such that we collect "_SIZE_SAMPLES_" // number of samples between "small_size" and "large_size", evenly distributed // in a geometric fashion (i.e., sizes will be equally spaced in a log graph). f = pow(large_size/small_size, 1.0/(_SIZE_SAMPLES_-1)); curr_size = small_size; cnt=0; for(j=0; j<_SIZE_SAMPLES_; j++){ active_buf_len = (long)(curr_size/sizeof(uintptr_t)); out = probeBufferSize(active_buf_len, stride, pages_per_block, pattern, v, &rslt, latency_only, mode, ONT); if(out.status != 0) goto error; for(k = 0; k < ONT; ++k) { rslts[cnt][k] = out.dt[k]; counter[cnt][k] = out.counter[k]; } values[cnt++] = sizeof(uintptr_t)*active_buf_len; curr_size *= f; } } // Free each thread's memory. for(j=0; j<ONT; ++j){ free(ptr[j]); } return 0; error: // Free each thread's memory. for(j=0; j<ONT; ++j){ free(ptr[j]); } return -1; } int get_thread_count() { int threadNum = 1; #pragma omp parallel default(shared) { if(!omp_get_thread_num()) { threadNum = omp_get_num_threads(); } } return threadNum; } void print_core_affinities(FILE *ofp) { int k, ONT; int *pinnings = NULL; // Get the number of threads. ONT = get_thread_count(); // List of core affinities in which the index is the thread ID. pinnings = (int *)malloc(ONT*sizeof(int)); if( NULL == pinnings ) { fprintf(stderr, "Error: cannot allocate space for experiment.\n"); return; } #pragma omp parallel default(shared) { int idx = omp_get_thread_num(); pinnings[idx] = sched_getcpu(); } fprintf(ofp, "# Core:"); for(k=0; k<ONT; ++k) { fprintf(ofp, " %d", pinnings[k]); } fprintf(ofp, "\n"); free(pinnings); return; }
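// Minimal illustration of the geometric size schedule in varyBufferSizes():
// when the cache sizes are known, _SIZE_SAMPLES_ buffer sizes are spaced
// evenly on a log scale between L1/8 and 8*LLC/threads. The cache sizes and
// thread count below are made-up examples.
#include <cmath>
#include <cstdio>

int main(void)
{
    const int samples = 40;                          // _SIZE_SAMPLES_
    const double small_size = 32.0*1024/8;           // e.g. 32KB L1 / 8
    const double large_size = 8.0*(16*1024*1024)/8;  // e.g. 8 * 16MB LLC / 8 threads
    const double f = std::pow(large_size/small_size, 1.0/(samples-1));
    double curr = small_size;
    for (int j = 0; j < samples; j++) {
        std::printf("%2d: %.0f bytes\n", j, curr);
        curr *= f;  // each sample is the previous one scaled by a constant factor
    }
    return 0;
}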
matrix_funs_intel_mkl.c
#include "mkl_scalapack.h" #include "matrix_funs_intel_mkl.h" double get_seconds_frac(struct timeval start_timeval, struct timeval end_timeval){ long secs_used, micros_used; secs_used=(end_timeval.tv_sec - start_timeval.tv_sec); micros_used= ((secs_used*1000000) + end_timeval.tv_usec) - (start_timeval.tv_usec); return (micros_used/1e6); } /* initialize new matrix and set all entries to zero */ mat * matrix_new(int nrows, int ncols) { mat *M = malloc(sizeof(mat)); //M->d = (double*)mkl_calloc(nrows*ncols, sizeof(double), 64); M->d = (double*)calloc(nrows*ncols, sizeof(double)); M->nrows = nrows; M->ncols = ncols; return M; } /* initialize new vector and set all entries to zero */ vec * vector_new(int nrows) { vec *v = malloc(sizeof(vec)); //v->d = (double*)mkl_calloc(nrows,sizeof(double), 64); v->d = (double*)calloc(nrows,sizeof(double)); v->nrows = nrows; return v; } void matrix_delete(mat *M) { //mkl_free(M->d); free(M->d); free(M); } void vector_delete(vec *v) { //mkl_free(v->d); free(v->d); free(v); } /* copy contents of mat S to D */ void matrix_copy(mat *D, mat *S){ int i; //#pragma omp parallel for #pragma omp parallel shared(D,S) private(i) { #pragma omp for for(i=0; i<((S->nrows)*(S->ncols)); i++){ D->d[i] = S->d[i]; } } } /* initialize a random matrix */ void initialize_random_matrix(mat *M){ int i,m,n; double val; m = M->nrows; n = M->ncols; float a=0.0,sigma=1.0; int N = m*n; float *r; VSLStreamStatePtr stream; r = (float*)malloc(N*sizeof(float)); vslNewStream( &stream, BRNG, time(NULL) ); //vslNewStream( &stream, BRNG, SEED ); vsRngGaussian( METHOD, stream, N, r, a, sigma ); // read and set elements #pragma omp parallel shared(M,N,r) private(i,val) { #pragma omp parallel for for(i=0; i<N; i++){ val = r[i]; M->d[i] = val; } } free(r); } /* initialize new matrix and set all entries to zero for float*/ void matrix_matrix_mult_row(mat *A, mat* B, mat* C){ double alpha, beta; alpha = 1.0; beta = 0.0; cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, A->nrows, B->ncols, A->ncols, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols); } void matrix_transpose_matrix_mult_row(mat *A, mat* B, mat* C){ double alpha, beta; alpha = 1.0; beta = 0.0; cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans, A->ncols, B->ncols, A->nrows, alpha, A->d, A->ncols, B->d, B->ncols, beta, C->d, C->ncols); } /* C = A*B, A is a file on hard disk */ void matrix_matrix_mult_disk(FILE *A, mat *B, mat *C, int row, int col, int l){ int row_size = l; int read_row_size = row_size; double alpha, beta; int i, j; // count int m=row, n=col, k=B->ncols; alpha = 1.0; beta = 0.0; // printf("matrix_matrix_mult_disk is running\n"); float *M_f = (float*)malloc(read_row_size*n*sizeof(float)); double *M = (double*)malloc(read_row_size*n*sizeof(double)); struct timeval start_timeval_1, end_timeval_1; struct timeval start_timeval_2, end_timeval_2; double sum = 0; double time_1, time_2; gettimeofday(&start_timeval_1, NULL); for (i = 0; i < m; i += row_size){ if (row_size > (m - i)) read_row_size = m - i; gettimeofday(&start_timeval_2, NULL); //time_2 fread(M_f, sizeof(float), n*read_row_size, A); #pragma omp parallel shared(M, M_f,n,read_row_size) private(j) { #pragma omp parallel for for(j=0; j<n*read_row_size; j++){ M[j] = M_f[j]; //leixing zhuanhuan } } gettimeofday(&end_timeval_2, NULL); sum += get_seconds_frac(start_timeval_2 ,end_timeval_2); /* 1*n , n*k = 1*k , all m*k */ cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, read_row_size, B->ncols, n, alpha, M, n, B->d, B->ncols, beta, C->d+i*k, C->ncols); } 
gettimeofday(&end_timeval_1, NULL); time_1 = get_seconds_frac(start_timeval_1 ,end_timeval_1); time_2 = sum; printf("Time for reading data file_(fread-time1): %g second\n",time_2); printf("Time for matrix_matrix_mult: %g second\n", time_1); free(M_f); free(M); } /* C = A^T*B ; column major */ /* n*m , m*k = n*k , all n*k */ void matrix_transpose_matrix_mult_disk(FILE *A, mat *B, mat *C, int row, int col, int l){ int row_size = l; int read_row_size = row_size; double alpha, beta; int i, j; // count int m=row, n=col, k=B->ncols; float *M_f = (float*)malloc(read_row_size*n*sizeof(float)); double *M = (double*)malloc(read_row_size*n*sizeof(double));; // printf("matrix_transpose_matrix_mult_disk is running\n"); alpha = 1.0; beta = 1.0; // innitial C=0 #pragma omp parallel shared(C) private(i) { #pragma omp parallel for for(i=0; i < (C->nrows*C->ncols); i++){ C->d[i] = 0.0; } } struct timeval start_timeval_1, end_timeval_1; struct timeval start_timeval_2, end_timeval_2; double sum = 0; double time_1, time_2; gettimeofday(&start_timeval_1, NULL); for (i = 0; i < m; i += row_size){ if (row_size > (m-i) ) read_row_size = m-i; gettimeofday(&start_timeval_2, NULL); //time_2 fread(M_f, sizeof(float), n*read_row_size, A); // cblas_dcopy(k, B->d+i*k, 1, g_row, 1); //g_row = g[i]; #pragma omp parallel shared(M,M_f,n,read_row_size) private(j) { #pragma omp parallel for for(j=0; j < n * read_row_size; j++){ M[j] = M_f[j]; } } gettimeofday(&end_timeval_2, NULL); sum += get_seconds_frac(start_timeval_2 ,end_timeval_2); cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans, n, B->ncols, read_row_size, alpha, M, n, B->d+i*k, B->ncols, beta, C->d, C->ncols); } gettimeofday(&end_timeval_1, NULL); time_1 = get_seconds_frac(start_timeval_1 ,end_timeval_1); time_2 = sum; printf("Time for reading data file_(fread-time2): %g second\n",time_2); printf("Time for matrix_transpose_matrix_mult: %g second\n", time_1); free(M_f); free(M); } /* Performs [Q,R] = qr(M,'0') compact QR factorization M is mxn ; Q is mxn ; R is min(m,n) x min(m,n) */ void compact_QR_factorization(mat *M, mat *Q, mat *R){ int i,j,m,n,k; m = M->nrows; n = M->ncols; k = min(m,n); mat *R_full = matrix_new(m,n); matrix_copy(R_full,M); //vec *tau = vector_new(n); vec *tau = vector_new(k); // get R //printf("get R..\n"); //LAPACKE_dgeqrf(CblasColMajor, m, n, R_full->d, n, tau->d); LAPACKE_dgeqrf(LAPACK_ROW_MAJOR, R_full->nrows, R_full->ncols, R_full->d, R_full->ncols, tau->d); for(i=0; i<k; i++){ for(j=0; j<k; j++){ if(j>=i){ matrix_set_element(R,i,j,matrix_get_element(R_full,i,j)); } } } // get Q matrix_copy(Q,R_full); //printf("dorgqr..\n"); LAPACKE_dorgqr(LAPACK_ROW_MAJOR, Q->nrows, Q->ncols, min(Q->ncols,Q->nrows), Q->d, Q->ncols, tau->d); // clean up matrix_delete(R_full); vector_delete(tau); } /* orth (Q)*/ void QR_factorization_getQ_inplace(mat *Q){ int i,j,m,n,k; m = Q->nrows; n = Q->ncols; k = min(m,n); vec *tau = vector_new(k); /* do QR */ //sometime core dump, bug of MKL //LAPACKE_dgeqrf(LAPACK_ROW_MAJOR, m, n, Q->d, n, tau->d); /* do QRCP */ //more stable, but more expensive printf("Warning: use QRCP to replace QR! 
(see line 269 of matrix_funs_intel_mkl.c)\n"); int *jpvt = (int *)malloc(sizeof(int)*n); LAPACKE_dgeqpf(LAPACK_ROW_MAJOR, m, n, Q->d, n, jpvt, tau->d); free(jpvt); LAPACKE_dorgqr(LAPACK_ROW_MAJOR, m, n, k, Q->d, n, tau->d); vector_delete(tau); } /* M(:,inds) = Mc */ void matrix_set_selected_columns(mat *M, int *inds, mat *Mc){ int i; vec *col_vec; #pragma omp parallel shared(M,Mc,inds) private(i,col_vec) { #pragma omp parallel for for(i=0; i<(Mc->ncols); i++){ col_vec = vector_new(M->nrows); matrix_get_col(Mc,i,col_vec); matrix_set_col(M,inds[i],col_vec); vector_delete(col_vec); } } } /* M(inds,:) = Mr */ void matrix_set_selected_rows(mat *M, int *inds, mat *Mr){ //modify int i; vec *row_vec; #pragma omp parallel shared(M,Mr,inds) private(i,row_vec) { #pragma omp parallel for for(i=0; i<(Mr->nrows); i++){ row_vec = vector_new(M->ncols); matrix_get_row(Mr,i,row_vec); matrix_set_row(M,inds[i],row_vec); vector_delete(row_vec); } } } /* Mc = M(:,inds) */ void matrix_get_selected_columns(mat *M, int *inds, mat *Mc){ //modify int i; vec *col_vec; #pragma omp parallel shared(M,Mc,inds) private(i,col_vec) { #pragma omp parallel for for(i=0; i<(Mc->ncols); i++){ col_vec = vector_new(M->nrows); matrix_get_col(M,inds[i],col_vec); matrix_set_col(Mc,i,col_vec); vector_delete(col_vec); } } } /* extract column of a matrix into a vector */ void matrix_get_col(mat *M, int j, vec *column_vec){//modify int i; // unclear #pragma omp parallel shared(column_vec,M,j) private(i) {//unclear #pragma omp parallel for for(i=0; i<M->nrows; i++){ vector_set_element(column_vec,i,matrix_get_element(M,i,j)); } } } /* extract row i of a matrix into a vector */ void matrix_get_row(mat *M, int i, vec *row_vec){//modify int j; #pragma omp parallel shared(row_vec,M,i) private(j) { #pragma omp parallel for for(j=0; j<M->ncols; j++){ vector_set_element(row_vec,j,matrix_get_element(M,i,j)); } } } /* set column of matrix to vector */ void matrix_set_col(mat *M, int j, vec *column_vec){ //modify int i; #pragma omp parallel shared(column_vec,M,j) private(i) { #pragma omp for for(i=0; i<M->nrows; i++){ matrix_set_element(M,i,j,vector_get_element(column_vec,i)); } } } /* put vector row_vec as row i of a matrix */ void matrix_set_row(mat *M, int i, vec *row_vec){ //modify int j; #pragma omp parallel shared(row_vec,M,i) private(j) { #pragma omp parallel for for(j=0; j<M->ncols; j++){ matrix_set_element(M,i,j,vector_get_element(row_vec,j)); } } } /* set vector element */ void vector_set_element(vec *v, int row_num, double val){ //modify v->d[row_num] = val; } /* set element in column major format */ void matrix_set_element(mat *M, int row_num, int col_num, double val){ //modify M->d[row_num*(M->ncols) + col_num] = val; } /* get element in column major format */ double matrix_get_element(mat *M, int row_num, int col_num){ //modify return M->d[row_num*(M->ncols) + col_num]; } double vector_get_element(vec *v, int row_num){ return v->d[row_num]; } /*********************Lijian***********************/ /* C = A*B & D = A^T*C */ void matrix_union_matrix_mult_disk_mem(FILE *A, mat *B, mat *C, mat *D, int row, int col, int row_size){ int read_row_size = row_size; double alpha, beta ,gama; int i, j; // count int m=row, n=col, k=B->ncols; float *M_f = (float*)malloc(read_row_size*n*sizeof(float)); double *M = (double*)malloc(read_row_size*n*sizeof(double)); // double *g_row= (double*)malloc(k*sizeof(double)); //C's row vector' //printf("matrix_union_matrix_mult_disk_mem is running\n"); alpha = 1.0; beta = 0.0; gama = 1.0; struct timeval 
start_timeval_1, end_timeval_1; struct timeval start_timeval_2, end_timeval_2; double sum = 0; double time_1, time_2; gettimeofday(&start_timeval_1, NULL); //time_1 // #pragma omp parallel shared(D) private(i) // { // #pragma omp parallel for // for(i=0; i < (D->nrows*D->ncols); i++){ // D->d[i] = 0.0; // } // } for (i = 0; i < m; i += row_size) { if (row_size > (m-i) ) read_row_size = m-i; gettimeofday(&start_timeval_2, NULL); //time_2 fread(M_f, sizeof(float), n*read_row_size, A); #pragma omp parallel shared(M,M_f,n,read_row_size) private(j) { #pragma omp parallel for for(j=0; j < n*read_row_size; j++){ M[j] = M_f[j]; } } gettimeofday(&end_timeval_2, NULL); sum += get_seconds_frac(start_timeval_2 ,end_timeval_2); // C = A*D cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, read_row_size, B->ncols, n, alpha, M, n, B->d, B->ncols, beta, C->d+i*k, C->ncols); // B = A^T*C exchange B & C // cblas_dcopy(k, C->d+i*k, 1, g_row, 1); //g_row = g[i]; // cblas_dger(CblasRowMajor, D->nrows, D->ncols, alpha, M, 1, g_row, 1, D->d, D->ncols); //A := alpha*x*y'+ A, cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans, n, C->ncols, read_row_size, alpha, M, n, C->d+i*k, C->ncols, gama, D->d, D->ncols); } gettimeofday(&end_timeval_1, NULL); time_1 = get_seconds_frac(start_timeval_1 ,end_timeval_1); time_2 = sum; printf("Time for reading data file_(fread-time): %g second\n",time_2); printf("Time for matrix_union_matrix_mult: %g second\n", time_1); //printf("matrix_union_mem is %d KB\n", getCurrentRSS()/1024); free(M_f); free(M); } //input A B C //output D E // D=A*B E=A^T*C void matrix_union_matrix_mult_disk_mem_2(FILE *A, mat *B, mat *C, mat *D, mat*E, int row, int col, int row_size){ int read_row_size = row_size; double alpha, beta ,gama; int i, j; // count int m=row, n=col, k=B->ncols; float *M_f = (float*)malloc(read_row_size*n*sizeof(float)); double *M = (double*)malloc(read_row_size*n*sizeof(double)); // double *g_row= (double*)malloc(k*sizeof(double)); //C's row vector' //printf("matrix_union_matrix_mult_disk_mem_2 is running\n"); //matrix_copy(D,B); //D=B // float *a_g_mult=(float*)malloc(n*k*sizeof(float)); // ai * gi , n*k alpha = 1.0; beta = 0.0; gama = 1.0; struct timeval start_timeval_1, end_timeval_1; struct timeval start_timeval_2, end_timeval_2; double sum = 0; double time_1, time_2; gettimeofday(&start_timeval_1, NULL); //time_1 // #pragma omp parallel shared(E) private(i) // { // #pragma omp parallel for // for(i=0; i < (E->nrows*E->ncols); i++){ // E->d[i] = 0.0; // } // } for (i = 0; i < m; i += row_size) { if (row_size > (m-i) ) read_row_size = m-i; gettimeofday(&start_timeval_2, NULL); //time_2 fread(M_f, sizeof(float), n*read_row_size, A); #pragma omp parallel shared(M,M_f,n,read_row_size) private(j) { #pragma omp parallel for for(j=0; j < n*read_row_size; j++){ M[j] = M_f[j]; } } gettimeofday(&end_timeval_2, NULL); sum += get_seconds_frac(start_timeval_2 ,end_timeval_2); // C = A*D cblas_dgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, read_row_size, B->ncols, n, alpha, M, n, B->d, B->ncols, beta, D->d+i*k, D->ncols); // B = A^T*C exchange B & C // cblas_dcopy(k, C->d+i*k, 1, g_row, 1); //g_row = g[i]; // cblas_dger(CblasRowMajor, D->nrows, D->ncols, alpha, M, 1, g_row, 1, D->d, D->ncols); //A := alpha*x*y'+ A, cblas_dgemm(CblasRowMajor, CblasTrans, CblasNoTrans, n, C->ncols, read_row_size, alpha, M, n, C->d+(i*C->ncols), C->ncols, gama, E->d, E->ncols); } gettimeofday(&end_timeval_1, NULL); time_1 = get_seconds_frac(start_timeval_1 ,end_timeval_1); time_2 = sum; printf("Time for 
reading data file_(fread-time): %g second\n",time_2); printf("Time for matrix_union_matrix_mult2: %g second\n", time_1); free(M_f); free(M); } /* k*n = k*k k*k n*k */ void svd_row_cut (mat *A, mat *U, vec *E, mat * V) { int m = A->nrows; int n = A->ncols; int i, j; // mat *A_in = matrix_new(m,n);; // matrix_copy(A_in, A); // printf("dong tai sheng qing\n"); // double *u = (double*)malloc(m*m*sizeof(double)); double *vt = (double*)malloc(n*m*sizeof(double)); // printf("svd is running\n"); // LAPACKE_dgesvd(LAPACK_ROW_MAJOR, 'A', 'S', m, n, A->d, n, E->d, U->d, m, vt, n, superb); // LAPACKE_dgesdd( int matrix_layout, char jobz, lapack_int m, lapack_int n, double* a, lapack_int lda, double* s, double* u, lapack_int ldu, double* vt, lapack_int ldvt ); LAPACKE_dgesdd(LAPACK_ROW_MAJOR,'S', m, n, A->d, n, E->d, U->d, m, vt, n); //printf("Complete Lapack svd\n\n"); for (i = 0; i < m; i++) { for (j = 0; j < n; j++) { V->d[j * m + i] = vt[i * n+ j] ; } } // printf("svd_row_cut is over\n"); // matrix_delete(A_in); free(vt); } /* D = M(:,inds)' */ void matrix_get_selected_columns_and_transpose(mat *M, int *inds, mat *Mc){ int i; vec *col_vec; #pragma omp parallel shared(M,Mc,inds) private(i,col_vec) { #pragma omp parallel for for(i=0; i<(Mc->nrows); i++){ col_vec = vector_new(M->nrows); matrix_get_col(M,inds[i],col_vec); matrix_set_row(Mc,i,col_vec); vector_delete(col_vec); } } } void linear_solve_UTxb(mat *A, mat *b) { LAPACKE_dtrtrs(LAPACK_ROW_MAJOR, 'U', 'T', 'N', //unclear b->nrows, b->ncols, A->d, A->ncols, b->d, b->ncols ); } /* C = beta*C + alpha*A(1:Anrows, 1:Ancols)[T]*B(1:Bnrows, 1:Bncols)[T] */ void submatrix_submatrix_mult_with_ab(mat *A, mat *B, mat *C, int Anrows, int Ancols, int Bnrows, int Bncols, int transa, int transb, double alpha, double beta) { int opAnrows, opAncols, opBnrows, opBncols; if (transa == CblasTrans) { opAnrows = Ancols; opAncols = Anrows; } else { opAnrows = Anrows; opAncols = Ancols; } if (transb == CblasTrans) { opBnrows = Bncols; opBncols = Bnrows; } else { opBnrows = Bnrows; opBncols = Bncols; } if (opAncols != opBnrows) { printf("error in submatrix_submatrix_mult()"); exit(0); } cblas_dgemm(CblasRowMajor, transa, transb, opAnrows, opBncols, // m, n, opAncols, // k alpha, A->d, A->ncols, // lda // modify B->d, B->ncols, // ldb beta, C->d, C->ncols // ldc ); } void submatrix_submatrix_mult(mat *A, mat *B, mat *C, int Anrows, int Ancols, int Bnrows, int Bncols, int transa, int transb) { double alpha, beta; alpha = 1.0; beta = 0.0; submatrix_submatrix_mult_with_ab(A, B, C, Anrows, Ancols, Bnrows, Bncols, transa, transb, alpha, beta); }
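// Standalone sketch of the streaming pattern used by matrix_matrix_mult_disk
// above: A is stored on disk as row-major float, read row_size rows at a
// time, widened to double, and multiplied into the matching rows of C. The
// cblas_dgemm call is replaced by a naive triple loop so the sketch compiles
// without MKL; the function name and dimensions are hypothetical.
#include <cstdio>
#include <vector>

void mult_from_disk_sketch(std::FILE *A, const std::vector<double> &B,
                           std::vector<double> &C, int m, int n, int k, int row_size)
{
    std::vector<float> Mf((size_t)row_size*n);
    std::vector<double> M((size_t)row_size*n);
    for (int i = 0; i < m; i += row_size) {
        const int rows = (row_size > m - i) ? (m - i) : row_size;
        if (std::fread(Mf.data(), sizeof(float), (size_t)rows*n, A) != (size_t)rows*n)
            return;  // short read: stop early
        for (int j = 0; j < rows*n; j++) M[j] = Mf[j];  // float -> double conversion
        // C[i..i+rows) = M * B, the stand-in for cblas_dgemm
        for (int r = 0; r < rows; r++)
            for (int c = 0; c < k; c++) {
                double s = 0.0;
                for (int t = 0; t < n; t++) s += M[(size_t)r*n + t] * B[(size_t)t*k + c];
                C[(size_t)(i + r)*k + c] = s;
            }
    }
}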
GB_unaryop__minv_int8_uint16.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_int8_uint16 // op(A') function: GB_tran__minv_int8_uint16 // C type: int8_t // A type: uint16_t // cast: int8_t cij = (int8_t) aij // unaryop: cij = GB_IMINV_SIGNED (aij, 8) #define GB_ATYPE \ uint16_t #define GB_CTYPE \ int8_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint16_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_IMINV_SIGNED (x, 8) ; // casting #define GB_CASTING(z, aij) \ int8_t z = (int8_t) aij ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_INT8 || GxB_NO_UINT16) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_int8_uint16 ( int8_t *Cx, // Cx and Ax may be aliased uint16_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_int8_uint16 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
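// GB_IMINV_SIGNED is defined in GB.h and is not reproduced here. As an
// illustration only, a division-safe signed integer reciprocal could look
// like the stand-in below; it shows the shape of the scalar op the kernel
// applies, NOT the actual GraphBLAS definition.
#include <cstdint>

static inline int8_t iminv_int8_sketch (int8_t x)
{
    if (x == 0) return INT8_MAX ;   // guard the x == 0 case
    return (int8_t) (1 / x) ;       // 1 for x==1, -1 for x==-1, else 0
}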
DenseVector.h
//================================================================================================= /*! // \file blaze/math/smp/openmp/DenseVector.h // \brief Header file for the OpenMP-based dense vector SMP implementation // // Copyright (C) 2012-2019 Klaus Iglberger - All Rights Reserved // // This file is part of the Blaze library. You can redistribute it and/or modify it under // the terms of the New (Revised) BSD License. Redistribution and use in source and binary // forms, with or without modification, are permitted provided that the following conditions // are met: // // 1. Redistributions of source code must retain the above copyright notice, this list of // conditions and the following disclaimer. // 2. Redistributions in binary form must reproduce the above copyright notice, this list // of conditions and the following disclaimer in the documentation and/or other materials // provided with the distribution. // 3. Neither the names of the Blaze development group nor the names of its contributors // may be used to endorse or promote products derived from this software without specific // prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY // EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES // OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT // SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED // TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR // BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH // DAMAGE. 
*/ //================================================================================================= #ifndef _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ #define _BLAZE_MATH_SMP_OPENMP_DENSEVECTOR_H_ //************************************************************************************************* // Includes //************************************************************************************************* #include <omp.h> #include <blaze/math/Aliases.h> #include <blaze/math/constraints/SMPAssignable.h> #include <blaze/math/expressions/DenseVector.h> #include <blaze/math/expressions/SparseVector.h> #include <blaze/math/functors/AddAssign.h> #include <blaze/math/functors/Assign.h> #include <blaze/math/functors/DivAssign.h> #include <blaze/math/functors/MultAssign.h> #include <blaze/math/functors/SubAssign.h> #include <blaze/math/simd/SIMDTrait.h> #include <blaze/math/smp/ParallelSection.h> #include <blaze/math/smp/SerialSection.h> #include <blaze/math/typetraits/IsDenseVector.h> #include <blaze/math/typetraits/IsSIMDCombinable.h> #include <blaze/math/typetraits/IsSMPAssignable.h> #include <blaze/math/views/Subvector.h> #include <blaze/system/SMP.h> #include <blaze/util/algorithms/Min.h> #include <blaze/util/Assert.h> #include <blaze/util/EnableIf.h> #include <blaze/util/FunctionTrace.h> #include <blaze/util/StaticAssert.h> #include <blaze/util/Types.h> namespace blaze { //================================================================================================= // // OPENMP-BASED ASSIGNMENT KERNELS // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a dense vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be assigned. // \param op The (compound) assignment operation. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a dense // vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side dense vector , bool TF2 // Transpose flag of the right-hand side dense vector , typename OP > // Type of the assignment operation void openmpAssign( DenseVector<VT1,TF1>& lhs, const DenseVector<VT2,TF2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); using ET1 = ElementType_t<VT1>; using ET2 = ElementType_t<VT2>; constexpr bool simdEnabled( VT1::simdEnabled && VT2::simdEnabled && IsSIMDCombinable_v<ET1,ET2> ); constexpr size_t SIMDSIZE( SIMDTrait< ElementType_t<VT1> >::size ); const bool lhsAligned( (~lhs).isAligned() ); const bool rhsAligned( (~rhs).isAligned() ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 
1UL : 0UL ); const size_t equalShare ( (~lhs).size() / threads + addon ); const size_t rest ( equalShare & ( SIMDSIZE - 1UL ) ); const size_t sizePerThread( ( simdEnabled && rest )?( equalShare - rest + SIMDSIZE ):( equalShare ) ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); if( simdEnabled && lhsAligned && rhsAligned ) { auto target( subvector<aligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else if( simdEnabled && lhsAligned ) { auto target( subvector<aligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else if( simdEnabled && rhsAligned ) { auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<aligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } else { auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } } } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Backend of the OpenMP-based SMP (compound) assignment of a sparse vector to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \param op The (compound) assignment operation. // \return void // // This function is the backend implementation of the OpenMP-based SMP assignment of a sparse // vector to a dense vector.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side sparse vector , bool TF2 // Transpose flag of the right-hand side sparse vector , typename OP > // Type of the assignment operation void openmpAssign( DenseVector<VT1,TF1>& lhs, const SparseVector<VT2,TF2>& rhs, OP op ) { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( isParallelSectionActive(), "Invalid call outside a parallel section" ); const int threads ( omp_get_num_threads() ); const size_t addon ( ( ( (~lhs).size() % threads ) != 0UL )? 1UL : 0UL ); const size_t sizePerThread( (~lhs).size() / threads + addon ); #pragma omp for schedule(dynamic,1) nowait for( int i=0UL; i<threads; ++i ) { const size_t index( i*sizePerThread ); if( index >= (~lhs).size() ) continue; const size_t size( min( sizePerThread, (~lhs).size() - index ) ); auto target( subvector<unaligned>( ~lhs, index, size, unchecked ) ); const auto source( subvector<unaligned>( ~rhs, index, size, unchecked ) ); op( target, source ); } } /*! 
\endcond */ //************************************************************************************************* //================================================================================================= // // PLAIN ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be assigned. // \return void // // This function implements the default OpenMP-based SMP assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); assign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be assigned. // \return void // // This function performs the OpenMP-based SMP assignment to a dense vector. Due to the // explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. 
*/ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { assign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, Assign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // ADDITION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP addition assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be added. // \return void // // This function implements the default OpenMP-based SMP addition assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); addAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP addition assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side sparse vector to be added. // \return void // // This function implements the OpenMP-based SMP addition assignment to a dense vector. 
Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands are // not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpAddAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { addAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, AddAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // SUBTRACTION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP subtraction assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be subtracted. // \return void // // This function implements the default OpenMP-based SMP subtraction assignment of a vector to // a dense vector. Due to the explicit application of the SFINAE principle, this function can // only be selected by the compiler in case at least one of the two operands is not // SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); subAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*!
\cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP subtraction assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be subtracted. // \return void // // This function implements the OpenMP-based SMP subtraction assignment to a dense vector. Due // to the explicit application of the SFINAE principle, this function can only be selected by // the compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpSubAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { subAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, SubAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // MULTIPLICATION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector to be multiplied. // \return void // // This function implements the default OpenMP-based SMP multiplication assignment to a dense // vector. Due to the explicit application of the SFINAE principle, this function can only be // selected by the compiler in case at least one of the two operands is not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator.
*/ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); multAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP multiplication assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector to be multiplied. // \return void // // This function implements the OpenMP-based SMP multiplication assignment to a dense vector. // Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case both operands are SMP-assignable and the element types of both // operands are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpMultAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { multAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, MultAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // DIVISION ASSIGNMENT // //================================================================================================= //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Default implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side vector divisor. // \return void // // This function implements the default OpenMP-based SMP division assignment to a dense vector. 
// Due to the explicit application of the SFINAE principle, this function can only be selected // by the compiler in case at least one of the two operands is not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && ( !IsSMPAssignable_v<VT1> || !IsSMPAssignable_v<VT2> ) > { BLAZE_FUNCTION_TRACE; BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); divAssign( ~lhs, ~rhs ); } /*! \endcond */ //************************************************************************************************* //************************************************************************************************* /*! \cond BLAZE_INTERNAL */ /*!\brief Implementation of the OpenMP-based SMP division assignment to a dense vector. // \ingroup smp // // \param lhs The target left-hand side dense vector. // \param rhs The right-hand side dense vector divisor. // \return void // // This function implements the OpenMP-based SMP division assignment to a dense vector. Due to // the explicit application of the SFINAE principle, this function can only be selected by the // compiler in case both operands are SMP-assignable and the element types of both operands // are not SMP-assignable.\n // This function must \b NOT be called explicitly! It is used internally for the performance // optimized evaluation of expression templates. Calling this function explicitly might result // in erroneous results and/or in compilation errors. Instead of using this function use the // assignment operator. */ template< typename VT1 // Type of the left-hand side dense vector , bool TF1 // Transpose flag of the left-hand side dense vector , typename VT2 // Type of the right-hand side vector , bool TF2 > // Transpose flag of the right-hand side vector inline auto smpDivAssign( Vector<VT1,TF1>& lhs, const Vector<VT2,TF2>& rhs ) -> EnableIf_t< IsDenseVector_v<VT1> && IsSMPAssignable_v<VT1> && IsSMPAssignable_v<VT2> > { BLAZE_FUNCTION_TRACE; BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT1> ); BLAZE_CONSTRAINT_MUST_NOT_BE_SMP_ASSIGNABLE( ElementType_t<VT2> ); BLAZE_INTERNAL_ASSERT( (~lhs).size() == (~rhs).size(), "Invalid vector sizes" ); BLAZE_PARALLEL_SECTION { if( isSerialSectionActive() || !(~rhs).canSMPAssign() ) { divAssign( ~lhs, ~rhs ); } else { #pragma omp parallel shared( lhs, rhs ) openmpAssign( ~lhs, ~rhs, DivAssign() ); } } } /*! \endcond */ //************************************************************************************************* //================================================================================================= // // COMPILE TIME CONSTRAINTS // //================================================================================================= //************************************************************************************************* /*!
\cond BLAZE_INTERNAL */ namespace { BLAZE_STATIC_ASSERT( BLAZE_OPENMP_PARALLEL_MODE ); } /*! \endcond */ //************************************************************************************************* } // namespace blaze #endif
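The two-overload dispatch above is easiest to see from the calling side. The following minimal sketch is an editorial addition, not part of the Blaze sources; vector sizes are illustrative. With Blaze built in OpenMP parallel mode (BLAZE_OPENMP_PARALLEL_MODE) and SMP-assignable operand types such as blaze::DynamicVector, plain and compound assignments are routed through the smpAssign()/smpAddAssign() overloads above, entering the OpenMP parallel section for targets that report canSMPAssign() and falling back to the serial assign()/addAssign() path otherwise.

#include <blaze/Blaze.h>

int main()
{
   // Large dense vectors; DynamicVector is SMP-assignable, so the
   // OpenMP-parallel overloads above are the candidates selected by SFINAE.
   blaze::DynamicVector<double> x( 1000000UL, 1.0 );
   blaze::DynamicVector<double> y( 1000000UL, 2.0 );
   blaze::DynamicVector<double> z( 1000000UL );

   z  = x + y;   // plain assignment    -> smpAssign()
   z += x;       // addition assignment -> smpAddAssign()

   return 0;
}

The expression templates defer all work to the assignment, which is exactly why the SMP dispatch hooks into smpAssign() and friends rather than into the arithmetic operators themselves.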
cgemm.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgemm.c, normal z -> c, Fri Sep 28 17:38:01 2018 * **/ #include "plasma.h" #include "plasma_async.h" #include "plasma_context.h" #include "plasma_descriptor.h" #include "plasma_internal.h" #include "plasma_tuning.h" #include "plasma_types.h" #include "plasma_workspace.h" /***************************************************************************//** * * @ingroup plasma_gemm * * Performs one of the matrix-matrix operations * * \f[ C = \alpha [op( A )\times op( B )] + \beta C, \f] * * where op( X ) is one of: * \f[ op( X ) = X, \f] * \f[ op( X ) = X^T, \f] * \f[ op( X ) = X^H, \f] * * alpha and beta are scalars, and A, B and C are matrices, with op( A ) * an m-by-k matrix, op( B ) a k-by-n matrix and C an m-by-n matrix. * ******************************************************************************* * * @param[in] transa * - PlasmaNoTrans: A is not transposed, * - PlasmaTrans: A is transposed, * - PlasmaConjTrans: A is conjugate transposed. * * @param[in] transb * - PlasmaNoTrans: B is not transposed, * - PlasmaTrans: B is transposed, * - PlasmaConjTrans: B is conjugate transposed. * * @param[in] m * The number of rows of the matrix op( A ) and of the matrix C. * m >= 0. * * @param[in] n * The number of columns of the matrix op( B ) and of the matrix C. * n >= 0. * * @param[in] k * The number of columns of the matrix op( A ) and the number of rows * of the matrix op( B ). k >= 0. * * @param[in] alpha * The scalar alpha. * * @param[in] pA * An lda-by-ka matrix, where ka is k when transa = PlasmaNoTrans, * and is m otherwise. * * @param[in] lda * The leading dimension of the array A. * When transa = PlasmaNoTrans, lda >= max(1,m), * otherwise, lda >= max(1,k). * * @param[in] pB * An ldb-by-kb matrix, where kb is n when transb = PlasmaNoTrans, * and is k otherwise. * * @param[in] ldb * The leading dimension of the array B. * When transb = PlasmaNoTrans, ldb >= max(1,k), * otherwise, ldb >= max(1,n). * * @param[in] beta * The scalar beta. * * @param[in,out] pC * An ldc-by-n matrix. On exit, the array is overwritten by the m-by-n * matrix ( alpha*op( A )*op( B ) + beta*C ). * * @param[in] ldc * The leading dimension of the array C. ldc >= max(1,m). * ******************************************************************************* * * @retval PlasmaSuccess successful exit * ******************************************************************************* * * @sa plasma_omp_cgemm * @sa plasma_cgemm * @sa plasma_dgemm * @sa plasma_sgemm * ******************************************************************************/ int plasma_cgemm(plasma_enum_t transa, plasma_enum_t transb, int m, int n, int k, plasma_complex32_t alpha, plasma_complex32_t *pA, int lda, plasma_complex32_t *pB, int ldb, plasma_complex32_t beta, plasma_complex32_t *pC, int ldc) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); return PlasmaErrorNotInitialized; } // Check input arguments. 
if ((transa != PlasmaNoTrans) && (transa != PlasmaTrans) && (transa != PlasmaConjTrans)) { plasma_error("illegal value of transa"); return -1; } if ((transb != PlasmaNoTrans) && (transb != PlasmaTrans) && (transb != PlasmaConjTrans)) { plasma_error("illegal value of transb"); return -2; } if (m < 0) { plasma_error("illegal value of m"); return -3; } if (n < 0) { plasma_error("illegal value of n"); return -4; } if (k < 0) { plasma_error("illegal value of k"); return -5; } int am, an; int bm, bn; if (transa == PlasmaNoTrans) { am = m; an = k; } else { am = k; an = m; } if (transb == PlasmaNoTrans) { bm = k; bn = n; } else { bm = n; bn = k; } if (lda < imax(1, am)) { plasma_error("illegal value of lda"); return -8; } if (ldb < imax(1, bm)) { plasma_error("illegal value of ldb"); return -10; } if (ldc < imax(1, m)) { plasma_error("illegal value of ldc"); return -13; } // quick return if (m == 0 || n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0)) return PlasmaSuccess; // Tune parameters. if (plasma->tuning) plasma_tune_gemm(plasma, PlasmaComplexFloat, m, n, k); // Set tiling parameters. int nb = plasma->nb; // Create tile matrices. plasma_desc_t A; plasma_desc_t B; plasma_desc_t C; int retval; retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, am, an, 0, 0, am, an, &A); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); return retval; } retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, bm, bn, 0, 0, bm, bn, &B); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); return retval; } retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb, m, n, 0, 0, m, n, &C); if (retval != PlasmaSuccess) { plasma_error("plasma_desc_general_create() failed"); plasma_desc_destroy(&A); plasma_desc_destroy(&B); return retval; } // Initialize sequence. plasma_sequence_t sequence; retval = plasma_sequence_init(&sequence); // Initialize request. plasma_request_t request; retval = plasma_request_init(&request); // asynchronous block #pragma omp parallel #pragma omp master { // Translate to tile layout. plasma_omp_cge2desc(pA, lda, A, &sequence, &request); plasma_omp_cge2desc(pB, ldb, B, &sequence, &request); plasma_omp_cge2desc(pC, ldc, C, &sequence, &request); // Call the tile async function. plasma_omp_cgemm(transa, transb, alpha, A, B, beta, C, &sequence, &request); // Translate back to LAPACK layout. plasma_omp_cdesc2ge(C, pC, ldc, &sequence, &request); } // implicit synchronization // Free matrices in tile layout. plasma_desc_destroy(&A); plasma_desc_destroy(&B); plasma_desc_destroy(&C); // Return status. int status = sequence.status; return status; } /***************************************************************************//** * * @ingroup plasma_gemm * * Performs matrix multiplication. * Non-blocking tile version of plasma_cgemm(). * May return before the computation is finished. * Operates on matrices stored by tiles. * All matrices are passed through descriptors. * All dimensions are taken from the descriptors. * Allows for pipelining of operations at runtime. * ******************************************************************************* * * @param[in] transa * - PlasmaNoTrans: A is not transposed, * - PlasmaTrans: A is transposed, * - PlasmaConjTrans: A is conjugate transposed. * * @param[in] transb * - PlasmaNoTrans: B is not transposed, * - PlasmaTrans: B is transposed, * - PlasmaConjTrans: B is conjugate transposed. * * @param[in] alpha * The scalar alpha. 
* * @param[in] A * Descriptor of matrix A. * * @param[in] B * Descriptor of matrix B. * * @param[in] beta * The scalar beta. * * @param[in,out] C * Descriptor of matrix C. * * @param[in] sequence * Identifies the sequence of function calls that this call belongs to * (for completion checks and exception handling purposes). Check * the sequence->status for errors. * * @param[out] request * Identifies this function call (for exception handling purposes). * * @retval void * Errors are returned by setting sequence->status and * request->status to error values. The sequence->status and * request->status should never be set to PlasmaSuccess (the * initial values) since another async call may be setting a * failure value at the same time. * ******************************************************************************* * * @sa plasma_cgemm * @sa plasma_omp_cgemm * @sa plasma_omp_dgemm * @sa plasma_omp_sgemm * ******************************************************************************/ void plasma_omp_cgemm(plasma_enum_t transa, plasma_enum_t transb, plasma_complex32_t alpha, plasma_desc_t A, plasma_desc_t B, plasma_complex32_t beta, plasma_desc_t C, plasma_sequence_t *sequence, plasma_request_t *request) { // Get PLASMA context. plasma_context_t *plasma = plasma_context_self(); if (plasma == NULL) { plasma_error("PLASMA not initialized"); plasma_request_fail(sequence, request, PlasmaErrorNotInitialized); return; } // Check input arguments. if ((transa != PlasmaNoTrans) && (transa != PlasmaTrans) && (transa != PlasmaConjTrans)) { plasma_error("illegal value of transa"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if ((transb != PlasmaNoTrans) && (transb != PlasmaTrans) && (transb != PlasmaConjTrans)) { plasma_error("illegal value of transb"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(A) != PlasmaSuccess) { plasma_error("invalid A"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(B) != PlasmaSuccess) { plasma_error("invalid B"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (plasma_desc_check(C) != PlasmaSuccess) { plasma_error("invalid C"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (sequence == NULL) { plasma_error("NULL sequence"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } if (request == NULL) { plasma_error("NULL request"); plasma_request_fail(sequence, request, PlasmaErrorIllegalValue); return; } // quick return int k = transa == PlasmaNoTrans ? A.n : A.m; if (C.m == 0 || C.n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0)) return; // Call the parallel function. plasma_pcgemm(transa, transb, alpha, A, B, beta, C, sequence, request); }
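A minimal calling-side sketch for the blocking interface above (an editorial addition, not from the PLASMA sources; the dimensions, fill values, and the demo_cgemm name are illustrative, and the example is written in C++ for consistency with the other inserted examples, assuming the PLASMA headers carry the usual C++ linkage guards). It follows the documented leading-dimension rules for PlasmaNoTrans: lda >= max(1,m), ldb >= max(1,k), ldc >= max(1,m).

#include <vector>
#include "plasma.h"

// Computes C = alpha*A*B + beta*C for column-major m-by-k and k-by-n inputs.
int demo_cgemm(int m, int n, int k)
{
    std::vector<plasma_complex32_t> A((size_t)m*k, plasma_complex32_t(1.0f));
    std::vector<plasma_complex32_t> B((size_t)k*n, plasma_complex32_t(1.0f));
    std::vector<plasma_complex32_t> C((size_t)m*n, plasma_complex32_t(0.0f));

    plasma_init();  // start the PLASMA runtime
    int status = plasma_cgemm(PlasmaNoTrans, PlasmaNoTrans, m, n, k,
                              plasma_complex32_t(1.0f), A.data(), m,
                                                        B.data(), k,
                              plasma_complex32_t(0.0f), C.data(), m);
    plasma_finalize();
    return status;  // PlasmaSuccess on success
}

Note that the blocking wrapper itself performs the tile-layout translation and synchronization; the plasma_omp_cgemm variant above is the building block for fusing several operations inside one #pragma omp parallel / master region.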
image.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % IIIII M M AAA GGGG EEEEE % % I MM MM A A G E % % I M M M AAAAA G GG EEE % % I M M A A G G E % % IIIII M M A A GGGG EEEEE % % % % % % MagickCore Image Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. */ #include "MagickCore/studio.h" #include "MagickCore/animate.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/blob-private.h" #include "MagickCore/cache.h" #include "MagickCore/cache-private.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/client.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/composite-private.h" #include "MagickCore/compress.h" #include "MagickCore/constitute.h" #include "MagickCore/delegate.h" #include "MagickCore/display.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/magic.h" #include "MagickCore/magick.h" #include "MagickCore/magick-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/module.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/property.h" #include "MagickCore/quantize.h" #include "MagickCore/random_.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/semaphore.h" #include "MagickCore/signature-private.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/timer.h" #include "MagickCore/timer-private.h" #include "MagickCore/token.h" #include "MagickCore/token-private.h" #include "MagickCore/utility.h" #include "MagickCore/utility-private.h" #include "MagickCore/version.h" #include "MagickCore/xwindow-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImage() returns a pointer to an image structure initialized to % default values. 
% % The format of the AcquireImage method is: % % Image *AcquireImage(const ImageInfo *image_info,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AcquireImage(const ImageInfo *image_info, ExceptionInfo *exception) { const char *option; Image *image; MagickStatusType flags; /* Allocate image structure. */ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); image=(Image *) AcquireCriticalMemory(sizeof(*image)); (void) memset(image,0,sizeof(*image)); /* Initialize Image structure. */ (void) CopyMagickString(image->magick,"MIFF",MagickPathExtent); image->storage_class=DirectClass; image->depth=MAGICKCORE_QUANTUM_DEPTH; image->colorspace=sRGBColorspace; image->rendering_intent=PerceptualIntent; image->gamma=1.000f/2.200f; image->chromaticity.red_primary.x=0.6400f; image->chromaticity.red_primary.y=0.3300f; image->chromaticity.red_primary.z=0.0300f; image->chromaticity.green_primary.x=0.3000f; image->chromaticity.green_primary.y=0.6000f; image->chromaticity.green_primary.z=0.1000f; image->chromaticity.blue_primary.x=0.1500f; image->chromaticity.blue_primary.y=0.0600f; image->chromaticity.blue_primary.z=0.7900f; image->chromaticity.white_point.x=0.3127f; image->chromaticity.white_point.y=0.3290f; image->chromaticity.white_point.z=0.3583f; image->interlace=NoInterlace; image->ticks_per_second=UndefinedTicksPerSecond; image->compose=OverCompositeOp; (void) QueryColorCompliance(MatteColor,AllCompliance,&image->matte_color, exception); (void) QueryColorCompliance(BackgroundColor,AllCompliance, &image->background_color,exception); (void) QueryColorCompliance(BorderColor,AllCompliance,&image->border_color, exception); (void) QueryColorCompliance(TransparentColor,AllCompliance, &image->transparent_color,exception); GetTimerInfo(&image->timer); image->cache=AcquirePixelCache(0); image->channel_mask=DefaultChannels; image->channel_map=AcquirePixelChannelMap(); image->blob=CloneBlobInfo((BlobInfo *) NULL); image->timestamp=GetMagickTime(); image->debug=IsEventLogging(); image->reference_count=1; image->semaphore=AcquireSemaphoreInfo(); image->signature=MagickCoreSignature; if (image_info == (ImageInfo *) NULL) return(image); /* Transfer image info. */ SetBlobExempt(image,image_info->file != (FILE *) NULL ? 
MagickTrue : MagickFalse); (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick_filename,image_info->filename, MagickPathExtent); (void) CopyMagickString(image->magick,image_info->magick,MagickPathExtent); if (image_info->size != (char *) NULL) { (void) ParseAbsoluteGeometry(image_info->size,&image->extract_info); image->columns=image->extract_info.width; image->rows=image->extract_info.height; image->offset=image->extract_info.x; image->extract_info.x=0; image->extract_info.y=0; } if (image_info->extract != (char *) NULL) { RectangleInfo geometry; (void) memset(&geometry,0,sizeof(geometry)); flags=ParseAbsoluteGeometry(image_info->extract,&geometry); if (((flags & XValue) != 0) || ((flags & YValue) != 0)) { image->extract_info=geometry; Swap(image->columns,image->extract_info.width); Swap(image->rows,image->extract_info.height); } } image->compression=image_info->compression; image->quality=image_info->quality; image->endian=image_info->endian; image->interlace=image_info->interlace; image->units=image_info->units; if (image_info->density != (char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(image_info->density,&geometry_info); if ((flags & RhoValue) != 0) image->resolution.x=geometry_info.rho; image->resolution.y=image->resolution.x; if ((flags & SigmaValue) != 0) image->resolution.y=geometry_info.sigma; } if (image_info->page != (char *) NULL) { char *geometry; image->page=image->extract_info; geometry=GetPageGeometry(image_info->page); (void) ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } if (image_info->depth != 0) image->depth=image_info->depth; image->dither=image_info->dither; image->matte_color=image_info->matte_color; image->background_color=image_info->background_color; image->border_color=image_info->border_color; image->transparent_color=image_info->transparent_color; image->ping=image_info->ping; image->progress_monitor=image_info->progress_monitor; image->client_data=image_info->client_data; if (image_info->cache != (void *) NULL) ClonePixelCacheMethods(image->cache,image_info->cache); /* Set all global options that map to per-image settings. */ (void) SyncImageSettings(image_info,image,exception); /* Global options that are only set for new images. */ option=GetImageOption(image_info,"delay"); if (option != (const char *) NULL) { GeometryInfo geometry_info; flags=ParseGeometry(option,&geometry_info); if ((flags & GreaterValue) != 0) { if ((double) image->delay > floor(geometry_info.rho+0.5)) image->delay=(size_t) CastDoubleToLong(floor( geometry_info.rho+0.5)); } else if ((flags & LessValue) != 0) { if ((double) image->delay < floor(geometry_info.rho+0.5)) image->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } else image->delay=(size_t) CastDoubleToLong(floor( geometry_info.rho+0.5)); if ((flags & SigmaValue) != 0) image->ticks_per_second=CastDoubleToLong(floor( geometry_info.sigma+0.5)); } option=GetImageOption(image_info,"dispose"); if (option != (const char *) NULL) image->dispose=(DisposeType) ParseCommandOption(MagickDisposeOptions, MagickFalse,option); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageInfo() allocates the ImageInfo structure. 
% % The format of the AcquireImageInfo method is: % % ImageInfo *AcquireImageInfo(void) % */ MagickExport ImageInfo *AcquireImageInfo(void) { ImageInfo *image_info; image_info=(ImageInfo *) AcquireCriticalMemory(sizeof(*image_info)); GetImageInfo(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. If there is a memory shortage, next is assigned NULL. % % The format of the AcquireNextImage method is: % % void AcquireNextImage(const ImageInfo *image_info,Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport void AcquireNextImage(const ImageInfo *image_info,Image *image, ExceptionInfo *exception) { /* Allocate image structure. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->next=AcquireImage(image_info,exception); if (GetNextImageInList(image) == (Image *) NULL) return; (void) CopyMagickString(GetNextImageInList(image)->filename,image->filename, MagickPathExtent); if (image_info != (ImageInfo *) NULL) (void) CopyMagickString(GetNextImageInList(image)->filename, image_info->filename,MagickPathExtent); DestroyBlob(GetNextImageInList(image)); image->next->blob=ReferenceBlob(image->blob); image->next->endian=image->endian; image->next->scene=image->scene+1; image->next->previous=image; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A p p e n d I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AppendImages() takes all images from the current image pointer to the end % of the image list and appends them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting affects how the image is justified in the % final image. % % The format of the AppendImages method is: % % Image *AppendImages(const Image *images,const MagickBooleanType stack, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AppendImages(const Image *images, const MagickBooleanType stack,ExceptionInfo *exception) { #define AppendImageTag "Append/Image" CacheView *append_view; Image *append_image; MagickBooleanType homogeneous_colorspace, status; MagickOffsetType n; PixelTrait alpha_trait; RectangleInfo geometry; register const Image *next; size_t depth, height, number_images, width; ssize_t x_offset, y, y_offset; /* Compute maximum area of appended image.
*/ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); alpha_trait=images->alpha_trait; number_images=1; width=images->columns; height=images->rows; depth=images->depth; homogeneous_colorspace=MagickTrue; next=GetNextImageInList(images); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->depth > depth) depth=next->depth; if (next->colorspace != images->colorspace) homogeneous_colorspace=MagickFalse; if (next->alpha_trait != UndefinedPixelTrait) alpha_trait=BlendPixelTrait; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; continue; } width+=next->columns; if (next->rows > height) height=next->rows; } /* Append images. */ append_image=CloneImage(images,width,height,MagickTrue,exception); if (append_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(append_image,DirectClass,exception) == MagickFalse) { append_image=DestroyImage(append_image); return((Image *) NULL); } if (homogeneous_colorspace == MagickFalse) (void) SetImageColorspace(append_image,sRGBColorspace,exception); append_image->depth=depth; append_image->alpha_trait=alpha_trait; append_image->page=images->page; (void) SetImageBackgroundColor(append_image,exception); status=MagickTrue; x_offset=0; y_offset=0; next=images; append_view=AcquireAuthenticCacheView(append_image,exception); for (n=0; n < (MagickOffsetType) number_images; n++) { CacheView *image_view; MagickBooleanType proceed; SetGeometry(append_image,&geometry); GravityAdjustGeometry(next->columns,next->rows,next->gravity,&geometry); if (stack != MagickFalse) x_offset-=geometry.x; else y_offset-=geometry.y; image_view=AcquireVirtualCacheView(next,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(next,next,next->rows,1) #endif for (y=0; y < (ssize_t) next->rows; y++) { MagickBooleanType sync; PixelInfo pixel; register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,next->columns,1,exception); q=QueueCacheViewAuthenticPixels(append_view,x_offset,y+y_offset, next->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } GetPixelInfo(next,&pixel); for (x=0; x < (ssize_t) next->columns; x++) { GetPixelInfoPixel(next,p,&pixel); SetPixelViaPixelInfo(append_image,&pixel,q); p+=GetPixelChannels(next); q+=GetPixelChannels(append_image); } sync=SyncCacheViewAuthenticPixels(append_view,exception); if (sync == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (stack == MagickFalse) { x_offset+=(ssize_t) next->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) next->rows; } proceed=SetImageProgress(append_image,AppendImageTag,n,number_images); if (proceed == MagickFalse) break; next=GetNextImageInList(next); } append_view=DestroyCacheView(append_view); if (status == MagickFalse) append_image=DestroyImage(append_image); return(append_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C a t c h I m a g e E x c e p t i o n % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CatchImageException() returns UndefinedException if no exceptions are found % in the image sequence, otherwise it determines the most severe exception and % reports it as a warning or error depending on the severity. % % The format of the CatchImageException method is: % % ExceptionType CatchImageException(Image *image) % % A description of each parameter follows: % % o image: An image sequence. % */ MagickExport ExceptionType CatchImageException(Image *image) { ExceptionInfo *exception; ExceptionType severity; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); exception=AcquireExceptionInfo(); CatchException(exception); severity=exception->severity; exception=DestroyExceptionInfo(exception); return(severity); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l i p I m a g e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipImagePath() sets the image clip mask based on any clipping path % information if it exists. % % The format of the ClipImagePath method is: % % MagickBooleanType ClipImagePath(Image *image,const char *pathname, % const MagickBooleanType inside,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. % % o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType ClipImage(Image *image,ExceptionInfo *exception) { return(ClipImagePath(image,"#1",MagickTrue,exception)); } MagickExport MagickBooleanType ClipImagePath(Image *image,const char *pathname, const MagickBooleanType inside,ExceptionInfo *exception) { #define ClipImagePathTag "ClipPath/Image" char *property; const char *value; Image *clip_mask; ImageInfo *image_info; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(pathname != NULL); property=AcquireString(pathname); (void) FormatLocaleString(property,MagickPathExtent,"8BIM:1999,2998:%s", pathname); value=GetImageProperty(image,property,exception); property=DestroyString(property); if (value == (const char *) NULL) { ThrowFileException(exception,OptionError,"NoClipPathDefined", image->filename); return(MagickFalse); } image_info=AcquireImageInfo(); (void) CopyMagickString(image_info->filename,image->filename, MagickPathExtent); (void) ConcatenateMagickString(image_info->filename,pathname, MagickPathExtent); clip_mask=BlobToImage(image_info,value,strlen(value),exception); image_info=DestroyImageInfo(image_info); if (clip_mask == (Image *) NULL) return(MagickFalse); if (clip_mask->storage_class == PseudoClass) { (void) SyncImage(clip_mask,exception); if (SetImageStorageClass(clip_mask,DirectClass,exception) == MagickFalse) return(MagickFalse); } if (inside == MagickFalse) (void) NegateImage(clip_mask,MagickFalse,exception); (void) FormatLocaleString(clip_mask->magick_filename,MagickPathExtent, "8BIM:1999,2998:%s\nPS",pathname); (void) SetImageMask(image,WritePixelMask,clip_mask,exception); image->mask_trait=UpdatePixelTrait; clip_mask=DestroyImage(clip_mask); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImage() copies an image and returns the copy as a new image object. % % If the specified columns and rows are 0, an exact copy of the image is % returned, otherwise the pixel data is undefined and must be initialized % with the QueueAuthenticPixels() and SyncAuthenticPixels() methods. On % failure, a NULL image is returned and exception describes the reason for the % failure. % % The format of the CloneImage method is: % % Image *CloneImage(const Image *image,const size_t columns, % const size_t rows,const MagickBooleanType detach, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: the number of columns in the cloned image. % % o rows: the number of rows in the cloned image. % % o detach: With a value other than 0, the cloned image is detached from % its parent I/O stream. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CloneImage(const Image *image,const size_t columns, const size_t rows,const MagickBooleanType detach,ExceptionInfo *exception) { Image *clone_image; double scale; size_t length; /* Clone the image.
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if ((image->columns == 0) || (image->rows == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),CorruptImageError, "NegativeOrZeroImageSize","`%s'",image->filename); return((Image *) NULL); } clone_image=(Image *) AcquireCriticalMemory(sizeof(*clone_image)); (void) memset(clone_image,0,sizeof(*clone_image)); clone_image->signature=MagickCoreSignature; clone_image->storage_class=image->storage_class; clone_image->number_channels=image->number_channels; clone_image->number_meta_channels=image->number_meta_channels; clone_image->metacontent_extent=image->metacontent_extent; clone_image->colorspace=image->colorspace; clone_image->alpha_trait=image->alpha_trait; clone_image->channels=image->channels; clone_image->mask_trait=image->mask_trait; clone_image->columns=image->columns; clone_image->rows=image->rows; clone_image->dither=image->dither; clone_image->image_info=CloneImageInfo(image->image_info); (void) CloneImageProfiles(clone_image,image); (void) CloneImageProperties(clone_image,image); (void) CloneImageArtifacts(clone_image,image); GetTimerInfo(&clone_image->timer); if (image->ascii85 != (void *) NULL) Ascii85Initialize(clone_image); clone_image->extent=image->extent; clone_image->magick_columns=image->magick_columns; clone_image->magick_rows=image->magick_rows; clone_image->type=image->type; clone_image->channel_mask=image->channel_mask; clone_image->channel_map=ClonePixelChannelMap(image->channel_map); (void) CopyMagickString(clone_image->magick_filename,image->magick_filename, MagickPathExtent); (void) CopyMagickString(clone_image->magick,image->magick,MagickPathExtent); (void) CopyMagickString(clone_image->filename,image->filename, MagickPathExtent); clone_image->progress_monitor=image->progress_monitor; clone_image->client_data=image->client_data; clone_image->reference_count=1; clone_image->next=image->next; clone_image->previous=image->previous; clone_image->list=NewImageList(); if (detach == MagickFalse) clone_image->blob=ReferenceBlob(image->blob); else { clone_image->next=NewImageList(); clone_image->previous=NewImageList(); clone_image->blob=CloneBlobInfo((BlobInfo *) NULL); } clone_image->ping=image->ping; clone_image->debug=IsEventLogging(); clone_image->semaphore=AcquireSemaphoreInfo(); if (image->colormap != (PixelInfo *) NULL) { /* Allocate and copy the image colormap. 
*/ clone_image->colors=image->colors; length=(size_t) image->colors; clone_image->colormap=(PixelInfo *) AcquireQuantumMemory(length+1, sizeof(*clone_image->colormap)); if (clone_image->colormap == (PixelInfo *) NULL) { clone_image=DestroyImage(clone_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memcpy(clone_image->colormap,image->colormap,length* sizeof(*clone_image->colormap)); } if ((columns == 0) || (rows == 0)) { if (image->montage != (char *) NULL) (void) CloneString(&clone_image->montage,image->montage); if (image->directory != (char *) NULL) (void) CloneString(&clone_image->directory,image->directory); clone_image->cache=ReferencePixelCache(image->cache); return(clone_image); } scale=1.0; if (image->columns != 0) scale=(double) columns/(double) image->columns; clone_image->page.width=(size_t) CastDoubleToLong(floor(scale* image->page.width+0.5)); clone_image->page.x=CastDoubleToLong(ceil(scale*image->page.x-0.5)); clone_image->tile_offset.x=CastDoubleToLong(ceil(scale* image->tile_offset.x-0.5)); scale=1.0; if (image->rows != 0) scale=(double) rows/(double) image->rows; clone_image->page.height=(size_t) CastDoubleToLong(floor(scale* image->page.height+0.5)); clone_image->page.y=CastDoubleToLong(ceil(scale*image->page.y-0.5)); clone_image->tile_offset.y=CastDoubleToLong(ceil(scale* image->tile_offset.y-0.5)); clone_image->cache=ClonePixelCache(image->cache); if (SetImageExtent(clone_image,columns,rows,exception) == MagickFalse) clone_image=DestroyImage(clone_image); return(clone_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloneImageInfo() makes a copy of the given image info structure. If % NULL is specified, a new image info structure is created initialized to % default values. % % The format of the CloneImageInfo method is: % % ImageInfo *CloneImageInfo(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. 
% */ MagickExport ImageInfo *CloneImageInfo(const ImageInfo *image_info) { ImageInfo *clone_info; clone_info=AcquireImageInfo(); if (image_info == (ImageInfo *) NULL) return(clone_info); clone_info->compression=image_info->compression; clone_info->temporary=image_info->temporary; clone_info->adjoin=image_info->adjoin; clone_info->antialias=image_info->antialias; clone_info->scene=image_info->scene; clone_info->number_scenes=image_info->number_scenes; clone_info->depth=image_info->depth; if (image_info->size != (char *) NULL) (void) CloneString(&clone_info->size,image_info->size); if (image_info->extract != (char *) NULL) (void) CloneString(&clone_info->extract,image_info->extract); if (image_info->scenes != (char *) NULL) (void) CloneString(&clone_info->scenes,image_info->scenes); if (image_info->page != (char *) NULL) (void) CloneString(&clone_info->page,image_info->page); clone_info->interlace=image_info->interlace; clone_info->endian=image_info->endian; clone_info->units=image_info->units; clone_info->quality=image_info->quality; if (image_info->sampling_factor != (char *) NULL) (void) CloneString(&clone_info->sampling_factor, image_info->sampling_factor); if (image_info->server_name != (char *) NULL) (void) CloneString(&clone_info->server_name,image_info->server_name); if (image_info->font != (char *) NULL) (void) CloneString(&clone_info->font,image_info->font); if (image_info->texture != (char *) NULL) (void) CloneString(&clone_info->texture,image_info->texture); if (image_info->density != (char *) NULL) (void) CloneString(&clone_info->density,image_info->density); clone_info->pointsize=image_info->pointsize; clone_info->fuzz=image_info->fuzz; clone_info->matte_color=image_info->matte_color; clone_info->background_color=image_info->background_color; clone_info->border_color=image_info->border_color; clone_info->transparent_color=image_info->transparent_color; clone_info->dither=image_info->dither; clone_info->monochrome=image_info->monochrome; clone_info->colorspace=image_info->colorspace; clone_info->type=image_info->type; clone_info->orientation=image_info->orientation; clone_info->ping=image_info->ping; clone_info->verbose=image_info->verbose; clone_info->progress_monitor=image_info->progress_monitor; clone_info->client_data=image_info->client_data; clone_info->cache=image_info->cache; if (image_info->cache != (void *) NULL) clone_info->cache=ReferencePixelCache(image_info->cache); if (image_info->profile != (void *) NULL) clone_info->profile=(void *) CloneStringInfo((StringInfo *) image_info->profile); SetImageInfoFile(clone_info,image_info->file); SetImageInfoBlob(clone_info,image_info->blob,image_info->length); clone_info->stream=image_info->stream; clone_info->custom_stream=image_info->custom_stream; (void) CopyMagickString(clone_info->magick,image_info->magick, MagickPathExtent); (void) CopyMagickString(clone_info->unique,image_info->unique, MagickPathExtent); (void) CopyMagickString(clone_info->filename,image_info->filename, MagickPathExtent); clone_info->channel=image_info->channel; (void) CloneImageOptions(clone_info,image_info); clone_info->debug=IsEventLogging(); clone_info->signature=image_info->signature; return(clone_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o p y I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CopyImagePixels() copies pixels from the source image as defined by the % geometry to the destination image at the specified
offset. % % The format of the CopyImagePixels method is: % % MagickBooleanType CopyImagePixels(Image *image,const Image *source_image, % const RectangleInfo *geometry,const OffsetInfo *offset, % ExceptionInfo *exception); % % A description of each parameter follows: % % o image: the destination image. % % o source_image: the source image. % % o geometry: define the dimensions of the source pixel rectangle. % % o offset: define the offset in the destination image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType CopyImagePixels(Image *image, const Image *source_image,const RectangleInfo *geometry, const OffsetInfo *offset,ExceptionInfo *exception) { #define CopyImageTag "Copy/Image" CacheView *image_view, *source_view; MagickBooleanType status; MagickOffsetType progress; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(source_image != (Image *) NULL); assert(geometry != (RectangleInfo *) NULL); assert(offset != (OffsetInfo *) NULL); if ((offset->x < 0) || (offset->y < 0) || ((ssize_t) (offset->x+geometry->width) > (ssize_t) image->columns) || ((ssize_t) (offset->y+geometry->height) > (ssize_t) image->rows)) ThrowBinaryException(OptionError,"GeometryDoesNotContainImage", image->filename); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); /* Copy image pixels. */ status=MagickTrue; progress=0; source_view=AcquireVirtualCacheView(source_image,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,source_image,geometry->height,1) #endif for (y=0; y < (ssize_t) geometry->height; y++) { MagickBooleanType sync; register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,geometry->x,y+geometry->y, geometry->width,1,exception); q=QueueCacheViewAuthenticPixels(image_view,offset->x,y+offset->y, geometry->width,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) geometry->width; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); PixelTrait source_traits=GetPixelChannelTraits(source_image,channel); if ((traits == UndefinedPixelTrait) || ((traits & UpdatePixelTrait) == 0) || (source_traits == UndefinedPixelTrait)) continue; SetPixelChannel(image,channel,p[i],q); } p+=GetPixelChannels(source_image); q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,CopyImageTag,progress,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } source_view=DestroyCacheView(source_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImage() 
dereferences an image, deallocating memory associated with % the image if the reference count becomes zero. % % The format of the DestroyImage method is: % % Image *DestroyImage(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport Image *DestroyImage(Image *image) { MagickBooleanType destroy; /* Dereference image. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); destroy=MagickFalse; LockSemaphoreInfo(image->semaphore); image->reference_count--; if (image->reference_count == 0) destroy=MagickTrue; UnlockSemaphoreInfo(image->semaphore); if (destroy == MagickFalse) return((Image *) NULL); /* Destroy image. */ DestroyImagePixels(image); image->channel_map=DestroyPixelChannelMap(image->channel_map); if (image->montage != (char *) NULL) image->montage=DestroyString(image->montage); if (image->directory != (char *) NULL) image->directory=DestroyString(image->directory); if (image->colormap != (PixelInfo *) NULL) image->colormap=(PixelInfo *) RelinquishMagickMemory(image->colormap); if (image->geometry != (char *) NULL) image->geometry=DestroyString(image->geometry); DestroyImageProfiles(image); DestroyImageProperties(image); DestroyImageArtifacts(image); if (image->ascii85 != (Ascii85Info *) NULL) image->ascii85=(Ascii85Info *) RelinquishMagickMemory(image->ascii85); if (image->image_info != (ImageInfo *) NULL) image->image_info=DestroyImageInfo(image->image_info); DestroyBlob(image); if (image->semaphore != (SemaphoreInfo *) NULL) RelinquishSemaphoreInfo(&image->semaphore); image->signature=(~MagickCoreSignature); image=(Image *) RelinquishMagickMemory(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageInfo() deallocates memory associated with an ImageInfo % structure. % % The format of the DestroyImageInfo method is: % % ImageInfo *DestroyImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. 
% */ MagickExport ImageInfo *DestroyImageInfo(ImageInfo *image_info) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); if (image_info->size != (char *) NULL) image_info->size=DestroyString(image_info->size); if (image_info->extract != (char *) NULL) image_info->extract=DestroyString(image_info->extract); if (image_info->scenes != (char *) NULL) image_info->scenes=DestroyString(image_info->scenes); if (image_info->page != (char *) NULL) image_info->page=DestroyString(image_info->page); if (image_info->sampling_factor != (char *) NULL) image_info->sampling_factor=DestroyString( image_info->sampling_factor); if (image_info->server_name != (char *) NULL) image_info->server_name=DestroyString( image_info->server_name); if (image_info->font != (char *) NULL) image_info->font=DestroyString(image_info->font); if (image_info->texture != (char *) NULL) image_info->texture=DestroyString(image_info->texture); if (image_info->density != (char *) NULL) image_info->density=DestroyString(image_info->density); if (image_info->cache != (void *) NULL) image_info->cache=DestroyPixelCache(image_info->cache); if (image_info->profile != (StringInfo *) NULL) image_info->profile=(void *) DestroyStringInfo((StringInfo *) image_info->profile); DestroyImageOptions(image_info); image_info->signature=(~MagickCoreSignature); image_info=(ImageInfo *) RelinquishMagickMemory(image_info); return(image_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D i s a s s o c i a t e I m a g e S t r e a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DisassociateImageStream() disassociates the image stream. It checks if the % blob of the specified image is referenced by other images. If the reference % count is higher than 1, a new blob is assigned to the specified image. % % The format of the DisassociateImageStream method is: % % void DisassociateImageStream(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DisassociateImageStream(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); DisassociateBlob(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfo() initializes image_info to default values. % % The format of the GetImageInfo method is: % % void GetImageInfo(ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport void GetImageInfo(ImageInfo *image_info) { char *synchronize; ExceptionInfo *exception; /* File and image dimension members.
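The structure is zeroed first; defaults and the standard colors are then filled in below.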
*/ (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info != (ImageInfo *) NULL); (void) memset(image_info,0,sizeof(*image_info)); image_info->adjoin=MagickTrue; image_info->interlace=NoInterlace; image_info->channel=DefaultChannels; image_info->quality=UndefinedCompressionQuality; image_info->antialias=MagickTrue; image_info->dither=MagickTrue; synchronize=GetEnvironmentValue("MAGICK_SYNCHRONIZE"); if (synchronize != (const char *) NULL) { image_info->synchronize=IsStringTrue(synchronize); synchronize=DestroyString(synchronize); } exception=AcquireExceptionInfo(); (void) QueryColorCompliance(BackgroundColor,AllCompliance, &image_info->background_color,exception); (void) QueryColorCompliance(BorderColor,AllCompliance, &image_info->border_color,exception); (void) QueryColorCompliance(MatteColor,AllCompliance,&image_info->matte_color, exception); (void) QueryColorCompliance(TransparentColor,AllCompliance, &image_info->transparent_color,exception); exception=DestroyExceptionInfo(exception); image_info->debug=IsEventLogging(); image_info->signature=MagickCoreSignature; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageInfoFile() returns the image info file member. % % The format of the GetImageInfoFile method is: % % FILE *GetImageInfoFile(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: the image info. % */ MagickExport FILE *GetImageInfoFile(const ImageInfo *image_info) { return(image_info->file); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageMask() returns the mask associated with the image. % % The format of the GetImageMask method is: % % Image *GetImageMask(const Image *image,const PixelMask type, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % */ MagickExport Image *GetImageMask(const Image *image,const PixelMask type, ExceptionInfo *exception) { CacheView *mask_view, *image_view; Image *mask_image; MagickBooleanType status; ssize_t y; /* Get image mask. 
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); switch (type) { case ReadPixelMask: { if ((image->channels & ReadMaskChannel) == 0) return((Image *) NULL); break; } case WritePixelMask: { if ((image->channels & WriteMaskChannel) == 0) return((Image *) NULL); break; } default: { if ((image->channels & CompositeMaskChannel) == 0) return((Image *) NULL); break; } } mask_image=AcquireImage((ImageInfo *) NULL,exception); status=SetImageExtent(mask_image,image->columns,image->rows,exception); if (status == MagickFalse) return(DestroyImage(mask_image)); status=MagickTrue; mask_image->alpha_trait=UndefinedPixelTrait; (void) SetImageColorspace(mask_image,GRAYColorspace,exception); image_view=AcquireVirtualCacheView(image,exception); mask_view=AcquireAuthenticCacheView(mask_image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=GetCacheViewAuthenticPixels(mask_view,0,y,mask_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { switch (type) { case ReadPixelMask: { SetPixelGray(mask_image,GetPixelReadMask(image,p),q); break; } case WritePixelMask: { SetPixelGray(mask_image,GetPixelWriteMask(image,p),q); break; } default: { SetPixelGray(mask_image,GetPixelCompositeMask(image,p),q); break; } } p+=GetPixelChannels(image); q+=GetPixelChannels(mask_image); } if (SyncCacheViewAuthenticPixels(mask_view,exception) == MagickFalse) status=MagickFalse; } mask_view=DestroyCacheView(mask_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) mask_image=DestroyImage(mask_image); return(mask_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e R e f e r e n c e C o u n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageReferenceCount() returns the image reference count. % % The format of the GetImageReferenceCount method is: % % ssize_t GetImageReferenceCount(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport ssize_t GetImageReferenceCount(Image *image) { ssize_t reference_count; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); LockSemaphoreInfo(image->semaphore); reference_count=image->reference_count; UnlockSemaphoreInfo(image->semaphore); return(reference_count); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageVirtualPixelMethod() gets the "virtual pixels" method for the % image. A virtual pixel is any pixel access that is outside the boundaries % of the image cache. % % The format of the GetImageVirtualPixelMethod() method is: % % VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) % % A description of each parameter follows: % % o image: the image.
% */ MagickExport VirtualPixelMethod GetImageVirtualPixelMethod(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(GetPixelCacheVirtualMethod(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p r e t I m a g e F i l e n a m e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpretImageFilename() interprets embedded characters in an image filename. % The filename length is returned. % % The format of the InterpretImageFilename method is: % % size_t InterpretImageFilename(const ImageInfo *image_info,Image *image, % const char *format,int value,char *filename,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o format: A filename describing the format to use to write the numeric % argument. Only the first numeric format identifier is replaced. % % o value: Numeric value to substitute into format filename. % % o filename: return the formatted filename in this character buffer. % % o exception: return any errors or warnings in this structure. % */ MagickExport size_t InterpretImageFilename(const ImageInfo *image_info, Image *image,const char *format,int value,char *filename, ExceptionInfo *exception) { char *q; int c; MagickBooleanType canonical; register const char *p; ssize_t field_width, offset; canonical=MagickFalse; offset=0; (void) CopyMagickString(filename,format,MagickPathExtent); for (p=strchr(format,'%'); p != (char *) NULL; p=strchr(p+1,'%')) { q=(char *) p+1; if (*q == '%') { p=q+1; continue; } field_width=0; if (*q == '0') field_width=(ssize_t) strtol(q,&q,10); switch (*q) { case 'd': case 'o': case 'x': { q++; c=(*q); *q='\0'; (void) FormatLocaleString(filename+(p-format-offset),(size_t) (MagickPathExtent-(p-format-offset)),p,value); offset+=(4-field_width); *q=c; (void) ConcatenateMagickString(filename,q,MagickPathExtent); canonical=MagickTrue; if (*(q-1) != '%') break; p++; break; } case '[': { char pattern[MagickPathExtent]; const char *option; register char *r; register ssize_t i; ssize_t depth; /* Image option.
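A pattern such as %[filename:basename] is resolved against image properties first, then image artifacts, then image options.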
*/ if (strchr(p,']') == (char *) NULL) break; depth=1; r=q+1; for (i=0; (i < (MagickPathExtent-1L)) && (*r != '\0'); i++) { if (*r == '[') depth++; if (*r == ']') depth--; if (depth <= 0) break; pattern[i]=(*r++); } pattern[i]='\0'; if (LocaleNCompare(pattern,"filename:",9) != 0) break; option=(const char *) NULL; if (image != (Image *) NULL) option=GetImageProperty(image,pattern,exception); if ((option == (const char *) NULL) && (image != (Image *) NULL)) option=GetImageArtifact(image,pattern); if ((option == (const char *) NULL) && (image_info != (ImageInfo *) NULL)) option=GetImageOption(image_info,pattern); if (option == (const char *) NULL) break; q--; c=(*q); *q='\0'; (void) CopyMagickString(filename+(p-format-offset),option,(size_t) (MagickPathExtent-(p-format-offset))); offset+=strlen(pattern)-strlen(option)+3; *q=c; (void) ConcatenateMagickString(filename,r+1,MagickPathExtent); canonical=MagickTrue; if (*(q-1) != '%') break; p++; break; } default: break; } } if (canonical == MagickFalse) (void) CopyMagickString(filename,format,MagickPathExtent); else for (q=filename; *q != '\0'; q++) if ((*q == '%') && (*(q+1) == '%')) (void) CopyMagickString(q,q+1,(size_t) (MagickPathExtent-(q-filename))); return(strlen(filename)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s H i g h D y n a m i c R a n g e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsHighDynamicRangeImage() returns MagickTrue if any pixel component is % non-integer or exceeds the bounds of the quantum depth (e.g. for Q16, % 0..65535). % % The format of the IsHighDynamicRangeImage method is: % % MagickBooleanType IsHighDynamicRangeImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsHighDynamicRangeImage(const Image *image, ExceptionInfo *exception) { #if !defined(MAGICKCORE_HDRI_SUPPORT) (void) image; (void) exception; return(MagickFalse); #else CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); status=MagickTrue; image_view=AcquireVirtualCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double pixel; PixelTrait traits; traits=GetPixelChannelTraits(image,(PixelChannel) i); if (traits == UndefinedPixelTrait) continue; pixel=(double) p[i]; if ((pixel < 0.0) || (pixel > QuantumRange) || (pixel != (double) ((QuantumAny) pixel))) break; } p+=GetPixelChannels(image); if (i < (ssize_t) GetPixelChannels(image)) status=MagickFalse; } if (x < (ssize_t) image->columns) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status != MagickFalse ?
MagickFalse : MagickTrue); #endif } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s I m a g e O b j e c t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsImageObject() returns MagickTrue if the image sequence contains a valid % set of image objects. % % The format of the IsImageObject method is: % % MagickBooleanType IsImageObject(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsImageObject(const Image *image) { register const Image *p; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) if (p->signature != MagickCoreSignature) return(MagickFalse); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s T a i n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsTaintImage() returns MagickTrue if any pixel in the image has been altered % since it was first constituted. % % The format of the IsTaintImage method is: % % MagickBooleanType IsTaintImage(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType IsTaintImage(const Image *image) { char magick[MagickPathExtent], filename[MagickPathExtent]; register const Image *p; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); (void) CopyMagickString(magick,image->magick,MagickPathExtent); (void) CopyMagickString(filename,image->filename,MagickPathExtent); for (p=image; p != (Image *) NULL; p=GetNextImageInList(p)) { if (p->taint != MagickFalse) return(MagickTrue); if (LocaleCompare(p->magick,magick) != 0) return(MagickTrue); if (LocaleCompare(p->filename,filename) != 0) return(MagickTrue); } return(MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M o d i f y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ModifyImage() ensures that there is only a single reference to the image % to be modified, updating the provided image pointer to point to a clone of % the original image if necessary. % % The format of the ModifyImage method is: % % MagickBooleanType ModifyImage(Image **image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure.
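% % For example, a minimal copy-on-write sketch (error handling elided): % % if (ModifyImage(&image,exception) == MagickFalse) % return(MagickFalse); % image now points at a uniquely referenced image that is safe to mutate %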
% */ MagickExport MagickBooleanType ModifyImage(Image **image, ExceptionInfo *exception) { Image *clone_image; assert(image != (Image **) NULL); assert(*image != (Image *) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); if (GetImageReferenceCount(*image) <= 1) return(MagickTrue); clone_image=CloneImage(*image,0,0,MagickTrue,exception); if (clone_image == (Image *) NULL) return(MagickFalse); LockSemaphoreInfo((*image)->semaphore); (*image)->reference_count--; UnlockSemaphoreInfo((*image)->semaphore); *image=clone_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e w M a g i c k I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NewMagickImage() creates a blank image canvas of the specified size and % background color. % % The format of the NewMagickImage method is: % % Image *NewMagickImage(const ImageInfo *image_info,const size_t width, % const size_t height,const PixelInfo *background, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o width: the image width. % % o height: the image height. % % o background: the image color. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *NewMagickImage(const ImageInfo *image_info, const size_t width,const size_t height,const PixelInfo *background, ExceptionInfo *exception) { CacheView *image_view; Image *image; MagickBooleanType status; ssize_t y; assert(image_info != (const ImageInfo *) NULL); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image_info->signature == MagickCoreSignature); assert(background != (const PixelInfo *) NULL); image=AcquireImage(image_info,exception); image->columns=width; image->rows=height; image->colorspace=background->colorspace; image->alpha_trait=background->alpha_trait; image->fuzz=background->fuzz; image->depth=background->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); if (status == MagickFalse) image=DestroyImage(image); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e f e r e n c e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReferenceImage() increments the reference count associated with an image, % returning a pointer to the image. % % The format of the ReferenceImage method is: % % Image *ReferenceImage(Image *image) % % A description of each parameter follows: % % o image: the image.
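% % For example (illustrative; every reference must eventually be released with DestroyImage()): % % Image *shared = ReferenceImage(image); % shared = DestroyImage(shared);   (this releases the extra reference only) %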
% */ MagickExport Image *ReferenceImage(Image *image) { assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); LockSemaphoreInfo(image->semaphore); image->reference_count++; UnlockSemaphoreInfo(image->semaphore); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePage() resets the image page canvas and position. % % The format of the ResetImagePage method is: % % MagickBooleanType ResetImagePage(Image *image,const char *page) % % A description of each parameter follows: % % o image: the image. % % o page: the relative page specification. % */ MagickExport MagickBooleanType ResetImagePage(Image *image,const char *page) { MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); flags=ParseAbsoluteGeometry(page,&geometry); if ((flags & WidthValue) != 0) { if ((flags & HeightValue) == 0) geometry.height=geometry.width; image->page.width=geometry.width; image->page.height=geometry.height; } if ((flags & AspectValue) != 0) { if ((flags & XValue) != 0) image->page.x+=geometry.x; if ((flags & YValue) != 0) image->page.y+=geometry.y; } else { if ((flags & XValue) != 0) { image->page.x=geometry.x; if ((image->page.width == 0) && (geometry.x > 0)) image->page.width=image->columns+geometry.x; } if ((flags & YValue) != 0) { image->page.y=geometry.y; if ((image->page.height == 0) && (geometry.y > 0)) image->page.height=image->rows+geometry.y; } } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImagePixels() resets the image pixels, that is, all the pixel components % are zeroed. % % The format of the ResetImagePixels method is: % % MagickBooleanType ResetImagePixels(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType ResetImagePixels(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; size_t length; ssize_t y; void *pixels; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); pixels=AcquirePixelCachePixels(image,&length,exception); if (pixels != (void *) NULL) { /* Reset in-core image pixels. */ (void) memset(pixels,0,length); return(MagickTrue); } /* Reset image pixels.
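The pixels are not resident in memory, so they are zeroed row by row through the pixel cache below.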
*/ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { (void) memset(q,0,GetPixelChannels(image)*sizeof(Quantum)); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e A l p h a % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageAlpha() sets the alpha levels of the image. % % The format of the SetImageAlpha method is: % % MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o alpha: the level of transparency: 0 is fully transparent and QuantumRange % is fully opaque. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageAlpha(Image *image,const Quantum alpha, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); image->alpha_trait=BlendPixelTrait; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelWriteMask(image,q) > (QuantumRange/2)) SetPixelAlpha(image,alpha,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e B a c k g r o u n d C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageBackgroundColor() initializes the image pixels to the image % background color. The background color is defined by the background_color % member of the image structure. % % The format of the SetImageBackgroundColor method is: % % MagickBooleanType SetImageBackgroundColor(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure.
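% % For example, a minimal sketch (the color name is illustrative): % % (void) QueryColorCompliance("red",AllCompliance,&image->background_color, % exception); % (void) SetImageBackgroundColor(image,exception); %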
% */ MagickExport MagickBooleanType SetImageBackgroundColor(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; PixelInfo background; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); if ((image->background_color.alpha_trait != UndefinedPixelTrait) && (image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlphaChannel(image,OnAlphaChannel,exception); ConformPixelInfo(image,&image->background_color,&background,exception); /* Set image background color. */ status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,&background,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannelMask() sets the image channel mask from the specified channel % mask. % % The format of the SetImageChannelMask method is: % % ChannelType SetImageChannelMask(Image *image, % const ChannelType channel_mask) % % A description of each parameter follows: % % o image: the image. % % o channel_mask: the channel mask. % */ MagickExport ChannelType SetImageChannelMask(Image *image, const ChannelType channel_mask) { return(SetPixelChannelMask(image,channel_mask)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageColor() sets the entire image canvas to the specified color. % % The format of the SetImageColor method is: % % MagickBooleanType SetImageColor(Image *image,const PixelInfo *color, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o color: the image color. % % o exception: return any errors or warnings in this structure.
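% % For example, a minimal sketch that clears the canvas to transparent: % % PixelInfo color; % (void) QueryColorCompliance("none",AllCompliance,&color,exception); % (void) SetImageColor(image,&color,exception); %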
% */ MagickExport MagickBooleanType SetImageColor(Image *image, const PixelInfo *color,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); assert(color != (const PixelInfo *) NULL); image->colorspace=color->colorspace; image->alpha_trait=color->alpha_trait; image->fuzz=color->fuzz; image->depth=color->depth; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=QueueCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { SetPixelViaPixelInfo(image,color,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e S t o r a g e C l a s s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageStorageClass() sets the image class: DirectClass for true color % images or PseudoClass for colormapped images. % % The format of the SetImageStorageClass method is: % % MagickBooleanType SetImageStorageClass(Image *image, % const ClassType storage_class,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o storage_class: The image class. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageStorageClass(Image *image, const ClassType storage_class,ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image->storage_class=storage_class; return(SyncImagePixelCache(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e E x t e n t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageExtent() sets the image size (i.e. columns & rows). % % The format of the SetImageExtent method is: % % MagickBooleanType SetImageExtent(Image *image,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o columns: The image width in pixels. % % o rows: The image height in pixels. % % o exception: return any errors or warnings in this structure. 
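% % For example (the dimensions are illustrative; zero columns or rows is an error): % % if (SetImageExtent(image,640,480,exception) == MagickFalse) % return(MagickFalse); %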
% */ MagickExport MagickBooleanType SetImageExtent(Image *image,const size_t columns, const size_t rows,ExceptionInfo *exception) { if ((columns == 0) || (rows == 0)) ThrowBinaryException(ImageError,"NegativeOrZeroImageSize",image->filename); image->columns=columns; image->rows=rows; if (image->depth == 0) { image->depth=8; (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageDepthNotSupported","`%s'",image->filename); } if (image->depth > (8*sizeof(MagickSizeType))) { image->depth=8*sizeof(MagickSizeType); (void) ThrowMagickException(exception,GetMagickModule(),ImageError, "ImageDepthNotSupported","`%s'",image->filename); } return(SyncImagePixelCache(image,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S e t I m a g e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfo() initializes the 'magick' field of the ImageInfo structure. % It is set to a type of image format based on the prefix or suffix of the % filename. For example, 'ps:image' returns PS indicating a PostScript image. % JPEG is returned for this filename: 'image.jpg'. The filename prefix has % precedence over the suffix. Use an optional index enclosed in brackets % after a file name to specify a desired scene of a multi-resolution image % format like Photo CD (e.g. img0001.pcd[4]). A True (non-zero) return value % indicates success. % % The format of the SetImageInfo method is: % % MagickBooleanType SetImageInfo(ImageInfo *image_info, % const unsigned int frames,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o frames: the number of images you intend to write. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageInfo(ImageInfo *image_info, const unsigned int frames,ExceptionInfo *exception) { char component[MagickPathExtent], magic[MagickPathExtent], #if defined(MAGICKCORE_ZLIB_DELEGATE) || defined(MAGICKCORE_BZLIB_DELEGATE) path[MagickPathExtent], #endif *q; const MagicInfo *magic_info; const MagickInfo *magick_info; ExceptionInfo *sans_exception; Image *image; MagickBooleanType status; register const char *p; ssize_t count; /* Look for 'image.format' in filename. */ assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); *component='\0'; GetPathComponent(image_info->filename,SubimagePath,component); if (*component != '\0') { /* Look for scene specification (e.g. img0001.pcd[4]).
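A scene range is accepted as well, e.g. img0001.pcd[2-5] selects scenes 2 through 5.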
*/ if (IsSceneGeometry(component,MagickFalse) == MagickFalse) { if (IsGeometry(component) != MagickFalse) (void) CloneString(&image_info->extract,component); } else { size_t first, last; (void) CloneString(&image_info->scenes,component); image_info->scene=StringToUnsignedLong(image_info->scenes); image_info->number_scenes=image_info->scene; p=image_info->scenes; for (q=(char *) image_info->scenes; *q != '\0'; p++) { while ((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) p++; first=(size_t) strtol(p,&q,10); last=first; while (isspace((int) ((unsigned char) *q)) != 0) q++; if (*q == '-') last=(size_t) strtol(q+1,&q,10); if (first > last) Swap(first,last); if (first < image_info->scene) image_info->scene=first; if (last > image_info->number_scenes) image_info->number_scenes=last; p=q; } image_info->number_scenes-=image_info->scene-1; } } *component='\0'; if (*image_info->magick == '\0') GetPathComponent(image_info->filename,ExtensionPath,component); #if defined(MAGICKCORE_ZLIB_DELEGATE) if (*component != '\0') if ((LocaleCompare(component,"gz") == 0) || (LocaleCompare(component,"Z") == 0) || (LocaleCompare(component,"svgz") == 0) || (LocaleCompare(component,"wmz") == 0)) { (void) CopyMagickString(path,image_info->filename,MagickPathExtent); path[strlen(path)-strlen(component)-1]='\0'; GetPathComponent(path,ExtensionPath,component); } #endif #if defined(MAGICKCORE_BZLIB_DELEGATE) if (*component != '\0') if (LocaleCompare(component,"bz2") == 0) { (void) CopyMagickString(path,image_info->filename,MagickPathExtent); path[strlen(path)-strlen(component)-1]='\0'; GetPathComponent(path,ExtensionPath,component); } #endif image_info->affirm=MagickFalse; sans_exception=AcquireExceptionInfo(); if ((*component != '\0') && (IsGlob(component) == MagickFalse)) { MagickFormatType format_type; register ssize_t i; static const char *format_type_formats[] = { "AUTOTRACE", "BROWSE", "DCRAW", "EDIT", "LAUNCH", "MPEG:DECODE", "MPEG:ENCODE", "PRINT", "PS:ALPHA", "PS:CMYK", "PS:COLOR", "PS:GRAY", "PS:MONO", "SCAN", "SHOW", "WIN", (char *) NULL }; /* User specified image format. */ (void) CopyMagickString(magic,component,MagickPathExtent); LocaleUpper(magic); /* Look for explicit image formats. */ format_type=UndefinedFormatType; magick_info=GetMagickInfo(magic,sans_exception); if ((magick_info != (const MagickInfo *) NULL) && (magick_info->format_type != UndefinedFormatType)) format_type=magick_info->format_type; i=0; while ((format_type == UndefinedFormatType) && (format_type_formats[i] != (char *) NULL)) { if ((*magic == *format_type_formats[i]) && (LocaleCompare(magic,format_type_formats[i]) == 0)) format_type=ExplicitFormatType; i++; } if (format_type == UndefinedFormatType) (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); else if (format_type == ExplicitFormatType) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); } if (LocaleCompare(magic,"RGB") == 0) image_info->affirm=MagickFalse; /* maybe SGI disguised as RGB */ } /* Look for explicit 'format:image' in filename. 
*/ *magic='\0'; GetPathComponent(image_info->filename,MagickPath,magic); if (*magic == '\0') { (void) CopyMagickString(magic,image_info->magick,MagickPathExtent); magick_info=GetMagickInfo(magic,sans_exception); if (frames == 0) GetPathComponent(image_info->filename,CanonicalPath,component); else GetPathComponent(image_info->filename,SubcanonicalPath,component); (void) CopyMagickString(image_info->filename,component,MagickPathExtent); } else { const DelegateInfo *delegate_info; /* User specified image format. */ LocaleUpper(magic); magick_info=GetMagickInfo(magic,sans_exception); delegate_info=GetDelegateInfo(magic,"*",sans_exception); if (delegate_info == (const DelegateInfo *) NULL) delegate_info=GetDelegateInfo("*",magic,sans_exception); if (((magick_info != (const MagickInfo *) NULL) || (delegate_info != (const DelegateInfo *) NULL)) && (IsMagickConflict(magic) == MagickFalse)) { image_info->affirm=MagickTrue; (void) CopyMagickString(image_info->magick,magic,MagickPathExtent); GetPathComponent(image_info->filename,CanonicalPath,component); (void) CopyMagickString(image_info->filename,component, MagickPathExtent); } } sans_exception=DestroyExceptionInfo(sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; if ((image_info->adjoin != MagickFalse) && (frames > 1)) { /* Test for multiple image support (e.g. image%02d.png). */ (void) InterpretImageFilename(image_info,(Image *) NULL, image_info->filename,(int) image_info->scene,component,exception); if ((LocaleCompare(component,image_info->filename) != 0) && (strchr(component,'%') == (char *) NULL)) image_info->adjoin=MagickFalse; } if ((image_info->adjoin != MagickFalse) && (frames > 0)) { /* Some image formats do not support multiple frames per file. */ magick_info=GetMagickInfo(magic,exception); if (magick_info != (const MagickInfo *) NULL) if (GetMagickAdjoin(magick_info) == MagickFalse) image_info->adjoin=MagickFalse; } if (image_info->affirm != MagickFalse) return(MagickTrue); if (frames == 0) { unsigned char *magick; size_t magick_size; /* Determine the image format from the first few bytes of the file. */ magick_size=GetMagicPatternExtent(exception); if (magick_size == 0) return(MagickFalse); image=AcquireImage(image_info,exception); (void) CopyMagickString(image->filename,image_info->filename, MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { image=DestroyImage(image); return(MagickFalse); } if ((IsBlobSeekable(image) == MagickFalse) || (IsBlobExempt(image) != MagickFalse)) { /* Copy image to seekable temporary file. 
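The magic bytes can then be read and the stream rewound afterwards.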
*/ *component='\0'; status=ImageToFile(image,component,exception); (void) CloseBlob(image); if (status == MagickFalse) { (void) RelinquishUniqueFileResource(component); image=DestroyImage(image); return(MagickFalse); } SetImageInfoFile(image_info,(FILE *) NULL); (void) CopyMagickString(image->filename,component,MagickPathExtent); status=OpenBlob(image_info,image,ReadBinaryBlobMode,exception); if (status == MagickFalse) { (void) RelinquishUniqueFileResource(component); image=DestroyImage(image); return(MagickFalse); } (void) CopyMagickString(image_info->filename,component, MagickPathExtent); image_info->temporary=MagickTrue; } magick=(unsigned char *) AcquireQuantumMemory(1,magick_size); if (magick == (unsigned char *) NULL) { (void) CloseBlob(image); image=DestroyImage(image); return(MagickFalse); } (void) memset(magick,0,magick_size); count=ReadBlob(image,magick_size,magick); (void) SeekBlob(image,-((MagickOffsetType) count),SEEK_CUR); (void) CloseBlob(image); image=DestroyImage(image); /* Check magic cache. */ sans_exception=AcquireExceptionInfo(); magic_info=GetMagicInfo(magick,(size_t) count,sans_exception); magick=(unsigned char *) RelinquishMagickMemory(magick); if ((magic_info != (const MagicInfo *) NULL) && (GetMagicName(magic_info) != (char *) NULL)) { /* Try to use magick_info that was determined earlier by the extension */ if ((magick_info != (const MagickInfo *) NULL) && (GetMagickUseExtension(magick_info) != MagickFalse) && (LocaleCompare(magick_info->magick_module,GetMagicName( magic_info)) == 0)) (void) CopyMagickString(image_info->magick,magick_info->name, MagickPathExtent); else { (void) CopyMagickString(image_info->magick,GetMagicName( magic_info),MagickPathExtent); magick_info=GetMagickInfo(image_info->magick,sans_exception); } if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); return(MagickTrue); } magick_info=GetMagickInfo(image_info->magick,sans_exception); if ((magick_info == (const MagickInfo *) NULL) || (GetMagickEndianSupport(magick_info) == MagickFalse)) image_info->endian=UndefinedEndian; sans_exception=DestroyExceptionInfo(sans_exception); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoBlob() sets the image info blob member. % % The format of the SetImageInfoBlob method is: % % void SetImageInfoBlob(ImageInfo *image_info,const void *blob, % const size_t length) % % A description of each parameter follows: % % o image_info: the image info. % % o blob: the blob. % % o length: the blob length. % */ MagickExport void SetImageInfoBlob(ImageInfo *image_info,const void *blob, const size_t length) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->blob=(void *) blob; image_info->length=length; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o C u s t o m S t r e a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoCustomStream() sets the image info custom stream handlers. 
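% Custom streams let an application supply its own read and write handlers in place of a file or an in-memory blob.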
% % The format of the SetImageInfoCustomStream method is: % % void SetImageInfoCustomStream(ImageInfo *image_info, % CustomStreamInfo *custom_stream) % % A description of each parameter follows: % % o image_info: the image info. % % o custom_stream: your custom stream methods. % */ MagickExport void SetImageInfoCustomStream(ImageInfo *image_info, CustomStreamInfo *custom_stream) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->custom_stream=(CustomStreamInfo *) custom_stream; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e I n f o F i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageInfoFile() sets the image info file member. % % The format of the SetImageInfoFile method is: % % void SetImageInfoFile(ImageInfo *image_info,FILE *file) % % A description of each parameter follows: % % o image_info: the image info. % % o file: the file. % */ MagickExport void SetImageInfoFile(ImageInfo *image_info,FILE *file) { assert(image_info != (ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); if (image_info->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s", image_info->filename); image_info->file=file; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageMask() associates a mask with the image. The mask must be the same % dimensions as the image. % % The format of the SetImageMask method is: % % MagickBooleanType SetImageMask(Image *image,const PixelMask type, % const Image *mask,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o mask: the image mask. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageMask(Image *image,const PixelMask type, const Image *mask,ExceptionInfo *exception) { CacheView *mask_view, *image_view; MagickBooleanType status; ssize_t y; /* Set image mask. 
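A NULL mask simply clears the corresponding mask channel below.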
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (mask == (const Image *) NULL) { switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels & ~ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels & ~WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel); break; } } return(SyncImagePixelCache(image,exception)); } switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels | ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels | WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels | CompositeMaskChannel); break; } } if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; image->mask_trait=UpdatePixelTrait; mask_view=AcquireVirtualCacheView(mask,exception); image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(mask,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(mask_view,0,y,mask->columns,1,exception); q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { MagickRealType intensity; intensity=0.0; if ((x < (ssize_t) mask->columns) && (y < (ssize_t) mask->rows)) intensity=GetPixelIntensity(mask,p); switch (type) { case ReadPixelMask: { SetPixelReadMask(image,ClampToQuantum(intensity),q); break; } case WritePixelMask: { SetPixelWriteMask(image,ClampToQuantum(intensity),q); break; } default: { SetPixelCompositeMask(image,ClampToQuantum(intensity),q); break; } } p+=GetPixelChannels(mask); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image->mask_trait=UndefinedPixelTrait; mask_view=DestroyCacheView(mask_view); image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e R e g i o n M a s k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageRegionMask() associates a mask with the image as defined by the % specified region. % % The format of the SetImageRegionMask method is: % % MagickBooleanType SetImageRegionMask(Image *image,const PixelMask type, % const RectangleInfo *region,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o type: the mask type, ReadPixelMask or WritePixelMask. % % o region: the mask region. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SetImageRegionMask(Image *image, const PixelMask type,const RectangleInfo *region,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; /* Set image mask as defined by the region.
*/ assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (region == (const RectangleInfo *) NULL) { switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels & ~ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels & ~WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels & ~CompositeMaskChannel); break; } } return(SyncImagePixelCache(image,exception)); } switch (type) { case ReadPixelMask: { image->channels=(ChannelType) (image->channels | ReadMaskChannel); break; } case WritePixelMask: { image->channels=(ChannelType) (image->channels | WriteMaskChannel); break; } default: { image->channels=(ChannelType) (image->channels | CompositeMaskChannel); break; } } if (SyncImagePixelCache(image,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; image->mask_trait=UpdatePixelTrait; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { Quantum pixel; pixel=QuantumRange; if (((x >= region->x) && (x < (region->x+(ssize_t) region->width))) && ((y >= region->y) && (y < (region->y+(ssize_t) region->height)))) pixel=(Quantum) 0; switch (type) { case ReadPixelMask: { SetPixelReadMask(image,pixel,q); break; } case WritePixelMask: { SetPixelWriteMask(image,pixel,q); break; } default: { SetPixelCompositeMask(image,pixel,q); break; } } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image->mask_trait=UndefinedPixelTrait; image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e V i r t u a l P i x e l M e t h o d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageVirtualPixelMethod() sets the "virtual pixels" method for the % image and returns the previous setting. A virtual pixel is any pixel access % that is outside the boundaries of the image cache. % % The format of the SetImageVirtualPixelMethod() method is: % % VirtualPixelMethod SetImageVirtualPixelMethod(Image *image, % const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o virtual_pixel_method: choose the type of virtual pixel. % % o exception: return any errors or warnings in this structure. 
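% % For example, a sketch that tiles off-canvas pixel accesses and later restores the previous method: % % VirtualPixelMethod previous = SetImageVirtualPixelMethod(image, % TileVirtualPixelMethod,exception); % (void) SetImageVirtualPixelMethod(image,previous,exception); %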
% */ MagickExport VirtualPixelMethod SetImageVirtualPixelMethod(Image *image, const VirtualPixelMethod virtual_pixel_method,ExceptionInfo *exception) { assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); return(SetPixelCacheVirtualMethod(image,virtual_pixel_method,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S m u s h I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SmushImages() takes all images from the current image pointer to the end % of the image list and smushes them to each other top-to-bottom if the % stack parameter is true, otherwise left-to-right. % % The current gravity setting affects how the image is justified in the % final image. % % The format of the SmushImages method is: % % Image *SmushImages(const Image *images,const MagickBooleanType stack, % const ssize_t offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o stack: A value other than 0 stacks the images top-to-bottom. % % o offset: minimum distance in pixels between images. % % o exception: return any errors or warnings in this structure. % */ static ssize_t SmushXGap(const Image *smush_image,const Image *images, const ssize_t offset,ExceptionInfo *exception) { CacheView *left_view, *right_view; const Image *left_image, *right_image; RectangleInfo left_geometry, right_geometry; register const Quantum *p; register ssize_t i, y; size_t gap; ssize_t x; if (images->previous == (Image *) NULL) return(0); right_image=images; SetGeometry(smush_image,&right_geometry); GravityAdjustGeometry(right_image->columns,right_image->rows, right_image->gravity,&right_geometry); left_image=images->previous; SetGeometry(smush_image,&left_geometry); GravityAdjustGeometry(left_image->columns,left_image->rows, left_image->gravity,&left_geometry); gap=right_image->columns; left_view=AcquireVirtualCacheView(left_image,exception); right_view=AcquireVirtualCacheView(right_image,exception); for (y=0; y < (ssize_t) smush_image->rows; y++) { for (x=(ssize_t) left_image->columns-1; x > 0; x--) { p=GetCacheViewVirtualPixels(left_view,x,left_geometry.y+y,1,1,exception); if ((p == (const Quantum *) NULL) || (GetPixelAlpha(left_image,p) != TransparentAlpha) || ((left_image->columns-x-1) >= gap)) break; } i=(ssize_t) left_image->columns-x-1; for (x=0; x < (ssize_t) right_image->columns; x++) { p=GetCacheViewVirtualPixels(right_view,x,right_geometry.y+y,1,1, exception); if ((p == (const Quantum *) NULL) || (GetPixelAlpha(right_image,p) != TransparentAlpha) || ((x+i) >= (ssize_t) gap)) break; } if ((x+i) < (ssize_t) gap) gap=(size_t) (x+i); } right_view=DestroyCacheView(right_view); left_view=DestroyCacheView(left_view); if (y < (ssize_t) smush_image->rows) return(offset); return((ssize_t) gap-offset); } static ssize_t SmushYGap(const Image *smush_image,const Image *images, const ssize_t offset,ExceptionInfo *exception) { CacheView *bottom_view, *top_view; const Image *bottom_image, *top_image; RectangleInfo bottom_geometry, top_geometry; register const Quantum *p; register ssize_t i, x; size_t gap; ssize_t y; if (images->previous == (Image *) NULL) return(0); bottom_image=images; SetGeometry(smush_image,&bottom_geometry); GravityAdjustGeometry(bottom_image->columns,bottom_image->rows, bottom_image->gravity,&bottom_geometry);
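/* For each column, measure the transparent run at the bottom of the top image and at the top of the bottom image; the smallest combined run over all columns bounds how far the two images may be smushed together. */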
top_image=images->previous; SetGeometry(smush_image,&top_geometry); GravityAdjustGeometry(top_image->columns,top_image->rows,top_image->gravity, &top_geometry); gap=bottom_image->rows; top_view=AcquireVirtualCacheView(top_image,exception); bottom_view=AcquireVirtualCacheView(bottom_image,exception); for (x=0; x < (ssize_t) smush_image->columns; x++) { for (y=(ssize_t) top_image->rows-1; y > 0; y--) { p=GetCacheViewVirtualPixels(top_view,top_geometry.x+x,y,1,1,exception); if ((p == (const Quantum *) NULL) || (GetPixelAlpha(top_image,p) != TransparentAlpha) || ((top_image->rows-y-1) >= gap)) break; } i=(ssize_t) top_image->rows-y-1; for (y=0; y < (ssize_t) bottom_image->rows; y++) { p=GetCacheViewVirtualPixels(bottom_view,bottom_geometry.x+x,y,1,1, exception); if ((p == (const Quantum *) NULL) || (GetPixelAlpha(bottom_image,p) != TransparentAlpha) || ((y+i) >= (ssize_t) gap)) break; } if ((y+i) < (ssize_t) gap) gap=(size_t) (y+i); } bottom_view=DestroyCacheView(bottom_view); top_view=DestroyCacheView(top_view); if (x < (ssize_t) smush_image->columns) return(offset); return((ssize_t) gap-offset); } MagickExport Image *SmushImages(const Image *images, const MagickBooleanType stack,const ssize_t offset,ExceptionInfo *exception) { #define SmushImageTag "Smush/Image" const Image *image; Image *smush_image; MagickBooleanType proceed, status; MagickOffsetType n; PixelTrait alpha_trait; RectangleInfo geometry; register const Image *next; size_t height, number_images, width; ssize_t x_offset, y_offset; /* Compute the maximum area of the smushed image. */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); image=images; alpha_trait=image->alpha_trait; number_images=1; width=image->columns; height=image->rows; next=GetNextImageInList(image); for ( ; next != (Image *) NULL; next=GetNextImageInList(next)) { if (next->alpha_trait != UndefinedPixelTrait) alpha_trait=BlendPixelTrait; number_images++; if (stack != MagickFalse) { if (next->columns > width) width=next->columns; height+=next->rows; if (next->previous != (Image *) NULL) height+=offset; continue; } width+=next->columns; if (next->previous != (Image *) NULL) width+=offset; if (next->rows > height) height=next->rows; } /* Smush images.
*/ smush_image=CloneImage(image,width,height,MagickTrue,exception); if (smush_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(smush_image,DirectClass,exception) == MagickFalse) { smush_image=DestroyImage(smush_image); return((Image *) NULL); } smush_image->alpha_trait=alpha_trait; (void) SetImageBackgroundColor(smush_image,exception); status=MagickTrue; x_offset=0; y_offset=0; for (n=0; n < (MagickOffsetType) number_images; n++) { SetGeometry(smush_image,&geometry); GravityAdjustGeometry(image->columns,image->rows,image->gravity,&geometry); if (stack != MagickFalse) { x_offset-=geometry.x; y_offset-=SmushYGap(smush_image,image,offset,exception); } else { x_offset-=SmushXGap(smush_image,image,offset,exception); y_offset-=geometry.y; } status=CompositeImage(smush_image,image,OverCompositeOp,MagickTrue,x_offset, y_offset,exception); proceed=SetImageProgress(image,SmushImageTag,n,number_images); if (proceed == MagickFalse) break; if (stack == MagickFalse) { x_offset+=(ssize_t) image->columns; y_offset=0; } else { x_offset=0; y_offset+=(ssize_t) image->rows; } image=GetNextImageInList(image); } if (stack == MagickFalse) smush_image->columns=(size_t) x_offset; else smush_image->rows=(size_t) y_offset; if (status == MagickFalse) smush_image=DestroyImage(smush_image); return(smush_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S t r i p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % StripImage() strips an image of all profiles and comments. % % The format of the StripImage method is: % % MagickBooleanType StripImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType StripImage(Image *image,ExceptionInfo *exception) { MagickBooleanType status; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); (void) exception; DestroyImageProfiles(image); (void) DeleteImageProperty(image,"comment"); (void) DeleteImageProperty(image,"date:create"); (void) DeleteImageProperty(image,"date:modify"); status=SetImageArtifact(image,"png:exclude-chunk", "bKGD,caNv,cHRM,eXIf,gAMA,iCCP,iTXt,pHYs,sRGB,tEXt,zCCP,zTXt,date"); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S y n c I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImage() initializes the red, green, and blue intensities of each pixel % as defined by the colormap index. % % The format of the SyncImage method is: % % MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ static inline Quantum PushColormapIndex(Image *image,const Quantum index, MagickBooleanType *range_exception) { if ((size_t) index < image->colors) return(index); *range_exception=MagickTrue; return((Quantum) 0); } MagickExport MagickBooleanType SyncImage(Image *image,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType range_exception, status, taint; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (image->ping != MagickFalse) return(MagickTrue); if (image->storage_class != PseudoClass) return(MagickFalse); assert(image->colormap != (PixelInfo *) NULL); range_exception=MagickFalse; status=MagickTrue; taint=image->taint; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(range_exception,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { Quantum index; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { index=PushColormapIndex(image,GetPixelIndex(image,q),&range_exception); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); image->taint=taint; if ((image->ping == MagickFalse) && (range_exception != MagickFalse)) (void) ThrowMagickException(exception,GetMagickModule(), CorruptImageWarning,"InvalidColormapIndex","`%s'",image->filename); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e S e t t i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImageSettings() syncs any image_info global options into per-image % attributes. % % Note: in IMv6, free-form 'options' were always copied into 'artifacts' so % that operations and coders could find such settings. In IMv7, if a desired % per-image artifact is not set, the lookup falls back directly to the % global option, so this copy is no longer needed; only the fallback link % needs to be set up. % % The format of the SyncImageSettings method is: % % MagickBooleanType SyncImageSettings(const ImageInfo *image_info, % Image *image,ExceptionInfo *exception) % MagickBooleanType SyncImagesSettings(ImageInfo *image_info, % Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType SyncImagesSettings(ImageInfo *image_info, Image *images,ExceptionInfo *exception) { Image *image; assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); image=images; for ( ; image != (Image *) NULL; image=GetNextImageInList(image)) (void) SyncImageSettings(image_info,image,exception); (void) DeleteImageOption(image_info,"page"); return(MagickTrue); } MagickExport MagickBooleanType SyncImageSettings(const ImageInfo *image_info, Image *image,ExceptionInfo *exception) { const char *option; GeometryInfo geometry_info; MagickStatusType flags; ResolutionType units; /* Sync image options. */ assert(image_info != (const ImageInfo *) NULL); assert(image_info->signature == MagickCoreSignature); assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); option=GetImageOption(image_info,"background"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->background_color, exception); option=GetImageOption(image_info,"black-point-compensation"); if (option != (const char *) NULL) image->black_point_compensation=(MagickBooleanType) ParseCommandOption( MagickBooleanOptions,MagickFalse,option); option=GetImageOption(image_info,"blue-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.blue_primary.x=geometry_info.rho; image->chromaticity.blue_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.blue_primary.y=image->chromaticity.blue_primary.x; } option=GetImageOption(image_info,"bordercolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->border_color, exception); /* FUTURE: do not sync compose to per-image compose setting here */ option=GetImageOption(image_info,"compose"); if (option != (const char *) NULL) image->compose=(CompositeOperator) ParseCommandOption(MagickComposeOptions, MagickFalse,option); /* -- */ option=GetImageOption(image_info,"compress"); if (option != (const char *) NULL) image->compression=(CompressionType) ParseCommandOption( MagickCompressOptions,MagickFalse,option); option=GetImageOption(image_info,"debug"); if (option != (const char *) NULL) image->debug=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"density"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; } option=GetImageOption(image_info,"depth"); if (option != (const char *) NULL) image->depth=StringToUnsignedLong(option); option=GetImageOption(image_info,"endian"); if (option != (const char *) NULL) image->endian=(EndianType) ParseCommandOption(MagickEndianOptions, MagickFalse,option); option=GetImageOption(image_info,"filter"); if (option != (const char *) NULL) image->filter=(FilterType) ParseCommandOption(MagickFilterOptions, MagickFalse,option); option=GetImageOption(image_info,"fuzz"); if (option != (const char *) NULL) image->fuzz=StringToDoubleInterval(option,(double) QuantumRange+1.0); 
option=GetImageOption(image_info,"gravity"); if (option != (const char *) NULL) image->gravity=(GravityType) ParseCommandOption(MagickGravityOptions, MagickFalse,option); option=GetImageOption(image_info,"green-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.green_primary.x=geometry_info.rho; image->chromaticity.green_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.green_primary.y=image->chromaticity.green_primary.x; } option=GetImageOption(image_info,"intent"); if (option != (const char *) NULL) image->rendering_intent=(RenderingIntent) ParseCommandOption( MagickIntentOptions,MagickFalse,option); option=GetImageOption(image_info,"intensity"); if (option != (const char *) NULL) image->intensity=(PixelIntensityMethod) ParseCommandOption( MagickPixelIntensityOptions,MagickFalse,option); option=GetImageOption(image_info,"interlace"); if (option != (const char *) NULL) image->interlace=(InterlaceType) ParseCommandOption(MagickInterlaceOptions, MagickFalse,option); option=GetImageOption(image_info,"interpolate"); if (option != (const char *) NULL) image->interpolate=(PixelInterpolateMethod) ParseCommandOption( MagickInterpolateOptions,MagickFalse,option); option=GetImageOption(image_info,"loop"); if (option != (const char *) NULL) image->iterations=StringToUnsignedLong(option); option=GetImageOption(image_info,"mattecolor"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->matte_color, exception); option=GetImageOption(image_info,"orient"); if (option != (const char *) NULL) image->orientation=(OrientationType) ParseCommandOption( MagickOrientationOptions,MagickFalse,option); option=GetImageOption(image_info,"page"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->page); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"quality"); if (option != (const char *) NULL) image->quality=StringToUnsignedLong(option); option=GetImageOption(image_info,"red-primary"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.red_primary.x=geometry_info.rho; image->chromaticity.red_primary.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.red_primary.y=image->chromaticity.red_primary.x; } if (image_info->quality != UndefinedCompressionQuality) image->quality=image_info->quality; option=GetImageOption(image_info,"scene"); if (option != (const char *) NULL) image->scene=StringToUnsignedLong(option); option=GetImageOption(image_info,"taint"); if (option != (const char *) NULL) image->taint=(MagickBooleanType) ParseCommandOption(MagickBooleanOptions, MagickFalse,option); option=GetImageOption(image_info,"tile-offset"); if (option != (const char *) NULL) { char *geometry; geometry=GetPageGeometry(option); flags=ParseAbsoluteGeometry(geometry,&image->tile_offset); geometry=DestroyString(geometry); } option=GetImageOption(image_info,"transparent-color"); if (option != (const char *) NULL) (void) QueryColorCompliance(option,AllCompliance,&image->transparent_color, exception); option=GetImageOption(image_info,"type"); if (option != (const char *) NULL) image->type=(ImageType) ParseCommandOption(MagickTypeOptions,MagickFalse, option); option=GetImageOption(image_info,"units"); units=image_info->units; if (option != (const char *) NULL) units=(ResolutionType) ParseCommandOption(MagickResolutionOptions, 
MagickFalse,option); if (units != UndefinedResolution) { if (image->units != units) switch (image->units) { case PixelsPerInchResolution: { if (units == PixelsPerCentimeterResolution) { image->resolution.x/=2.54; image->resolution.y/=2.54; } break; } case PixelsPerCentimeterResolution: { if (units == PixelsPerInchResolution) { image->resolution.x=(double) ((size_t) (100.0*2.54* image->resolution.x+0.5))/100.0; image->resolution.y=(double) ((size_t) (100.0*2.54* image->resolution.y+0.5))/100.0; } break; } default: break; } image->units=units; option=GetImageOption(image_info,"density"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->resolution.x=geometry_info.rho; image->resolution.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->resolution.y=image->resolution.x; } } option=GetImageOption(image_info,"virtual-pixel"); if (option != (const char *) NULL) (void) SetImageVirtualPixelMethod(image,(VirtualPixelMethod) ParseCommandOption(MagickVirtualPixelOptions,MagickFalse,option), exception); option=GetImageOption(image_info,"white-point"); if (option != (const char *) NULL) { flags=ParseGeometry(option,&geometry_info); image->chromaticity.white_point.x=geometry_info.rho; image->chromaticity.white_point.y=geometry_info.sigma; if ((flags & SigmaValue) == 0) image->chromaticity.white_point.y=image->chromaticity.white_point.x; } /* Set up a link so that the lookup of a per-image artifact can fall back to a global option setting/define. This saves a lot of duplication of global options into per-image artifacts, while ensuring only specifically set per-image artifacts are preserved when a parenthesis scope ends. */ if (image->image_info != (ImageInfo *) NULL) image->image_info=DestroyImageInfo(image->image_info); image->image_info=CloneImageInfo(image_info); return(MagickTrue); }
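/*
  Illustrative usage sketch (not part of this file): driving the public
  SmushImages() entry point above from client code.  The wildcard filename
  and helper name are hypothetical, and error handling is abbreviated.
*/
#if 0
static Image *SmushExample(ExceptionInfo *exception)
{
  Image *images, *smushed;
  ImageInfo *image_info;

  image_info=AcquireImageInfo();
  (void) CopyMagickString(image_info->filename,"frames-*.png",
    MagickPathExtent);
  images=ReadImage(image_info,exception);  /* read the whole sequence */
  image_info=DestroyImageInfo(image_info);
  if (images == (Image *) NULL)
    return((Image *) NULL);
  /* stack top-to-bottom with a minimum gap of 5 pixels between frames */
  smushed=SmushImages(images,MagickTrue,5,exception);
  images=DestroyImageList(images);
  return(smushed);
}
#endif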
compatibility.h
// -*- C++ -*- // Copyright (C) 2007-2017 Free Software Foundation, Inc. // // This file is part of the GNU ISO C++ Library. This library is free // software; you can redistribute it and/or modify it under the terms // of the GNU General Public License as published by the Free Software // Foundation; either version 3, or (at your option) any later // version. // This library is distributed in the hope that it will be useful, but // WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU // General Public License for more details. // Under Section 7 of GPL version 3, you are granted additional // permissions described in the GCC Runtime Library Exception, version // 3.1, as published by the Free Software Foundation. // You should have received a copy of the GNU General Public License and // a copy of the GCC Runtime Library Exception along with this program; // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see // <http://www.gnu.org/licenses/>. /** @file parallel/compatibility.h * @brief Compatibility layer, mostly concerned with atomic operations. * * This file is a GNU parallel extension to the Standard C++ Library * and contains implementation details for the library's internal use. */ // Written by Felix Putze. #ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H #define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1 #include <parallel/types.h> #include <parallel/base.h> #if !defined(_WIN32) || defined (__CYGWIN__) #include <sched.h> #endif #ifdef __MINGW32__ // Including <windows.h> will drag in all the windows32 names. Since // that can cause user code portability problems, we just declare the // one needed function here. extern "C" __attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long); #endif namespace __gnu_parallel { template<typename _Tp> inline _Tp __add_omp(volatile _Tp* __ptr, _Tp __addend) { int64_t __res; #pragma omp critical { __res = *__ptr; *(__ptr) += __addend; } return __res; } /** @brief Add a value to a variable, atomically. * * @param __ptr Pointer to a signed integer. * @param __addend Value to add. */ template<typename _Tp> inline _Tp __fetch_and_add(volatile _Tp* __ptr, _Tp __addend) { if (__atomic_always_lock_free(sizeof(_Tp), __ptr)) return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL); return __add_omp(__ptr, __addend); } template<typename _Tp> inline bool __cas_omp(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement) { bool __res = false; #pragma omp critical { if (*__ptr == __comparand) { *__ptr = __replacement; __res = true; } } return __res; } /** @brief Compare-and-swap * * Compare @c *__ptr and @c __comparand. If equal, let @c * *__ptr=__replacement and return @c true, return @c false otherwise. * * @param __ptr Pointer to signed integer. * @param __comparand Compare value. * @param __replacement Replacement value. */ template<typename _Tp> inline bool __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement) { if (__atomic_always_lock_free(sizeof(_Tp), __ptr)) return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement, false, __ATOMIC_ACQ_REL, __ATOMIC_RELAXED); return __cas_omp(__ptr, __comparand, __replacement); } /** @brief Yield control to another thread, without waiting for * the end of the time slice. */ inline void __yield() { #if defined (_WIN32) && !defined (__CYGWIN__) Sleep(0); #else sched_yield(); #endif } } // end namespace #endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */
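// Illustrative sketch (not part of the header): exercising the two
// primitives above.  __fetch_and_add() takes the lock-free __atomic path
// when the target supports it and otherwise falls back to the
// "omp critical" versions; the counter type and values here are arbitrary.
#if 0
#include <cstdint>
#include <parallel/compatibility.h>

void __example_usage()
{
  volatile int32_t __counter = 0;

  // returns the previous value (0) and leaves __counter == 5
  int32_t __old = __gnu_parallel::__fetch_and_add(&__counter, (int32_t)5);

  // succeeds (returns true) only while __counter still holds 5
  bool __swapped =
    __gnu_parallel::__compare_and_swap(&__counter, (int32_t)5, (int32_t)7);
  (void)__old; (void)__swapped;
}
#endif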
GB_binop__isle_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__isle_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__isle_uint8) // A*D function (colscale): GB (_AxD__isle_uint8) // D*A function (rowscale): GB (_DxB__isle_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__isle_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__isle_uint8) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__isle_uint8) // C=scalar+B GB (_bind1st__isle_uint8) // C=scalar+B' GB (_bind1st_tran__isle_uint8) // C=A+scalar GB (_bind2nd__isle_uint8) // C=A'+scalar GB (_bind2nd_tran__isle_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 0 // BinaryOp: cij = (aij <= bij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (x <= y) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISLE || GxB_NO_UINT8 || GxB_NO_ISLE_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__isle_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__isle_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__isle_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__isle_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__isle_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__isle_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar 
= (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__isle_uint8) ( GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__isle_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__isle_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__isle_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__isle_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (x <= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__isle_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (aij <= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (x <= aij) ; \ } GrB_Info GB (_bind1st_tran__isle_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int 
nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). #undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij <= y) ; \ } GrB_Info GB (_bind2nd_tran__isle_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
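//------------------------------------------------------------------------------
// Illustrative sketch (not generated code): the scalar semantics behind
// GB (_bind2nd__isle_uint8) above, stripped of the GraphBLAS bitmap (Ab/GBB)
// and iso (GBX) plumbing.  The ISLE operator stores 1 when aij <= y and 0
// otherwise, as a uint8_t; the function name is made up for illustration.
//------------------------------------------------------------------------------
#if 0
#include <stdint.h>

static void isle_uint8_bind2nd_sketch (uint8_t *Cx, const uint8_t *Ax,
    uint8_t y, int64_t anz)
{
    for (int64_t p = 0 ; p < anz ; p++)
    {
        Cx [p] = (Ax [p] <= y) ;    // cij = (aij <= y)
    }
}
#endif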
sum_openmp.c
/* Copyright (C) 2021 The Blosc Developers <[email protected]> https://blosc.org License: BSD 3-Clause (see LICENSE.txt) Example program showing how to operate with compressed buffers. To compile this program for synthetic data (default): $ gcc -fopenmp -O3 sum_openmp.c -o sum_openmp -lblosc2 To run: $ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$) Sum for uncompressed data: 199950000000 Sum time for uncompressed data: 0.0288 s, 26459.3 MB/s Compression ratio: 762.9 MB -> 14.0 MB (54.6x) Compression time: 0.288 s, 2653.5 MB/s Sum for *compressed* data: 199950000000 Sum time for *compressed* data: 0.0188 s, 40653.7 MB/s To use real (rainfall) data: $ gcc -DRAINFALL -fopenmp -Ofast sum_openmp.c -o sum_openmp And running it: $ OMP_PROC_BIND=spread OMP_NUM_THREADS=8 ./sum_openmp Blosc version info: 2.0.0a6.dev ($Date:: 2018-05-18 #$) Sum for uncompressed data: 29741012 Sum time for uncompressed data: 0.0149 s, 25627.4 MB/s Compression ratio: 381.5 MB -> 71.3 MB (5.3x) Compression time: 1.53 s, 249.1 MB/s Sum for *compressed* data: 29741012 Sum time for *compressed* data: 0.0247 s, 15467.5 MB/s */ #include <stdio.h> #include <stdlib.h> #include <stdbool.h> #include <sys/stat.h> #include <errno.h> #include <assert.h> #include "blosc2.h" #define KB 1024. #define MB (1024*KB) #define GB (1024*MB) #define N (100 * 1000 * 1000) #define CHUNKSIZE (16 * 1000) #define NCHUNKS (N / CHUNKSIZE) #define NTHREADS 8 #define NITER 5 #ifdef RAINFALL #define SYNTHETIC false #else #define SYNTHETIC true #endif #if SYNTHETIC == true #define DTYPE int64_t #define CLEVEL 3 #define CODEC BLOSC_BLOSCLZ #else #define DTYPE float #define CLEVEL 1 #define CODEC BLOSC_LZ4 #endif int main(void) { static DTYPE udata[N]; DTYPE chunk_buf[CHUNKSIZE]; int32_t isize = CHUNKSIZE * sizeof(DTYPE); DTYPE sum, compressed_sum; int64_t nbytes, cbytes; blosc2_schunk* schunk; int i, j, nchunk; blosc_timestamp_t last, current; double ttotal, itotal; char* envvar = NULL; printf("Blosc version info: %s (%s)\n", BLOSC_VERSION_STRING, BLOSC_VERSION_DATE); // Fill the buffer for a chunk if (SYNTHETIC) { for (j = 0; j < CHUNKSIZE; j++) { chunk_buf[j] = j; } } else { struct stat info; const char *filegrid = "rainfall-grid-150x150.bin"; if (stat(filegrid, &info) != 0) { printf("Grid file %s not found!", filegrid); exit(1); } char *cdata = malloc(info.st_size); FILE *f = fopen(filegrid, "rb"); size_t blocks_read = fread(cdata, info.st_size, 1, f); assert(blocks_read == 1); fclose(f); int dsize = blosc_getitem(cdata, 0, CHUNKSIZE, chunk_buf); if (dsize < 0) { printf("blosc_getitem() error. Error code: %d. Probably reading too much data?\n", dsize); exit(1); } free(cdata); } // Fill the uncompressed dataset with data chunks for (i = 0; i < N / CHUNKSIZE; i++) { for (j = 0; j < CHUNKSIZE; j++) { udata[i * CHUNKSIZE + j] = chunk_buf[j]; } } // Reduce uncompressed dataset ttotal = 1e10; sum = 0; for (int n = 0; n < NITER; n++) { sum = 0; blosc_set_timestamp(&last); #pragma omp parallel for reduction (+:sum) for (i = 0; i < N; i++) { sum += udata[i]; } blosc_set_timestamp(&current); itotal = blosc_elapsed_secs(last, current); if (itotal < ttotal) ttotal = itotal; } printf("Sum for uncompressed data: %10.0f\n", (double)sum); printf("Sum time for uncompressed data: %.3g s, %.1f MB/s\n", ttotal, (double)(isize * NCHUNKS) / (double)(ttotal * MB)); // Create a super-chunk container for the compressed data long codec = CODEC; envvar = getenv("SUM_COMPRESSOR"); if (envvar != NULL) { codec = blosc_compname_to_compcode(envvar); if (codec < 0) { printf("Unknown compressor: %s\n", envvar); return 1; } } blosc2_cparams cparams = BLOSC2_CPARAMS_DEFAULTS; cparams.compcode = (uint8_t)codec; long clevel = CLEVEL; envvar = getenv("SUM_CLEVEL"); if (envvar != NULL) { clevel = strtol(envvar, NULL, 10); } cparams.clevel = (uint8_t)clevel; cparams.typesize = sizeof(DTYPE); cparams.nthreads = 1; blosc2_dparams dparams = BLOSC2_DPARAMS_DEFAULTS; dparams.nthreads = 1; blosc_set_timestamp(&last); blosc2_storage storage = {.cparams=&cparams, .dparams=&dparams}; schunk = blosc2_schunk_new(&storage); for (nchunk = 0; nchunk < NCHUNKS; nchunk++) { for (i = 0; i < CHUNKSIZE; i++) { chunk_buf[i] = udata[i + nchunk * CHUNKSIZE]; } blosc2_schunk_append_buffer(schunk, chunk_buf, isize); } blosc_set_timestamp(&current); ttotal = blosc_elapsed_secs(last, current); nbytes = schunk->nbytes; cbytes = schunk->cbytes; printf("Compression ratio: %.1f MB -> %.1f MB (%.1fx)\n", nbytes / MB, cbytes / MB, (1. * nbytes) / cbytes);
printf("Compression time: %.3g s, %.1f MB/s\n", ttotal, nbytes / (ttotal * MB)); int nthreads = NTHREADS; envvar = getenv("OMP_NUM_THREADS"); if (envvar != NULL) { long value; value = strtol(envvar, NULL, 10); if ((value != EINVAL) && (value >= 0)) { nthreads = (int)value; } } // Build buffers and contexts for computations int nchunks_thread = NCHUNKS / nthreads; int remaining_chunks = NCHUNKS - nchunks_thread * nthreads; blosc2_context **dctx = malloc(nthreads * sizeof(void*)); DTYPE** chunk = malloc(nthreads * sizeof(void*)); for (j = 0; j < nthreads; j++) { chunk[j] = malloc(CHUNKSIZE * sizeof(DTYPE)); // One decompression context per thread: blosc2 contexts must not be // shared between threads, and creating them once here avoids leaking a // fresh context on every timing iteration. dctx[j] = blosc2_create_dctx(dparams); } // Reduce the *compressed* dataset ttotal = 1e10; compressed_sum = 0; for (int n = 0; n < NITER; n++) { compressed_sum = 0; blosc_set_timestamp(&last); // i and nchunk are declared at function scope, so both must be // privatized here; j, the parallel loop variable, is private already. #pragma omp parallel for private(nchunk, i) reduction (+:compressed_sum) for (j = 0; j < nthreads; j++) { for (nchunk = 0; nchunk < nchunks_thread; nchunk++) { blosc2_decompress_ctx(dctx[j], schunk->data[j * nchunks_thread + nchunk], INT32_MAX, (void*)(chunk[j]), isize); for (i = 0; i < CHUNKSIZE; i++) { compressed_sum += chunk[j][i]; //compressed_sum += i + (j * nchunks_thread + nchunk) * CHUNKSIZE; } } } for (nchunk = NCHUNKS - remaining_chunks; nchunk < NCHUNKS; nchunk++) { blosc2_decompress_ctx(dctx[0], schunk->data[nchunk], INT32_MAX, (void*)(chunk[0]), isize); for (i = 0; i < CHUNKSIZE; i++) { compressed_sum += chunk[0][i]; //compressed_sum += i + nchunk * CHUNKSIZE; } } blosc_set_timestamp(&current); itotal = blosc_elapsed_secs(last, current); if (itotal < ttotal) ttotal = itotal; } printf("Sum for *compressed* data: %10.0f\n", (double)compressed_sum); printf("Sum time for *compressed* data: %.3g s, %.1f MB/s\n", ttotal, nbytes / (ttotal * MB)); //printf("sum, csum: %f, %f\n", sum, compressed_sum); if (SYNTHETIC) { // difficult to fulfill for single precision assert(sum == compressed_sum); } /* Free resources */ for (j = 0; j < nthreads; j++) { free(chunk[j]); blosc2_free_ctx(dctx[j]); } free(chunk); free(dctx); blosc2_schunk_free(schunk); return 0; }
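/*
  Minimal sketch (not part of the example above) of the pattern used for the
  compressed reduction: one decompression context per worker, each walking a
  disjoint range of chunks.  schunk, isize, DTYPE and CHUNKSIZE refer to the
  definitions above; the function name is made up and error checks omitted.
*/
#if 0
static double sum_chunk_range(blosc2_schunk *sc, blosc2_dparams dp,
                              int first, int last, int32_t isize) {
  DTYPE buf[CHUNKSIZE];
  double partial = 0;
  blosc2_context *ctx = blosc2_create_dctx(dp);  // private to this worker
  for (int nc = first; nc < last; nc++) {
    blosc2_decompress_ctx(ctx, sc->data[nc], INT32_MAX, (void*)buf, isize);
    for (int i = 0; i < CHUNKSIZE; i++) partial += buf[i];
  }
  blosc2_free_ctx(ctx);
  return partial;
}
#endif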
omp_for_bigbounds.c
// RUN: %libomp-compile -DMY_SCHEDULE=static && %libomp-run // RUN: %libomp-compile -DMY_SCHEDULE=dynamic && %libomp-run // RUN: %libomp-compile -DMY_SCHEDULE=guided && %libomp-run // Only works with Intel Compiler since at least version 15.0 // XFAIL: gcc, clang /* * Test that large bounds are handled properly and calculations of * loop iterations don't accidentally overflow */ #include <stdio.h> #include <omp.h> #include <stdlib.h> #include <limits.h> #include "omp_testsuite.h" #define INCR 50000000 #define MY_MAX 2000000000 #define MY_MIN -2000000000 #ifndef MY_SCHEDULE # define MY_SCHEDULE static #endif int a, b, a_known_value, b_known_value; int test_omp_for_bigbounds() { a = 0; b = 0; #pragma omp parallel { int i; #pragma omp for schedule(MY_SCHEDULE) for (i = INT_MIN; i < MY_MAX; i+=INCR) { #pragma omp atomic a++; } #pragma omp for schedule(MY_SCHEDULE) for (i = INT_MAX; i >= MY_MIN; i-=INCR) { #pragma omp atomic b++; } } printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value, b, b_known_value); return (a == a_known_value && b == b_known_value); } int main() { int i; int num_failed=0; a_known_value = 0; for (i = INT_MIN; i < MY_MAX; i+=INCR) { a_known_value++; } b_known_value = 0; for (i = INT_MAX; i >= MY_MIN; i-=INCR) { b_known_value++; } for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_bigbounds()) { num_failed++; } } return num_failed; }
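/*
 * Illustrative note (not part of the test): the first loop runs from
 * INT_MIN up to 2000000000, so its span (about 4.1e9) does not fit in a
 * signed int, and computing the trip count as (MY_MAX - INT_MIN) in signed
 * arithmetic would overflow.  One overflow-safe way to count iterations,
 * matching what a_known_value measures, is unsigned arithmetic:
 */
#if 0
/* iterations of: for (i = lb; i < ub; i += incr), with incr > 0 */
static unsigned int trip_count_up(int lb, int ub, int incr)
{
  unsigned int span = (unsigned int)ub - (unsigned int)lb; /* mod 2^32, exact here */
  return (span + (unsigned int)incr - 1) / (unsigned int)incr;
}
/* trip_count_up(INT_MIN, MY_MAX, INCR) == 83 */
#endif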
GB_unop__trunc_fc32_fc32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__trunc_fc32_fc32 // op(A') function: GB_unop_tran__trunc_fc32_fc32 // C type: GxB_FC32_t // A type: GxB_FC32_t // cast: GxB_FC32_t cij = aij // unaryop: cij = GB_ctruncf (aij) #define GB_ATYPE \ GxB_FC32_t #define GB_CTYPE \ GxB_FC32_t // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC32_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = GB_ctruncf (x) ; // casting #define GB_CAST(z, aij) \ GxB_FC32_t z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GxB_FC32_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ GxB_FC32_t z = aij ; \ Cx [pC] = GB_ctruncf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_TRUNC || GxB_NO_FC32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__trunc_fc32_fc32 ( GxB_FC32_t *Cx, // Cx and Ax may be aliased const GxB_FC32_t *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC32_t), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_ctruncf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; GxB_FC32_t aij = Ax [p] ; GxB_FC32_t z = aij ; Cx [p] = GB_ctruncf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__trunc_fc32_fc32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
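//------------------------------------------------------------------------------
// Illustrative sketch (not generated code): the effect of the TRUNC unary op
// on a complex float, assuming GB_ctruncf truncates the real and imaginary
// parts independently (see the GraphBLAS math headers for the actual
// definition).  C99 complex stands in for GxB_FC32_t.
//------------------------------------------------------------------------------
#if 0
#include <complex.h>
#include <math.h>

static float complex ctrunc_sketch (float complex x)
{
    // truncf rounds each part toward zero
    return truncf (crealf (x)) + truncf (cimagf (x)) * I ;
}
/* e.g. ctrunc_sketch(1.9f - 2.7f*I) == 1.0f - 2.0f*I */
#endif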
segment.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % Cristy % % April 1993 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class are used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. % % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ''fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minimum or a maximum in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ''classified'' and is assigned a unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minimum of the generalized within-group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990.
% % */ #include "MagickCore/studio.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "ios_error.h" /* Define declarations. */ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { double center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { double tau; ssize_t left, right; double mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { double tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. */ static double OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void FreeNodes(IntervalTree *), InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const double,double *), ZeroCrossHistogram(double *,const double,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const double cluster_threshold, % const double weighting_exponent, % const MagickBooleanType verbose,ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This double represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. % % o exception: return any errors or warnings in this structure. 
% */ static MagickBooleanType Classify(Image *image,short **extrema, const double cluster_threshold, const double weighting_exponent,const MagickBooleanType verbose, ExceptionInfo *exception) { #define SegmentImageTag "Segment/Image" #define ThrowClassifyException(severity,tag,label) \ {\ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) \ { \ next_cluster=cluster->next; \ cluster=(Cluster *) RelinquishMagickMemory(cluster); \ } \ if (squares != (double *) NULL) \ { \ squares-=255; \ free_squares=squares; \ free_squares=(double *) RelinquishMagickMemory(free_squares); \ } \ ThrowBinaryException(severity,tag,label); \ } CacheView *image_view; Cluster *cluster, *head, *last_cluster, *next_cluster; ExtentPacket blue, green, red; MagickOffsetType progress; double *free_squares; MagickStatusType status; register ssize_t i; register double *squares; size_t number_clusters; ssize_t count, y; /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; squares=(double *) NULL; (void) memset(&red,0,sizeof(red)); (void) memset(&green,0,sizeof(green)); (void) memset(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireQuantumMemory(1, sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. */ status=MagickTrue; count=0; progress=0; image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. 
*/ count++; cluster->red.center+=(double) ScaleQuantumToChar( GetPixelRed(image,p)); cluster->green.center+=(double) ScaleQuantumToChar( GetPixelGreen(image,p)); cluster->blue.center+=(double) ScaleQuantumToChar( GetPixelBlue(image,p)); cluster->count++; break; } p+=GetPixelChannels(image); } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } number_clusters=(size_t) count; if (verbose != MagickFalse) { /* Print cluster statistics. */ (void) FormatLocaleFile(thread_stdout,"Fuzzy C-means Statistics\n"); (void) FormatLocaleFile(thread_stdout,"===================\n\n"); (void) FormatLocaleFile(thread_stdout,"\tCluster Threshold = %g\n",(double) cluster_threshold); (void) FormatLocaleFile(thread_stdout,"\tWeighting Exponent = %g\n",(double) weighting_exponent); (void) FormatLocaleFile(thread_stdout,"\tTotal Number of Clusters = %.20g\n\n", (double) number_clusters); /* Print the total number of points per cluster. */ (void) FormatLocaleFile(thread_stdout,"\n\nNumber of Vectors Per Cluster\n"); (void) FormatLocaleFile(thread_stdout,"=============================\n\n"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) (void) FormatLocaleFile(thread_stdout,"Cluster #%.20g = %.20g\n",(double) cluster->id,(double) cluster->count); /* Print the cluster extents. */ (void) FormatLocaleFile(thread_stdout, "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(thread_stdout,"================"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(thread_stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(thread_stdout, "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double) cluster->red.left,(double) cluster->red.right,(double) cluster->green.left,(double) cluster->green.right,(double) cluster->blue.left,(double) cluster->blue.right); } /* Print the cluster center values. */ (void) FormatLocaleFile(thread_stdout, "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(thread_stdout,"====================="); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(thread_stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(thread_stdout,"%g %g %g\n",(double) cluster->red.center,(double) cluster->green.center,(double) cluster->blue.center); } (void) FormatLocaleFile(thread_stdout,"\n"); } if (number_clusters > 256) ThrowClassifyException(ImageError,"TooManyClusters",image->filename); /* Speed up distance calculations. 
*/
  squares=(double *) AcquireQuantumMemory(513UL,sizeof(*squares));
  if (squares == (double *) NULL)
    ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  squares+=255;
  for (i=(-255); i <= 255; i++)
    squares[i]=(double) i*(double) i;
  /*
    Allocate image colormap.
  */
  if (AcquireImageColormap(image,number_clusters,exception) == MagickFalse)
    ThrowClassifyException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  i=0;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
  {
    image->colormap[i].red=(double) ScaleCharToQuantum((unsigned char)
      (cluster->red.center+0.5));
    image->colormap[i].green=(double) ScaleCharToQuantum((unsigned char)
      (cluster->green.center+0.5));
    image->colormap[i].blue=(double) ScaleCharToQuantum((unsigned char)
      (cluster->blue.center+0.5));
    i++;
  }
  /*
    Do coarse grain classes.
  */
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    Cluster
      *clust;

    register const PixelInfo
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelIndex(image,(Quantum) 0,q);
      for (clust=head; clust != (Cluster *) NULL; clust=clust->next)
      {
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) >=
             (clust->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,q)) <=
             (clust->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) >=
             (clust->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q)) <=
             (clust->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) >=
             (clust->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q)) <=
             (clust->blue.right+SafeMargin)))
          {
            /*
              Classify this pixel.
            */
            SetPixelIndex(image,(Quantum) clust->id,q);
            break;
          }
      }
      if (clust == (Cluster *) NULL)
        {
          double
            distance_squared,
            local_minima,
            numerator,
            ratio,
            sum;

          register ssize_t
            j,
            k;

          /*
            Compute fuzzy membership: the classical fuzzy c-means weight
            u(j)=1/sum_k((d_j^2/d_k^2)^(1/(m-1))), where d_j is the distance
            to colormap entry j and m is the weighting exponent; the pixel
            joins the class with the largest membership.
          */
          local_minima=0.0;
          for (j=0; j < (ssize_t) image->colors; j++)
          {
            sum=0.0;
            p=image->colormap+j;
            distance_squared=squares[(ssize_t) ScaleQuantumToChar(
              GetPixelRed(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[(ssize_t)
              ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
              ScaleQuantumToChar(ClampToQuantum(p->blue))];
            numerator=distance_squared;
            for (k=0; k < (ssize_t) image->colors; k++)
            {
              p=image->colormap+k;
              distance_squared=squares[(ssize_t) ScaleQuantumToChar(
                GetPixelRed(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->red))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelGreen(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->green))]+squares[
                (ssize_t) ScaleQuantumToChar(GetPixelBlue(image,q))-(ssize_t)
                ScaleQuantumToChar(ClampToQuantum(p->blue))];
              ratio=numerator/distance_squared;
              sum+=SegmentPower(ratio);
            }
            if ((sum != 0.0) && ((1.0/sum) > local_minima))
              {
                /*
                  Classify this pixel.
*/ local_minima=1.0/sum; SetPixelIndex(image,(Quantum) j,q); } } } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,SegmentImageTag,progress,2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status&=SyncImage(image,exception); /* Relinquish resources. */ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } squares-=255; free_squares=squares; free_squares=(double *) RelinquishMagickMemory(free_squares); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C r o s s i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCrossings() guarantees that an even number of zero crossings % always lie between two crossings. % % The format of the ConsolidateCrossings method is: % % ConsolidateCrossings(ZeroCrossing *zero_crossing, % const size_t number_crossings) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. % */ static void ConsolidateCrossings(ZeroCrossing *zero_crossing, const size_t number_crossings) { register ssize_t i, j, k, l; ssize_t center, correct, count, left, right; /* Consolidate zero crossings. */ for (i=(ssize_t) number_crossings-1; i >= 0; i--) for (j=0; j <= 255; j++) { if (zero_crossing[i].crossings[j] == 0) continue; /* Find the entry that is closest to j and still preserves the property that there are an even number of crossings between intervals. */ for (k=j-1; k > 0; k--) if (zero_crossing[i+1].crossings[k] != 0) break; left=MagickMax(k,0); center=j; for (k=j+1; k < 255; k++) if (zero_crossing[i+1].crossings[k] != 0) break; right=MagickMin(k,255); /* K is the zero crossing just left of j. */ for (k=j-1; k > 0; k--) if (zero_crossing[i].crossings[k] != 0) break; if (k < 0) k=0; /* Check center for an even number of crossings between k and j. */ correct=(-1); if (zero_crossing[i+1].crossings[j] != 0) { count=0; for (l=k+1; l < center; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (center != k)) correct=center; } /* Check left for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < left; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (left != k)) correct=left; } /* Check right for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < right; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (right != k)) correct=right; } l=(ssize_t) zero_crossing[i].crossings[j]; zero_crossing[i].crossings[j]=0; if (correct != -1) zero_crossing[i].crossings[correct]=(short) l; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineRegion() defines the left and right boundaries of a peak region. 
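%  A region extends from a peak (a positive entry in extrema[]) to the
%  valley that follows it; extents->index is left pointing at the valley,
%  so successive calls walk the histogram one region at a time.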
%
%  The format of the DefineRegion method is:
%
%      ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
%
%  A description of each parameter follows.
%
%    o extrema: Specifies a pointer to an array of integers.  They
%      represent the peaks and valleys of the histogram for each color
%      component.
%
%    o extents: This pointer to an ExtentPacket represents the extents
%      of a particular peak or valley of a color component.
%
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   D e r i v a t i v e   H i s t o g r a m                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DerivativeHistogram() determines the derivative of the histogram using
%  central differencing.
%
%  The format of the DerivativeHistogram method is:
%
%      DerivativeHistogram(const double *histogram,
%        double *derivative)
%
%  A description of each parameter follows.
%
%    o histogram: Specifies an array of doubles representing the number
%      of pixels for each intensity of a particular color component.
%
%    o derivative: This array of doubles is initialized by
%      DerivativeHistogram to the derivative of the histogram using central
%      differencing.
%
*/
static void DerivativeHistogram(const double *histogram,
  double *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation.
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   G e t I m a g e D y n a m i c T h r e s h o l d                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageDynamicThreshold() returns the dynamic threshold for an image.
%
%  The format of the GetImageDynamicThreshold method is:
%
%      MagickBooleanType GetImageDynamicThreshold(const Image *image,
%        const double cluster_threshold,const double smooth_threshold,
%        PixelInfo *pixel,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: the image.
%
%    o cluster_threshold: This double represents the minimum number of
%      pixels contained in a hexahedra before it can be considered valid
%      (expressed as a percentage).
%
%    o smooth_threshold: the smoothing threshold eliminates noise in the
%      second derivative of the histogram.  As the value is increased, you
%      can expect a smoother second derivative.
%
%    o pixel: return the dynamic threshold here.
%
%    o exception: return any errors or warnings in this structure.
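%
%  The returned pixel is a channel-by-channel midpoint between the center
%  of the smallest surviving cluster (taken as the object) and the center
%  of the largest surviving cluster (taken as the background).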
%
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  PixelInfo *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  double
    threshold;

  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetPixelInfo(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) memset(&red,0,sizeof(red));
  (void) memset(&green,0,sizeof(green));
  (void) memset(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireQuantumMemory(1,
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.
*/ count=0; for (y=0; y < (ssize_t) image->rows; y++) { p=GetVirtualPixels(image,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(image,p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. */ count++; cluster->red.center+=(double) ScaleQuantumToChar( GetPixelRed(image,p)); cluster->green.center+=(double) ScaleQuantumToChar( GetPixelGreen(image,p)); cluster->blue.center+=(double) ScaleQuantumToChar( GetPixelBlue(image,p)); cluster->count++; break; } p+=GetPixelChannels(image); } proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y, 2*image->rows); if (proceed == MagickFalse) break; } /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } object=head; background=head; if (count > 1) { object=head->next; for (cluster=object; cluster->next != (Cluster *) NULL; ) { if (cluster->count < object->count) object=cluster; cluster=cluster->next; } background=head->next; for (cluster=background; cluster->next != (Cluster *) NULL; ) { if (cluster->count > background->count) background=cluster; cluster=cluster->next; } } if (background != (Cluster *) NULL) { threshold=(background->red.center+object->red.center)/2.0; pixel->red=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->green.center+object->green.center)/2.0; pixel->green=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); threshold=(background->blue.center+object->blue.center)/2.0; pixel->blue=(double) ScaleCharToQuantum((unsigned char) (threshold+0.5)); } /* Relinquish resources. */ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + I n i t i a l i z e H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeHistogram() computes the histogram for an image. 
%
%  The format of the InitializeHistogram method is:
%
%      InitializeHistogram(const Image *image,ssize_t **histogram,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o image: Specifies a pointer to an Image structure;  returned from
%      ReadImage.
%
%    o histogram: Specifies an array of integers representing the number
%      of pixels for each intensity of a particular color component.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const Quantum
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Initialize histogram.
  */
  for (i=0; i <= 255; i++)
  {
    histogram[Red][i]=0;
    histogram[Green][i]=0;
    histogram[Blue][i]=0;
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(image,p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(image,p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(image,p))]++;
      p+=GetPixelChannels(image);
    }
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I n i t i a l i z e   I n t e r v a l   T r e e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InitializeIntervalTree() initializes an interval tree from the lists of
%  zero crossings.
%
%  The format of the InitializeIntervalTree method is:
%
%      IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
%        const size_t number_crossings)
%
%  A description of each parameter follows.
%
%    o zero_crossing: Specifies an array of structures of type ZeroCrossing.
%
%    o number_crossings: This size_t specifies the number of elements
%      in the zero_crossing array.
%
*/

static void InitializeList(IntervalTree **list,ssize_t *number_nodes,
  IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    list[(*number_nodes)++]=node;
  InitializeList(list,number_nodes,node->sibling);
  InitializeList(list,number_nodes,node->child);
}

static void MeanStability(IntervalTree *node)
{
  register IntervalTree
    *child;

  if (node == (IntervalTree *) NULL)
    return;
  node->mean_stability=0.0;
  child=node->child;
  if (child != (IntervalTree *) NULL)
    {
      register ssize_t
        count;

      register double
        sum;

      sum=0.0;
      count=0;
      for ( ; child != (IntervalTree *) NULL; child=child->sibling)
      {
        sum+=child->stability;
        count++;
      }
      node->mean_stability=sum/(double) count;
    }
  MeanStability(node->sibling);
  MeanStability(node->child);
}

static void Stability(IntervalTree *node)
{
  if (node == (IntervalTree *) NULL)
    return;
  if (node->child == (IntervalTree *) NULL)
    node->stability=0.0;
  else
    node->stability=node->tau-(node->child)->tau;
  Stability(node->sibling);
  Stability(node->child);
}

static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing,
  const size_t number_crossings)
{
  IntervalTree
    *head,
    **list,
    *node,
    *root;

  register ssize_t
    i;

  ssize_t
    j,
    k,
    left,
    number_nodes;

  /*
    Allocate interval tree.
  */
  list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength,
    sizeof(*list));
  if (list == (IntervalTree **) NULL)
    return((IntervalTree *) NULL);
  /*
    The root is the entire histogram.
*/ root=(IntervalTree *) AcquireCriticalMemory(sizeof(*root)); root->child=(IntervalTree *) NULL; root->sibling=(IntervalTree *) NULL; root->tau=0.0; root->left=0; root->right=255; root->mean_stability=0.0; root->stability=0.0; (void) memset(list,0,TreeLength*sizeof(*list)); for (i=(-1); i < (ssize_t) number_crossings; i++) { /* Initialize list with all nodes with no children. */ number_nodes=0; InitializeList(list,&number_nodes,root); /* Split list. */ for (j=0; j < number_nodes; j++) { head=list[j]; left=head->left; node=head; for (k=head->left+1; k < head->right; k++) { if (zero_crossing[i+1].crossings[k] != 0) { if (node == head) { node->child=(IntervalTree *) AcquireQuantumMemory(1, sizeof(*node->child)); node=node->child; } else { node->sibling=(IntervalTree *) AcquireQuantumMemory(1, sizeof(*node->sibling)); node=node->sibling; } if (node == (IntervalTree *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); FreeNodes(root); return((IntervalTree *) NULL); } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=k; left=k; } } if (left != head->left) { node->sibling=(IntervalTree *) AcquireQuantumMemory(1, sizeof(*node->sibling)); node=node->sibling; if (node == (IntervalTree *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); FreeNodes(root); return((IntervalTree *) NULL); } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=head->right; } } } /* Determine the stability: difference between a nodes tau and its child. */ Stability(root->child); MeanStability(root->child); list=(IntervalTree **) RelinquishMagickMemory(list); return(root); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p t i m a l T a u % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OptimalTau() finds the optimal tau for each band of the histogram. % % The format of the OptimalTau method is: % % double OptimalTau(const ssize_t *histogram,const double max_tau, % const double min_tau,const double delta_tau, % const double smooth_threshold,short *extrema) % % A description of each parameter follows. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. 
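%
%  OptimalTau() returns the average tau of the active interval-tree nodes;
%  as a side effect it fills extrema[] with signed indices: a positive
%  entry marks the peak of a region, a negative entry marks a valley.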
% */ static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->stability >= node->mean_stability) { list[(*number_nodes)++]=node; ActiveNodes(list,number_nodes,node->sibling); } else { ActiveNodes(list,number_nodes,node->sibling); ActiveNodes(list,number_nodes,node->child); } } static void FreeNodes(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; FreeNodes(node->sibling); FreeNodes(node->child); node=(IntervalTree *) RelinquishMagickMemory(node); } static double OptimalTau(const ssize_t *histogram,const double max_tau, const double min_tau,const double delta_tau,const double smooth_threshold, short *extrema) { IntervalTree **list, *node, *root; MagickBooleanType peak; double average_tau, *derivative, *second_derivative, tau, value; register ssize_t i, x; size_t count, number_crossings; ssize_t index, j, k, number_nodes; ZeroCrossing *zero_crossing; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return(0.0); /* Allocate zero crossing list. */ count=(size_t) ((max_tau-min_tau)/delta_tau)+2; zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count, sizeof(*zero_crossing)); if (zero_crossing == (ZeroCrossing *) NULL) { list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } for (i=0; i < (ssize_t) count; i++) zero_crossing[i].tau=(-1.0); /* Initialize zero crossing list. */ derivative=(double *) AcquireCriticalMemory(256*sizeof(*derivative)); second_derivative=(double *) AcquireCriticalMemory(256* sizeof(*second_derivative)); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(double) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(double *) RelinquishMagickMemory(derivative); second_derivative=(double *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) { zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(0.0); } /* Find active nodes: stability is greater (or equal) to the mean stability of its children. */ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. 
*/ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau*=PerceptibleReciprocal((double) number_nodes); /* Relinquish resources. */ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const double tau, % double *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of doubles representing the number % of pixels for each intensity of a particular color component. % */ static void ScaleSpace(const ssize_t *histogram,const double tau, double *scale_histogram) { double alpha, beta, *gamma, sum; register ssize_t u, x; gamma=(double *) AcquireQuantumMemory(256,sizeof(*gamma)); if (gamma == (double *) NULL) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateGammaMap"); alpha=PerceptibleReciprocal(tau*sqrt(2.0*MagickPI)); beta=(-1.0*PerceptibleReciprocal(2.0*tau*tau)); for (x=0; x <= 255; x++) gamma[x]=0.0; for (x=0; x <= 255; x++) { gamma[x]=exp((double) beta*x*x); if (gamma[x] < MagickEpsilon) break; } for (x=0; x <= 255; x++) { sum=0.0; for (u=0; u <= 255; u++) sum+=(double) histogram[u]*gamma[MagickAbsoluteValue(x-u)]; scale_histogram[x]=alpha*sum; } gamma=(double *) RelinquishMagickMemory(gamma); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e g m e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SegmentImage() segment an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % C-means technique. % % The format of the SegmentImage method is: % % MagickBooleanType SegmentImage(Image *image, % const ColorspaceType colorspace,const MagickBooleanType verbose, % const double cluster_threshold,const double smooth_threshold, % ExceptionInfo *exception) % % A description of each parameter follows. % % o image: the image. % % o colorspace: Indicate the colorspace. % % o verbose: Set to MagickTrue to print detailed information about the % identified classes. % % o cluster_threshold: This represents the minimum number of pixels % contained in a hexahedra before it can be considered valid (expressed % as a percentage). 
% % o smooth_threshold: the smoothing threshold eliminates noise in the second % derivative of the histogram. As the value is increased, you can expect a % smoother second derivative. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SegmentImage(Image *image, const ColorspaceType colorspace,const MagickBooleanType verbose, const double cluster_threshold,const double smooth_threshold, ExceptionInfo *exception) { ColorspaceType previous_colorspace; MagickBooleanType status; register ssize_t i; short *extrema[MaxDimension]; ssize_t *histogram[MaxDimension]; /* Allocate histogram and extrema. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); for (i=0; i < MaxDimension; i++) { histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram)); extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema)); if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL)) { for (i-- ; i >= 0; i--) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename) } } /* Initialize histogram. */ previous_colorspace=image->colorspace; (void) TransformImageColorspace(image,colorspace,exception); InitializeHistogram(image,histogram,exception); (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]); (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]); (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau, smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]); /* Classify using the fuzzy c-Means technique. */ status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose, exception); (void) TransformImageColorspace(image,previous_colorspace,exception); /* Relinquish resources. */ for (i=0; i < MaxDimension; i++) { extrema[i]=(short *) RelinquishMagickMemory(extrema[i]); histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]); } return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + Z e r o C r o s s H i s t o g r a m % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ZeroCrossHistogram() find the zero crossings in a histogram and marks % directions as: 1 is negative to positive; 0 is zero crossing; and -1 % is positive to negative. % % The format of the ZeroCrossHistogram method is: % % ZeroCrossHistogram(double *second_derivative, % const double smooth_threshold,short *crossings) % % A description of each parameter follows. % % o second_derivative: Specifies an array of doubles representing the % second derivative of the histogram of a particular color component. % % o crossings: This array of integers is initialized with % -1, 0, or 1 representing the slope of the first derivative of the % of a particular color component. % */ static void ZeroCrossHistogram(double *second_derivative, const double smooth_threshold,short *crossings) { register ssize_t i; ssize_t parity; /* Merge low numbers to zero to help prevent noise. */ for (i=0; i <= 255; i++) if ((second_derivative[i] < smooth_threshold) && (second_derivative[i] >= -smooth_threshold)) second_derivative[i]=0.0; /* Mark zero crossings. 
*/ parity=0; for (i=0; i <= 255; i++) { crossings[i]=0; if (second_derivative[i] < 0.0) { if (parity > 0) crossings[i]=(-1); parity=1; } else if (second_derivative[i] > 0.0) { if (parity < 0) crossings[i]=1; parity=(-1); } } }
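
/*
  Illustrative sketch (not part of MagickCore): a minimal trace of the
  scale-space pipeline behind OptimalTau(), assuming it is compiled in this
  translation unit so the static helpers ScaleSpace(), DerivativeHistogram(),
  and ZeroCrossHistogram() are visible.  The guard macro, the tau value, and
  the synthetic two-peak histogram are hypothetical choices for illustration.
*/
#if defined(MAGICKCORE_SEGMENT_DEMO)
static void TraceScaleSpacePipeline(void)
{
  double
    derivative[256],
    second_derivative[256],
    smoothed[256];

  register ssize_t
    i;

  short
    crossings[256];

  ssize_t
    histogram[256];

  /*
    Synthetic histogram with peaks near intensities 64 and 192.
  */
  for (i=0; i < 256; i++)
    histogram[i]=(ssize_t) (1000.0*exp(-0.002*(double) ((i-64)*(i-64)))+
      800.0*exp(-0.002*(double) ((i-192)*(i-192))));
  /*
    Smooth at one scale, differentiate twice, then mark the inflections:
    a -1 entry marks where the second derivative turns negative (the start
    of a concave, i.e. peak, run) and the next +1 entry marks its end;
    these are the fingerprints OptimalTau() consumes.
  */
  ScaleSpace(histogram,2.0,smoothed);
  DerivativeHistogram(smoothed,derivative);
  DerivativeHistogram(derivative,second_derivative);
  ZeroCrossHistogram(second_derivative,0.0,crossings);
  for (i=0; i <= 255; i++)
    if (crossings[i] != 0)
      (void) FormatLocaleFile(stderr,"zero crossing at %.20g (%d)\n",
        (double) i,(int) crossings[i]);
}
#endif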
/* ssembpush2.c */
/* SSE2 C Library for Skeleton 2-1/2D Electromagnetic OpenMP/Vector */
/* PIC Code */
/* written by Viktor K. Decyk, UCLA and Ricardo Fonseca, ISCTE */
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <complex.h>
#include <math.h>
#include <emmintrin.h>
#include "ssembpush2.h"

/*--------------------------------------------------------------------*/
void csse2gbppush23lt(float ppart[], float fxy[], float bxy[],
                      int kpic[], float qbm, float dt, float dtc,
                      float *ek, int idimp, int nppmx, int nx, int ny,
                      int mx, int my, int nxv, int nyv, int mx1,
                      int mxy1, int ipbc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with magnetic field. Using the Boris Mover.
   vector/OpenMP version using guard cells
   particles stored in segmented array
   119 flops/particle, 1 divide, 29 loads, 5 stores
   input: all, output: ppart, ek
   velocity equations used are:
   vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt
   vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt
   vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
   omz = (q/m)*bz(x(t),y(t)).
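   note: this is the standard Boris split: a half electric impulse gives
   v- = v(t-dt/2) + .5*(q/m)*E*dt, the rot(1)-rot(9) matrix rotates v-
   about B into v+ with |v+| = |v-|, and a second half electric impulse
   gives v(t+dt/2) = v+ + .5*(q/m)*E*dt; since the rotation preserves the
   speed, the magnetic field does no work and only the two electric
   impulses change the kinetic energy accumulated in ek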
position equations used are: x(t+dt)=x(t) + vx(t+dt/2)*dt y(t+dt)=y(t) + vy(t+dt/2)*dt fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t)) bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t)) are approximated by interpolation from the nearest grid points: fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1) + dx*fx(n+1,m+1)) where n,m = leftmost grid points and dx = x-n, dy = y-m similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = x velocity of particle n in tile m ppart[m][3][n] = y velocity of particle n in tile m ppart[m][4][n] = z velocity of particle n in tile m fxy[k][j][0] = x component of force/charge at grid (j,k) fxy[k][j][1] = y component of force/charge at grid (j,k) fxy[k][j][2] = z component of force/charge at grid (j,k) that is, convolution of electric field over particle shape bxy[k][j][0] = x component of magnetic field at grid (j,k) bxy[k][j][1] = y component of magnetic field at grid (j,k) bxy[k][j][2] = z component of magnetic field at grid (j,k) that is, the convolution of magnetic field over particle shape kpic = number of particles per tile qbm = particle charge/mass ratio dt = time interval between successive calculations dtc = time interval between successive co-ordinate calculations kinetic energy/mass at time t is also calculated, using ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + (vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2) idimp = size of phase space = 5 nppmx = maximum number of particles in tile nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y nxv = first dimension of field arrays, must be >= nx+1 nyv = second dimension of field arrays, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,2d periodic,2d reflecting,mixed reflecting/periodic) requires SSE2, ppart, fxy, and bxy need to be 16 byte aligned nppmx needs to be a multiple of 4, fxy, bxy need to have 4 components local data */ #define MXV 33 #define MYV 33 int noff, moff, npoff, npp, mxv; int i, j, k, nps, nn, mm, nm; float qtmh, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy; float dx, dy, dz, ox, oy, oz, acx, acy, acz, omxt, omyt, omzt, omt; float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9; float x, y, vx, vy, vz; double sum1, sum2; __m128i v_noff, v_moff, v_mxv; __m128i v_nn, v_mm, v_it; __m128 v_qtmh, v_dt, v_dtc, v_one, v_two, v_half; __m128 v_dxp, v_dyp, v_amx, v_amy, v_at; __m128 v_x, v_y, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m128 v_edgelx, v_edgely, v_edgerx, v_edgery; __m128 a, b, c, d, e, f, g, h; __m128d v_sum1, v_d; __attribute__((aligned(16))) unsigned int ll[4]; __attribute__((aligned(16))) double dd[2]; __attribute__((aligned(16))) float sfxy[4*MXV*MYV], sbxy[4*MXV*MYV]; /* __attribute__((aligned(16))) float sfxy[4*(mx+1)*(my+1)]; */ /* __attribute__((aligned(16))) float sbxy[4*(mx+1)*(my+1)]; */ mxv = mx + 1; qtmh = 0.5f*qbm*dt; sum2 = 0.0; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgerx = (float) nx; edgery = (float) ny; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } else if (ipbc==3) { edgelx = 1.0f; edgerx = (float) (nx-1); } v_mxv = _mm_set1_epi32(mxv); v_qtmh = _mm_set1_ps(qtmh); v_dt = _mm_set1_ps(dt); v_dtc = _mm_set1_ps(dtc); v_one = 
_mm_set1_ps(1.0f); v_two = _mm_set1_ps(2.0f); v_half = _mm_set1_ps(0.5f); v_edgelx = _mm_set1_ps(edgelx); v_edgely = _mm_set1_ps(edgely); v_edgerx = _mm_set1_ps(edgerx); v_edgery = _mm_set1_ps(edgery); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,noff,moff,npp,npoff,nps,nn,mm,nm,x,y,vx,vy,vz,dxp,dyp,amx, \ amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1,rot2, \ rot3,rot4,rot5,rot6,rot7,rot8,rot9,sum1,v_noff,v_moff,v_nn,v_mm,v_it, \ v_x,v_y,v_vx,v_vy,v_vz,v_dxp,v_dyp,v_amx,v_amy,v_dx,v_dy,v_dz,v_at, \ v_d,v_sum1,a,b,c,d,e,f,g,h,ll,dd,sfxy,sbxy) \ reduction(+:sum2) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm_set1_epi32(noff); v_moff = _mm_set1_epi32(moff); npp = kpic[k]; npoff = idimp*nppmx*k; /* load local fields from global array */ nn = (mx < nx-noff ? mx : nx-noff) + 1; mm = (my < ny-moff ? my : ny-moff) + 1; for (j = 0; j < mm; j++) { /* for (i = 0; i < nn; i++) { */ /* sfxy[4*(i+mxv*j)] = fxy[4*(i+noff+nxv*(j+moff))]; */ /* sfxy[1+4*(i+mxv*j)] = fxy[1+4*(i+noff+nxv*(j+moff))]; */ /* sfxy[2+4*(i+mxv*j)] = fxy[2+4*(i+noff+nxv*(j+moff))]; */ /* } */ for (i = 0; i < nn; i++) { v_at = _mm_loadu_ps(&fxy[4*(i+noff+nxv*(j+moff))]); _mm_storeu_ps(&sfxy[4*(i+mxv*j)],v_at); } } for (j = 0; j < mm; j++) { /* for (i = 0; i < nn; i++) { */ /* sbxy[4*(i+mxv*j)] = bxy[4*(i+noff+nxv*(j+moff))]; */ /* sbxy[1+4*(i+mxv*j)] = bxy[1+4*(i+noff+nxv*(j+moff))]; */ /* sbxy[2+4*(i+mxv*j)] = bxy[2+4*(i+noff+nxv*(j+moff))]; */ /* } */ for (i = 0; i < nn; i++) { v_at = _mm_loadu_ps(&bxy[4*(i+noff+nxv*(j+moff))]); _mm_storeu_ps(&sbxy[4*(i+mxv*j)],v_at); } } nps = 4*(npp/4); sum1 = 0.0; v_sum1 = _mm_set1_pd(0.0); /* vector loop over particles in blocks of 4 */ for (j = 0; j < nps; j+=4) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ v_x = _mm_load_ps(&ppart[j+npoff]); v_y = _mm_load_ps(&ppart[j+nppmx+npoff]); /* nn = x; */ /* mm = y; */ v_nn = _mm_cvttps_epi32(v_x); v_mm = _mm_cvttps_epi32(v_y); /* dxp = x - (float) nn; */ v_dxp = _mm_sub_ps(v_x,_mm_cvtepi32_ps(v_nn)); /* dyp = y - (float) mm; */ v_dyp = _mm_sub_ps(v_y,_mm_cvtepi32_ps(v_mm)); /* nm = 4*(nn - noff + mxv*(mm - moff)); */ v_nn = _mm_sub_epi32(v_nn,v_noff); v_mm = _mm_sub_epi32(v_mm,v_moff); v_it = _mm_mul_epu32(v_mxv,_mm_srli_si128(v_mm,4)); v_mm = _mm_mul_epu32(v_mm,v_mxv); v_mm = _mm_add_epi32(v_mm,_mm_slli_si128(v_it,4)); v_nn = _mm_slli_epi32(_mm_add_epi32(v_nn,v_mm),2); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ v_amx = _mm_sub_ps(v_one,v_dxp); v_amy = _mm_sub_ps(v_one,v_dyp); _mm_store_si128((__m128i *)ll,v_nn); /* find electric field */ /* nn = nm; */ /* dx = amx*sfxy[nn]; */ /* dy = amx*sfxy[nn+1]; */ /* dz = amx*sfxy[nn+2]; */ /* mm = nn + 4; */ /* dx = amy*(dxp*sfxy[mm] + dx); */ /* dy = amy*(dxp*sfxy[mm+1] + dy); */ /* dz = amy*(dxp*sfxy[mm+2] + dz); */ /* nn += 4*mxv; */ /* acx = amx*sfxy[nn]; */ /* acy = amx*sfxy[nn+1]; */ /* acz = amx*sfxy[nn+2]; */ /* mm = nn + 4; */ /* dx += dyp*(dxp*sfxy[mm] + acx); */ /* dy += dyp*(dxp*sfxy[mm+1] + acy); */ /* dz += dyp*(dxp*sfxy[mm+2] + acz); */ /* find magnetic field */ /* nn = nm; */ /* ox = amx*sbxy[nn]; */ /* oy = amx*sbxy[nn+1]; */ /* oz = amx*sbxy[nn+2]; */ /* mm = nn + 4; */ /* ox = amy*(dxp*sbxy[mm] + ox); */ /* oy = amy*(dxp*sbxy[mm+1] + oy); */ /* oz = amy*(dxp*sbxy[mm+2] + oz); */ /* nn += 4*mxv; */ /* acx = amx*sbxy[nn]; */ /* acy = amx*sbxy[nn+1]; */ /* acz = 
amx*sbxy[nn+2]; */ /* mm = nn + 4; */ /* ox += dyp*(dxp*sbxy[mm] + acx); */ /* oy += dyp*(dxp*sbxy[mm+1] + acy); */ /* oz += dyp*(dxp*sbxy[mm+2] + acz); */ /* interpolate electric and magnetic fields for first particle */ nn = ll[0]; v_at = _mm_shuffle_ps(v_amx,v_amx,0); a = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); e = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,0); nn += 4; a = _mm_add_ps(a,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); e = _mm_add_ps(e,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,0); a = _mm_mul_ps(a,v_at); e = _mm_mul_ps(e,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,0); a = _mm_add_ps(a,_mm_mul_ps(v_dx,v_at)); e = _mm_add_ps(e,_mm_mul_ps(v_dy,v_at)); /* interpolate electric and magnetic fields for second particle */ nn = ll[1]; v_at = _mm_shuffle_ps(v_amx,v_amx,85); b = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); f = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,85); nn += 4; b = _mm_add_ps(b,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); f = _mm_add_ps(f,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,85); b = _mm_mul_ps(b,v_at); f = _mm_mul_ps(f,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,85); b = _mm_add_ps(b,_mm_mul_ps(v_dx,v_at)); f = _mm_add_ps(f,_mm_mul_ps(v_dy,v_at)); /* interpolate electric and magnetic fields for third particle */ nn = ll[2]; v_at = _mm_shuffle_ps(v_amx,v_amx,170); c = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); g = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,170); nn += 4; c = _mm_add_ps(c,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); g = _mm_add_ps(g,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,170); c = _mm_mul_ps(c,v_at); g = _mm_mul_ps(g,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,170); c = _mm_add_ps(c,_mm_mul_ps(v_dx,v_at)); g = _mm_add_ps(g,_mm_mul_ps(v_dy,v_at)); /* interpolate electric and magnetic fields for fourth particle */ nn = ll[3]; v_at = _mm_shuffle_ps(v_amx,v_amx,255); d = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); h = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,255); nn += 4; d = _mm_add_ps(d,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); h = _mm_add_ps(h,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,255); d = _mm_mul_ps(d,v_at); h = _mm_mul_ps(h,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,255); d = _mm_add_ps(d,_mm_mul_ps(v_dx,v_at)); h = _mm_add_ps(h,_mm_mul_ps(v_dy,v_at)); /* transpose so a,b,c,d contain electric fields for each 
particle */ _MM_TRANSPOSE4_PS(a,b,c,d); /* transpose so e,f,g,h contain magnetic fields for each particle */ _MM_TRANSPOSE4_PS(e,f,g,h); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm_mul_ps(a,v_qtmh); v_dy = _mm_mul_ps(b,v_qtmh); v_dz = _mm_mul_ps(c,v_qtmh); /* half acceleration */ /* acx = ppart[j+2*nppmx+npoff] + dx; */ /* acy = ppart[j+3*nppmx+npoff] + dy; */ /* acz = ppart[j+4*nppmx+npoff] + dz; */ a = _mm_add_ps(v_dx,_mm_load_ps(&ppart[j+2*nppmx+npoff])); b = _mm_add_ps(v_dy,_mm_load_ps(&ppart[j+3*nppmx+npoff])); c = _mm_add_ps(v_dz,_mm_load_ps(&ppart[j+4*nppmx+npoff])); /* time-centered kinetic energy */ /* sum1 += (acx*acx + acy*acy + acz*acz); */ v_at = _mm_add_ps(_mm_mul_ps(a,a),_mm_mul_ps(b,b)); v_at = _mm_add_ps(v_at,_mm_mul_ps(c,c)); /* convert to double precision before accumulating */ v_d = _mm_cvtps_pd(v_at); v_sum1 = _mm_add_pd(v_sum1,v_d); v_it = _mm_srli_si128((__m128i)v_at,8); v_d = _mm_cvtps_pd((__m128)v_it); v_sum1 = _mm_add_pd(v_sum1,v_d); /* calculate cyclotron frequency */ /* omxt = qtmh*ox; */ /* omyt = qtmh*oy; */ /* omzt = qtmh*oz; */ e = _mm_mul_ps(v_qtmh,e); f = _mm_mul_ps(v_qtmh,f); g = _mm_mul_ps(v_qtmh,g); /* calculate rotation matrix */ /* vx = omxt*omxt; */ v_vx = _mm_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm_add_ps(_mm_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm_div_ps(v_two,_mm_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm_mul_ps(v_half,_mm_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm_mul_ps(_mm_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = _mm_mul_ps(_mm_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm_mul_ps(_mm_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = _mm_add_ps(v_vx,_mm_mul_ps(_mm_add_ps(h,g),b)); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm_add_ps(v_vy,_mm_mul_ps(_mm_sub_ps(h,g),a)); /* omt = omxt*omzt; */ h = _mm_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm_add_ps(v_vx,_mm_mul_ps(_mm_sub_ps(h,f),c)); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm_add_ps(v_vz,_mm_mul_ps(_mm_add_ps(h,f),a)); /* omt = omyt*omzt; */ h = _mm_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm_add_ps(v_vy,_mm_mul_ps(_mm_add_ps(h,e),c)); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm_add_ps(v_vz,_mm_mul_ps(_mm_sub_ps(h,e),b)); /* new velocity */ /* vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx; */ /* vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy; */ /* vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz; */ v_vx = _mm_add_ps(v_dx,_mm_mul_ps(v_vx,d)); v_vy = _mm_add_ps(v_dy,_mm_mul_ps(v_vy,d)); v_vz = _mm_add_ps(v_dz,_mm_mul_ps(v_vz,d)); /* new position */ /* dx = x + vx*dtc; */ /* dy = y + vy*dtc; */ v_dx = _mm_add_ps(v_x,_mm_mul_ps(v_vx,v_dtc)); v_dy = _mm_add_ps(v_y,_mm_mul_ps(v_vy,v_dtc)); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ v_at = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dx,v_edgerx)); v_x = _mm_and_ps(v_at,v_x); v_dx = _mm_add_ps(_mm_andnot_ps(v_at,v_dx),v_x); v_dxp = _mm_and_ps(v_at,v_vx); v_vx = _mm_sub_ps(_mm_andnot_ps(v_at,v_vx),v_dxp); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ v_at = _mm_cmplt_ps(v_dy,v_edgely); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dy,v_edgery)); v_y = _mm_and_ps(v_at,v_y); 
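/* note: SSE2 lacks a blend instruction, so the conditional move
   c = mask ? a : b is composed from and/andnot; here the add below
   recombines (mask & y) with (~mask & dy) to give dy = mask ? y : dy,
   and the subtraction yields vy = mask ? -vy : vy for particles
   reflected at the y boundaries */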
v_dy = _mm_add_ps(_mm_andnot_ps(v_at,v_dy),v_y); v_dyp = _mm_and_ps(v_at,v_vy); v_vy = _mm_sub_ps(_mm_andnot_ps(v_at,v_vy),v_dyp); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ v_at = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dx,v_edgerx)); v_x = _mm_and_ps(v_at,v_x); v_dx = _mm_add_ps(_mm_andnot_ps(v_at,v_dx),v_x); v_dxp = _mm_and_ps(v_at,v_vx); v_vx = _mm_sub_ps(_mm_andnot_ps(v_at,v_vx),v_dxp); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ _mm_store_ps(&ppart[j+npoff],v_dx); _mm_store_ps(&ppart[j+nppmx+npoff],v_dy); /* set new velocity */ /* ppart[j+2*nppmx+npoff] = vx; */ /* ppart[j+3*nppmx+npoff] = vy; */ /* ppart[j+4*nppmx+npoff] = vz; */ _mm_store_ps(&ppart[j+2*nppmx+npoff],v_vx); _mm_store_ps(&ppart[j+3*nppmx+npoff],v_vy); _mm_store_ps(&ppart[j+4*nppmx+npoff],v_vz); } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; nn = x; mm = y; dxp = x - (float) nn; dyp = y - (float) mm; nm = 4*(nn - noff + mxv*(mm - moff)); amx = 1.0f - dxp; amy = 1.0f - dyp; /* find electric field */ nn = nm; dx = amx*sfxy[nn]; dy = amx*sfxy[nn+1]; dz = amx*sfxy[nn+2]; mm = nn + 4; dx = amy*(dxp*sfxy[mm] + dx); dy = amy*(dxp*sfxy[mm+1] + dy); dz = amy*(dxp*sfxy[mm+2] + dz); nn += 4*mxv; acx = amx*sfxy[nn]; acy = amx*sfxy[nn+1]; acz = amx*sfxy[nn+2]; mm = nn + 4; dx += dyp*(dxp*sfxy[mm] + acx); dy += dyp*(dxp*sfxy[mm+1] + acy); dz += dyp*(dxp*sfxy[mm+2] + acz); /* find magnetic field */ nn = nm; ox = amx*sbxy[nn]; oy = amx*sbxy[nn+1]; oz = amx*sbxy[nn+2]; mm = nn + 4; ox = amy*(dxp*sbxy[mm] + ox); oy = amy*(dxp*sbxy[mm+1] + oy); oz = amy*(dxp*sbxy[mm+2] + oz); nn += 4*mxv; acx = amx*sbxy[nn]; acy = amx*sbxy[nn+1]; acz = amx*sbxy[nn+2]; mm = nn + 4; ox += dyp*(dxp*sbxy[mm] + acx); oy += dyp*(dxp*sbxy[mm+1] + acy); oz += dyp*(dxp*sbxy[mm+2] + acz); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+2*nppmx+npoff] + dx; acy = ppart[j+3*nppmx+npoff] + dy; acz = ppart[j+4*nppmx+npoff] + dz; /* time-centered kinetic energy */ sum1 += (acx*acx + acy*acy + acz*acz); /* calculate cyclotron frequency */ omxt = qtmh*ox; omyt = qtmh*oy; omzt = qtmh*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new velocity */ vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx; vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy; vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz; /* new position */ dx = x + vx*dtc; dy = y + vy*dtc; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; vy = -vy; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = -vx; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; /* set new velocity */ ppart[j+2*nppmx+npoff] = vx; ppart[j+3*nppmx+npoff] = vy; ppart[j+4*nppmx+npoff] = vz; } /* sum2 += sum1; */ _mm_store_pd(&dd[0],v_sum1); for (j = 1; j < 2; j++) { dd[0] += dd[j]; } sum2 += (sum1 + 
dd[0]);
   }
/* normalize kinetic energy */
   *ek += 0.5*sum2;
   return;
#undef MXV
#undef MYV
}

/*--------------------------------------------------------------------*/
void csse2gbppushf23lt(float ppart[], float fxy[], float bxy[],
                       int kpic[], int ncl[], int ihole[], float qbm,
                       float dt, float dtc, float *ek, int idimp,
                       int nppmx, int nx, int ny, int mx, int my,
                       int nxv, int nyv, int mx1, int mxy1, int ntmax,
                       int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, with magnetic field. Using the Boris Mover.
   with periodic boundary conditions.
   also determines list of particles which are leaving this tile
   vector/OpenMP version using guard cells
   particles stored in segmented array
   119 flops/particle, 1 divide, 29 loads, 5 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
   velocity equations used are:
   vx(t+dt/2) = rot(1)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt
   vy(t+dt/2) = rot(4)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt
   vz(t+dt/2) = rot(7)*(vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t)), omy = (q/m)*by(x(t),y(t)), and
   omz = (q/m)*bz(x(t),y(t)).
position equations used are: x(t+dt)=x(t) + vx(t+dt/2)*dt y(t+dt)=y(t) + vy(t+dt/2)*dt fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t)) bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t)) are approximated by interpolation from the nearest grid points: fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1) + dx*fx(n+1,m+1)) where n,m = leftmost grid points and dx = x-n, dy = y-m similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = x velocity of particle n in tile m ppart[m][3][n] = y velocity of particle n in tile m ppart[m][4][n] = z velocity of particle n in tile m fxy[k][j][0] = x component of force/charge at grid (j,k) fxy[k][j][1] = y component of force/charge at grid (j,k) fxy[k][j][2] = z component of force/charge at grid (j,k) that is, convolution of electric field over particle shape bxy[k][j][0] = x component of magnetic field at grid (j,k) bxy[k][j][1] = y component of magnetic field at grid (j,k) bxy[k][j][2] = z component of magnetic field at grid (j,k) that is, the convolution of magnetic field over particle shape kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = destination of particle leaving hole ihole[k][0][0] = ih, number of holes left (error, if negative) qbm = particle charge/mass ratio dt = time interval between successive calculations dtc = time interval between successive co-ordinate calculations kinetic energy/mass at time t is also calculated, using ek = .5*sum((vx(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (vy(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + (vz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2) idimp = size of phase space = 5 nppmx = maximum number of particles in tile nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y nxv = first dimension of field arrays, must be >= nx+1 nyv = second dimension of field arrays, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 optimized version requires SSE2, ppart, fxy, and bxy need to be 16 byte aligned nppmx needs to be a multiple of 4, fxy, bxy need to have 4 components local data */ #define MXV 33 #define MYV 33 int noff, moff, npoff, npp, mxv; int i, j, k, nps, ih, nh, nn, mm, nm, kk; float qtmh, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz; float acx, acy, acz, omxt, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9; float anx, any, edgelx, edgely, edgerx, edgery; float x, y, vx, vy, vz; double sum1, sum2; __m128i v_noff, v_moff, v_mxv; __m128i v_nn, v_mm, v_it; __m128 v_qtmh, v_dt, v_dtc, v_one, v_two, v_half; __m128 v_dxp, v_dyp, v_amx, v_amy, v_st, v_at; __m128 v_x, v_y, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m128 v_anx, v_any, v_edgelx, v_edgely, v_edgerx, v_edgery; __m128 v_zero, v_three, v_six; __m128 a, b, c, d, e, f, g, h; __m128d v_sum1, v_d; __attribute__((aligned(16))) unsigned int ll[4], lm[8]; __attribute__((aligned(16))) unsigned long jj[1]; __attribute__((aligned(16))) double dd[2]; __attribute__((aligned(16))) float sfxy[4*MXV*MYV], sbxy[4*MXV*MYV]; /* __attribute__((aligned(16))) float sfxy[4*(mx+1)*(my+1)]; */ /* __attribute__((aligned(16))) float 
sbxy[4*(mx+1)*(my+1)]; */ mxv = mx + 1; qtmh = 0.5f*qbm*dt; anx = (float) nx; any = (float) ny; sum2 = 0.0; v_mxv = _mm_set1_epi32(mxv); v_qtmh = _mm_set1_ps(qtmh); v_dt = _mm_set1_ps(dt); v_dtc = _mm_set1_ps(dtc); v_anx = _mm_set1_ps(anx); v_any = _mm_set1_ps(any); v_zero = _mm_setzero_ps(); v_one = _mm_set1_ps(1.0f); v_two = _mm_set1_ps(2.0f); v_half = _mm_set1_ps(0.5f); v_three = _mm_set1_ps(3.0f); v_six = _mm_set1_ps(6.0f); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,noff,moff,npp,npoff,nps,nn,mm,nm,kk,ih,nh,x,y,vx,vy,vz, \ dxp,dyp,amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm, \ rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely,edgerx, \ edgery,sum1,v_noff,v_moff,v_nn,v_mm,v_it,v_x,v_y,v_vx,v_vy,v_vz,v_dxp, \ v_dyp,v_amx,v_amy,v_dx,v_dy,v_dz,v_st,v_at,v_edgelx,v_edgely,v_edgerx, \ v_edgery,v_d,v_sum1,a,b,c,d,e,f,g,h,jj,ll,lm,dd,sfxy,sbxy) \ reduction(+:sum2) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm_set1_epi32(noff); v_moff = _mm_set1_epi32(moff); npp = kpic[k]; npoff = idimp*nppmx*k; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; v_edgelx = _mm_set1_ps(edgelx); v_edgely = _mm_set1_ps(edgely); v_edgerx = _mm_set1_ps(edgerx); v_edgery = _mm_set1_ps(edgery); ih = 0; nh = 0; nn += 1; mm += 1; /* load local fields from global array */ for (j = 0; j < mm; j++) { /* for (i = 0; i < nn; i++) { */ /* sfxy[4*(i+mxv*j)] = fxy[4*(i+noff+nxv*(j+moff))]; */ /* sfxy[1+4*(i+mxv*j)] = fxy[1+4*(i+noff+nxv*(j+moff))]; */ /* sfxy[2+4*(i+mxv*j)] = fxy[2+4*(i+noff+nxv*(j+moff))]; */ /* } */ for (i = 0; i < nn; i++) { v_at = _mm_loadu_ps(&fxy[4*(i+noff+nxv*(j+moff))]); _mm_storeu_ps(&sfxy[4*(i+mxv*j)],v_at); } } for (j = 0; j < mm; j++) { /* for (i = 0; i < nn; i++) { */ /* sbxy[4*(i+mxv*j)] = bxy[4*(i+noff+nxv*(j+moff))]; */ /* sbxy[1+4*(i+mxv*j)] = bxy[1+4*(i+noff+nxv*(j+moff))]; */ /* sbxy[2+4*(i+mxv*j)] = bxy[2+4*(i+noff+nxv*(j+moff))]; */ /* } */ for (i = 0; i < nn; i++) { v_at = _mm_loadu_ps(&bxy[4*(i+noff+nxv*(j+moff))]); _mm_storeu_ps(&sbxy[4*(i+mxv*j)],v_at); } } /* clear counters */ /* for (j = 0; j < 8; j++) { */ /* ncl[j+8*k] = 0; */ /* } */ memset((void*)&ncl[8*k],0,8*sizeof(int)); nps = 4*(npp/4); sum1 = 0.0; v_sum1 = _mm_set1_pd(0.0); /* vector loop over particles in blocks of 4 */ for (j = 0; j < nps; j+=4) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ v_x = _mm_load_ps(&ppart[j+npoff]); v_y = _mm_load_ps(&ppart[j+nppmx+npoff]); /* nn = x; */ /* mm = y; */ v_nn = _mm_cvttps_epi32(v_x); v_mm = _mm_cvttps_epi32(v_y); /* dxp = x - (float) nn; */ v_dxp = _mm_sub_ps(v_x,_mm_cvtepi32_ps(v_nn)); /* dyp = y - (float) mm; */ v_dyp = _mm_sub_ps(v_y,_mm_cvtepi32_ps(v_mm)); /* nm = 4*(nn - noff + mxv*(mm - moff)); */ v_nn = _mm_sub_epi32(v_nn,v_noff); v_mm = _mm_sub_epi32(v_mm,v_moff); v_it = _mm_mul_epu32(v_mxv,_mm_srli_si128(v_mm,4)); v_mm = _mm_mul_epu32(v_mm,v_mxv); v_mm = _mm_add_epi32(v_mm,_mm_slli_si128(v_it,4)); v_nn = _mm_slli_epi32(_mm_add_epi32(v_nn,v_mm),2); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ v_amx = _mm_sub_ps(v_one,v_dxp); v_amy = _mm_sub_ps(v_one,v_dyp); _mm_store_si128((__m128i *)ll,v_nn); /* find electric field */ /* nn = nm; */ /* dx = amx*sfxy[nn]; */ /* dy = amx*sfxy[nn+1]; */ /* dz = amx*sfxy[nn+2]; */ /* mm = nn + 4; */ /* dx = 
amy*(dxp*sfxy[mm] + dx); */ /* dy = amy*(dxp*sfxy[mm+1] + dy); */ /* dz = amy*(dxp*sfxy[mm+2] + dz); */ /* nn += 4*mxv; */ /* acx = amx*sfxy[nn]; */ /* acy = amx*sfxy[nn+1]; */ /* acz = amx*sfxy[nn+2]; */ /* mm = nn + 4; */ /* dx += dyp*(dxp*sfxy[mm] + acx); */ /* dy += dyp*(dxp*sfxy[mm+1] + acy); */ /* dz += dyp*(dxp*sfxy[mm+2] + acz); */ /* find magnetic field */ /* nn = nm; */ /* ox = amx*sbxy[nn]; */ /* oy = amx*sbxy[nn+1]; */ /* oz = amx*sbxy[nn+2]; */ /* mm = nn + 4; */ /* ox = amy*(dxp*sbxy[mm] + ox); */ /* oy = amy*(dxp*sbxy[mm+1] + oy); */ /* oz = amy*(dxp*sbxy[mm+2] + oz); */ /* nn += 4*mxv; */ /* acx = amx*sbxy[nn]; */ /* acy = amx*sbxy[nn+1]; */ /* acz = amx*sbxy[nn+2]; */ /* mm = nn + 4; */ /* ox += dyp*(dxp*sbxy[mm] + acx); */ /* oy += dyp*(dxp*sbxy[mm+1] + acy); */ /* oz += dyp*(dxp*sbxy[mm+2] + acz); */ /* interpolate electric and magnetic fields for first particle */ nn = ll[0]; v_at = _mm_shuffle_ps(v_amx,v_amx,0); a = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); e = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,0); nn += 4; a = _mm_add_ps(a,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); e = _mm_add_ps(e,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,0); a = _mm_mul_ps(a,v_at); e = _mm_mul_ps(e,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,0); a = _mm_add_ps(a,_mm_mul_ps(v_dx,v_at)); e = _mm_add_ps(e,_mm_mul_ps(v_dy,v_at)); /* interpolate electric and magnetic fields for second particle */ nn = ll[1]; v_at = _mm_shuffle_ps(v_amx,v_amx,85); b = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); f = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,85); nn += 4; b = _mm_add_ps(b,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); f = _mm_add_ps(f,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,85); b = _mm_mul_ps(b,v_at); f = _mm_mul_ps(f,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,85); b = _mm_add_ps(b,_mm_mul_ps(v_dx,v_at)); f = _mm_add_ps(f,_mm_mul_ps(v_dy,v_at)); /* interpolate electric and magnetic fields for third particle */ nn = ll[2]; v_at = _mm_shuffle_ps(v_amx,v_amx,170); c = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); g = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,170); nn += 4; c = _mm_add_ps(c,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); g = _mm_add_ps(g,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,170); c = _mm_mul_ps(c,v_at); g = _mm_mul_ps(g,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,170); c = _mm_add_ps(c,_mm_mul_ps(v_dx,v_at)); g = _mm_add_ps(g,_mm_mul_ps(v_dy,v_at)); /* interpolate electric and magnetic fields for fourth particle */ nn = ll[3]; v_at = _mm_shuffle_ps(v_amx,v_amx,255); d = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); h = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 
4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,255); nn += 4; d = _mm_add_ps(d,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); h = _mm_add_ps(h,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,255); d = _mm_mul_ps(d,v_at); h = _mm_mul_ps(h,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,255); d = _mm_add_ps(d,_mm_mul_ps(v_dx,v_at)); h = _mm_add_ps(h,_mm_mul_ps(v_dy,v_at)); /* transpose so a,b,c,d contain electric fields for each particle */ _MM_TRANSPOSE4_PS(a,b,c,d); /* transpose so e,f,g,h contain magnetic fields for each particle */ _MM_TRANSPOSE4_PS(e,f,g,h); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm_mul_ps(a,v_qtmh); v_dy = _mm_mul_ps(b,v_qtmh); v_dz = _mm_mul_ps(c,v_qtmh); /* half acceleration */ /* acx = ppart[j+2*nppmx+npoff] + dx; */ /* acy = ppart[j+3*nppmx+npoff] + dy; */ /* acz = ppart[j+4*nppmx+npoff] + dz; */ a = _mm_add_ps(v_dx,_mm_load_ps(&ppart[j+2*nppmx+npoff])); b = _mm_add_ps(v_dy,_mm_load_ps(&ppart[j+3*nppmx+npoff])); c = _mm_add_ps(v_dz,_mm_load_ps(&ppart[j+4*nppmx+npoff])); /* time-centered kinetic energy */ /* sum1 += (acx*acx + acy*acy + acz*acz); */ v_at = _mm_add_ps(_mm_mul_ps(a,a),_mm_mul_ps(b,b)); v_at = _mm_add_ps(v_at,_mm_mul_ps(c,c)); /* convert to double precision before accumulating */ v_d = _mm_cvtps_pd(v_at); v_sum1 = _mm_add_pd(v_sum1,v_d); v_it = _mm_srli_si128((__m128i)v_at,8); v_d = _mm_cvtps_pd((__m128)v_it); v_sum1 = _mm_add_pd(v_sum1,v_d); /* calculate cyclotron frequency */ /* omxt = qtmh*ox; */ /* omyt = qtmh*oy; */ /* omzt = qtmh*oz; */ e = _mm_mul_ps(v_qtmh,e); f = _mm_mul_ps(v_qtmh,f); g = _mm_mul_ps(v_qtmh,g); /* calculate rotation matrix */ /* vx = omxt*omxt; */ v_vx = _mm_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm_add_ps(_mm_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm_div_ps(v_two,_mm_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm_mul_ps(v_half,_mm_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm_mul_ps(_mm_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = _mm_mul_ps(_mm_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm_mul_ps(_mm_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = _mm_add_ps(v_vx,_mm_mul_ps(_mm_add_ps(h,g),b)); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm_add_ps(v_vy,_mm_mul_ps(_mm_sub_ps(h,g),a)); /* omt = omxt*omzt; */ h = _mm_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm_add_ps(v_vx,_mm_mul_ps(_mm_sub_ps(h,f),c)); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm_add_ps(v_vz,_mm_mul_ps(_mm_add_ps(h,f),a)); /* omt = omyt*omzt; */ h = _mm_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm_add_ps(v_vy,_mm_mul_ps(_mm_add_ps(h,e),c)); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm_add_ps(v_vz,_mm_mul_ps(_mm_sub_ps(h,e),b)); /* new velocity */ /* vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx; */ /* vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy; */ /* vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz; */ v_vx = _mm_add_ps(v_dx,_mm_mul_ps(v_vx,d)); v_vy = _mm_add_ps(v_dy,_mm_mul_ps(v_vy,d)); v_vz = _mm_add_ps(v_dz,_mm_mul_ps(v_vz,d)); /* new position */ /* dx = x + vx*dtc; */ /* dy = y + vy*dtc; */ 
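/* note on the rotation block above (a reading aid, not original
   commentary): instead of forming the nine rot coefficients of the
   scalar version, the vector code keeps the shared diagonal term
   h = 0.5*(1 - omt) inside each product and applies the common factor
   anorm = 2/(1 + omt), held in d, once at the end; term by term the
   products are the same Boris rotation */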
v_dx = _mm_add_ps(v_x,_mm_mul_ps(v_vx,v_dtc)); v_dy = _mm_add_ps(v_y,_mm_mul_ps(v_vy,v_dtc)); /* find particles going out of bounds */ mm = 0; v_st = v_zero; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* dx -= anx; */ /* mm = 2; */ /* } */ v_x = _mm_cmpge_ps(v_dx,v_edgerx); v_y = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_x,v_y); v_it = _mm_srli_si128((__m128i)v_at,8); v_it = _mm_add_epi64((__m128i)v_at,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); /* execute if either test result is true for any particle */ if (jj[0] != 0) { v_st = _mm_and_ps(v_two,v_x); v_x = _mm_and_ps(v_x,_mm_cmpge_ps(v_dx,v_anx)); v_dx = _mm_sub_ps(v_dx,_mm_and_ps(v_anx,v_x)); /* if (dx < edgelx) { */ /* if (dx < 0.0f) { */ /* dx += anx; */ /* if (dx < anx) */ /* mm = 1; */ /* else */ /* dx = 0.0; */ /* } */ /* else { */ /* mm = 1; */ /* } */ /* } */ v_at = _mm_and_ps(v_one,v_y); v_x = _mm_and_ps(v_y,_mm_cmplt_ps(v_dx,v_zero)); v_dx = _mm_add_ps(v_dx,_mm_and_ps(v_anx,v_x)); v_y = _mm_cmplt_ps(v_dx,v_anx); v_dx = _mm_and_ps(v_dx,v_y); v_st = _mm_add_ps(v_st,_mm_and_ps(v_at,v_y)); } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* dy -= any; */ /* mm += 6; */ /* } */ v_y = _mm_cmpge_ps(v_dy,v_edgery); v_x = _mm_cmplt_ps(v_dy,v_edgely); v_at = _mm_or_ps(v_x,v_y); v_it = _mm_srli_si128((__m128i)v_at,8); v_it = _mm_add_epi64((__m128i)v_at,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); /* execute if either test result is true for any particle */ if (jj[0] != 0) { v_st = _mm_add_ps(v_st,_mm_and_ps(v_six,v_y)); v_y = _mm_and_ps(v_y,_mm_cmpge_ps(v_dy,v_any)); v_dy = _mm_sub_ps(v_dy,_mm_and_ps(v_any,v_y)); /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* mm += 3; */ /* else */ /* dy = 0.0; */ /* } */ /* else { */ /* mm += 3; */ /* } */ /* } */ v_at = _mm_and_ps(v_three,v_x); v_y = _mm_and_ps(v_x,_mm_cmplt_ps(v_dy,v_zero)); v_dy = _mm_add_ps(v_dy,_mm_and_ps(v_any,v_y)); v_x = _mm_cmplt_ps(v_dy,v_any); v_dy = _mm_and_ps(v_dy,v_x); v_st = _mm_add_ps(v_st,_mm_and_ps(v_at,v_x)); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ _mm_store_ps(&ppart[j+npoff],v_dx); _mm_store_ps(&ppart[j+nppmx+npoff],v_dy); /* set new velocity */ /* ppart[j+2*nppmx+npoff] = vx; */ /* ppart[j+3*nppmx+npoff] = vy; */ /* ppart[j+4*nppmx+npoff] = vz; */ _mm_store_ps(&ppart[j+2*nppmx+npoff],v_vx); _mm_store_ps(&ppart[j+3*nppmx+npoff],v_vy); _mm_store_ps(&ppart[j+4*nppmx+npoff],v_vz); /* increment counters */ /* if (mm > 0) { */ /* ncl[mm+8*k-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*k)] = j + 1; */ /* ihole[1+2*(ih+(ntmax+1)*k)] = mm; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm_store_si128((__m128i *)ll,_mm_cvttps_epi32(v_st)); /* remove zero ist values and left shift data */ kk = 0; memset((void*)lm,0,8*sizeof(int)); for (i = 0; i < 4; i++) { mm = ll[i]; if (mm > 0) { lm[2*kk] = j + i + 1; lm[1+2*kk] = mm; ncl[mm+8*k-1] += 1; kk += 1; } } if (kk > 0) { if ((ih+kk) > ntmax) { nh = 1; } else { v_it = _mm_load_si128((__m128i *)lm); _mm_storeu_si128((__m128i *)&ihole[2*(ih+1+(ntmax+1)*k)],v_it); if (kk > 2) { v_it = _mm_load_si128((__m128i *)&lm[4]); _mm_storeu_si128((__m128i *)&ihole[2*(ih+3+(ntmax+1)*k)],v_it); } } ih += kk; } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find 
interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; nn = x; mm = y; dxp = x - (float) nn; dyp = y - (float) mm; nm = 4*(nn - noff + mxv*(mm - moff)); amx = 1.0f - dxp; amy = 1.0f - dyp; /* find electric field */ nn = nm; dx = amx*sfxy[nn]; dy = amx*sfxy[nn+1]; dz = amx*sfxy[nn+2]; mm = nn + 4; dx = amy*(dxp*sfxy[mm] + dx); dy = amy*(dxp*sfxy[mm+1] + dy); dz = amy*(dxp*sfxy[mm+2] + dz); nn += 4*mxv; acx = amx*sfxy[nn]; acy = amx*sfxy[nn+1]; acz = amx*sfxy[nn+2]; mm = nn + 4; dx += dyp*(dxp*sfxy[mm] + acx); dy += dyp*(dxp*sfxy[mm+1] + acy); dz += dyp*(dxp*sfxy[mm+2] + acz); /* find magnetic field */ nn = nm; ox = amx*sbxy[nn]; oy = amx*sbxy[nn+1]; oz = amx*sbxy[nn+2]; mm = nn + 4; ox = amy*(dxp*sbxy[mm] + ox); oy = amy*(dxp*sbxy[mm+1] + oy); oz = amy*(dxp*sbxy[mm+2] + oz); nn += 4*mxv; acx = amx*sbxy[nn]; acy = amx*sbxy[nn+1]; acz = amx*sbxy[nn+2]; mm = nn + 4; ox += dyp*(dxp*sbxy[mm] + acx); oy += dyp*(dxp*sbxy[mm+1] + acy); oz += dyp*(dxp*sbxy[mm+2] + acz); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+2*nppmx+npoff] + dx; acy = ppart[j+3*nppmx+npoff] + dy; acz = ppart[j+4*nppmx+npoff] + dz; /* time-centered kinetic energy */ sum1 += (acx*acx + acy*acy + acz*acz); /* calculate cyclotron frequency */ omxt = qtmh*ox; omyt = qtmh*oy; omzt = qtmh*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new velocity */ vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx; vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy; vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz; /* new position */ dx = x + vx*dtc; dy = y + vy*dtc; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx -= anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy -= any; mm += 6; } else if (dy < edgely) { if (dy < 0.0) { dy += any; if (dy < any) mm += 3; else dy = 0.0; } else { mm += 3; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; /* set new velocity */ ppart[j+2*nppmx+npoff] = vx; ppart[j+3*nppmx+npoff] = vy; ppart[j+4*nppmx+npoff] = vz; /* increment counters */ if (mm > 0) { ncl[mm+8*k-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*k)] = j + 1; ihole[1+2*(ih+(ntmax+1)*k)] = mm; } else { nh = 1; } } } /* sum2 += sum1; */ _mm_store_pd(&dd[0],v_sum1); for (j = 1; j < 2; j++) { dd[0] += dd[j]; } sum2 += (sum1 + dd[0]); /* set error and end of file flag */ /* ihole overflow */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*k] = ih; } /* normalize kinetic energy */ *ek += 0.5*sum2; return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void csse2grbppush23lt(float ppart[], float fxy[], float bxy[], int kpic[], float qbm, float dt, float dtc, float ci, float *ek, int idimp, int nppmx, int nx, int ny, int mx, int my, int nxv, int nyv, int mx1, int mxy1, int ipbc) { /* for 2-1/2d code, this subroutine updates 
   particle co-ordinates and velocities using leap-frog scheme in time
   and first-order linear interpolation in space, for relativistic
   particles with magnetic field, using the Boris Mover.
   vector/OpenMP version using guard cells
   particles stored in segmented array
   131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
   input: all, output: ppart, ek
   momentum equations used are:
   px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt
   py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt
   pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
   omz = (q/m)*bz(x(t),y(t))*gami,
   where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
   position equations used are:
   x(t+dt) = x(t) + px(t+dt/2)*dtg
   y(t+dt) = y(t) + py(t+dt/2)*dtg
   where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
   pz(t+dt/2)*pz(t+dt/2))*ci*ci)
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
      + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][0][n] = position x of particle n in tile m
   ppart[m][1][n] = position y of particle n in tile m
   ppart[m][2][n] = x momentum of particle n in tile m
   ppart[m][3][n] = y momentum of particle n in tile m
   ppart[m][4][n] = z momentum of particle n in tile m
   fxy[k][j][0] = x component of force/charge at grid (j,k)
   fxy[k][j][1] = y component of force/charge at grid (j,k)
   fxy[k][j][2] = z component of force/charge at grid (j,k)
   that is, convolution of electric field over particle shape
   bxy[k][j][0] = x component of magnetic field at grid (j,k)
   bxy[k][j][1] = y component of magnetic field at grid (j,k)
   bxy[k][j][2] = z component of magnetic field at grid (j,k)
   that is, the convolution of magnetic field over particle shape
   kpic = number of particles per tile
   qbm = particle charge/mass ratio
   dt = time interval between successive calculations
   dtc = time interval between successive co-ordinate calculations
   ci = reciprocal of velocity of light
   kinetic energy/mass at time t is also calculated, using
   ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 +
      (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 +
      (pz(t-dt/2) +
.5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami) idimp = size of phase space = 5 nppmx = maximum number of particles in tile nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y nxv = first dimension of field arrays, must be >= nx+1 nyv = second dimension of field arrays, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,2d periodic,2d reflecting,mixed reflecting/periodic) requires SSE2, ppart, fxy, and bxy need to be 16 byte aligned nppmx needs to be a multiple of 4, fxy, bxy need to have 4 components local data */ #define MXV 33 #define MYV 33 int noff, moff, npoff, npp, mxv; int i, j, k, nps, nn, mm, nm; float qtmh, ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy; float dx, dy, dz, ox, oy, oz, acx, acy, acz, p2, gami, qtmg, dtg; float omxt, omyt, omzt, omt, anorm; float rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9; float x, y, vx, vy, vz; double sum1, sum2; __m128i v_noff, v_moff, v_mxv; __m128i v_nn, v_mm, v_it; __m128 v_qtmh, v_ci2, v_dt, v_dtc, v_one, v_two, v_half; __m128 v_dxp, v_dyp, v_amx, v_amy, v_gami, v_at; __m128 v_x, v_y, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m128 v_edgelx, v_edgely, v_edgerx, v_edgery; __m128 a, b, c, d, e, f, g, h; __m128d v_sum1, v_d; __attribute__((aligned(16))) unsigned int ll[4]; __attribute__((aligned(16))) double dd[2]; __attribute__((aligned(16))) float sfxy[4*MXV*MYV], sbxy[4*MXV*MYV]; /* __attribute__((aligned(16))) float sfxy[4*(mx+1)*(my+1)]; */ /* __attribute__((aligned(16))) float sbxy[4*(mx+1)*(my+1)]; */ mxv = mx + 1; qtmh = 0.5f*qbm*dt; ci2 = ci*ci; sum2 = 0.0; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgerx = (float) nx; edgery = (float) ny; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } else if (ipbc==3) { edgelx = 1.0f; edgerx = (float) (nx-1); } v_mxv = _mm_set1_epi32(mxv); v_qtmh = _mm_set1_ps(qtmh); v_ci2 = _mm_set1_ps(ci2); v_dt = _mm_set1_ps(dt); v_dtc = _mm_set1_ps(dtc); v_one = _mm_set1_ps(1.0f); v_two = _mm_set1_ps(2.0f); v_half = _mm_set1_ps(0.5f); v_edgelx = _mm_set1_ps(edgelx); v_edgely = _mm_set1_ps(edgely); v_edgerx = _mm_set1_ps(edgerx); v_edgery = _mm_set1_ps(edgery); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,noff,moff,npp,npoff,nps,nn,mm,nm,x,y,vx,vy,vz,dxp,dyp, \ amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm,rot1, \ rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,p2,gami,qtmg,dtg,sum1,v_noff, \ v_moff,v_nn,v_mm,v_it,v_x,v_y,v_vx,v_vy,v_vz,v_dxp,v_dyp,v_amx,v_amy, \ v_dx,v_dy,v_dz,v_gami,v_at,v_d,v_sum1,a,b,c,d,e,f,g,h,ll,dd,sfxy,sbxy) \ reduction(+:sum2) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm_set1_epi32(noff); v_moff = _mm_set1_epi32(moff); npp = kpic[k]; npoff = idimp*nppmx*k; /* load local fields from global array */ nn = (mx < nx-noff ? mx : nx-noff) + 1; mm = (my < ny-moff ? 
my : ny-moff) + 1; for (j = 0; j < mm; j++) { /* for (i = 0; i < nn; i++) { */ /* sfxy[4*(i+mxv*j)] = fxy[4*(i+noff+nxv*(j+moff))]; */ /* sfxy[1+4*(i+mxv*j)] = fxy[1+4*(i+noff+nxv*(j+moff))]; */ /* sfxy[2+4*(i+mxv*j)] = fxy[2+4*(i+noff+nxv*(j+moff))]; */ /* } */ for (i = 0; i < nn; i++) { v_at = _mm_loadu_ps(&fxy[4*(i+noff+nxv*(j+moff))]); _mm_storeu_ps(&sfxy[4*(i+mxv*j)],v_at); } } for (j = 0; j < mm; j++) { /* for (i = 0; i < nn; i++) { */ /* sbxy[4*(i+mxv*j)] = bxy[4*(i+noff+nxv*(j+moff))]; */ /* sbxy[1+4*(i+mxv*j)] = bxy[1+4*(i+noff+nxv*(j+moff))]; */ /* sbxy[2+4*(i+mxv*j)] = bxy[2+4*(i+noff+nxv*(j+moff))]; */ /* } */ for (i = 0; i < nn; i++) { v_at = _mm_loadu_ps(&bxy[4*(i+noff+nxv*(j+moff))]); _mm_storeu_ps(&sbxy[4*(i+mxv*j)],v_at); } } nps = 4*(npp/4); sum1 = 0.0; v_sum1 = _mm_set1_pd(0.0); /* vector loop over particles in blocks of 4 */ for (j = 0; j < nps; j+=4) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ v_x = _mm_load_ps(&ppart[j+npoff]); v_y = _mm_load_ps(&ppart[j+nppmx+npoff]); /* nn = x; */ /* mm = y; */ v_nn = _mm_cvttps_epi32(v_x); v_mm = _mm_cvttps_epi32(v_y); /* dxp = x - (float) nn; */ v_dxp = _mm_sub_ps(v_x,_mm_cvtepi32_ps(v_nn)); /* dyp = y - (float) mm; */ v_dyp = _mm_sub_ps(v_y,_mm_cvtepi32_ps(v_mm)); /* nm = 4*(nn - noff + mxv*(mm - moff)); */ v_nn = _mm_sub_epi32(v_nn,v_noff); v_mm = _mm_sub_epi32(v_mm,v_moff); v_it = _mm_mul_epu32(v_mxv,_mm_srli_si128(v_mm,4)); v_mm = _mm_mul_epu32(v_mm,v_mxv); v_mm = _mm_add_epi32(v_mm,_mm_slli_si128(v_it,4)); v_nn = _mm_slli_epi32(_mm_add_epi32(v_nn,v_mm),2); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ v_amx = _mm_sub_ps(v_one,v_dxp); v_amy = _mm_sub_ps(v_one,v_dyp); _mm_store_si128((__m128i *)ll,v_nn); /* find electric field */ /* nn = nm; */ /* dx = amx*sfxy[nn]; */ /* dy = amx*sfxy[nn+1]; */ /* dz = amx*sfxy[nn+2]; */ /* mm = nn + 4; */ /* dx = amy*(dxp*sfxy[mm] + dx); */ /* dy = amy*(dxp*sfxy[mm+1] + dy); */ /* dz = amy*(dxp*sfxy[mm+2] + dz); */ /* nn += 4*mxv; */ /* acx = amx*sfxy[nn]; */ /* acy = amx*sfxy[nn+1]; */ /* acz = amx*sfxy[nn+2]; */ /* mm = nn + 4; */ /* dx += dyp*(dxp*sfxy[mm] + acx); */ /* dy += dyp*(dxp*sfxy[mm+1] + acy); */ /* dz += dyp*(dxp*sfxy[mm+2] + acz); */ /* find magnetic field */ /* nn = nm; */ /* ox = amx*sbxy[nn]; */ /* oy = amx*sbxy[nn+1]; */ /* oz = amx*sbxy[nn+2]; */ /* mm = nn + 4; */ /* ox = amy*(dxp*sbxy[mm] + ox); */ /* oy = amy*(dxp*sbxy[mm+1] + oy); */ /* oz = amy*(dxp*sbxy[mm+2] + oz); */ /* nn += 4*mxv; */ /* acx = amx*sbxy[nn]; */ /* acy = amx*sbxy[nn+1]; */ /* acz = amx*sbxy[nn+2]; */ /* mm = nn + 4; */ /* ox += dyp*(dxp*sbxy[mm] + acx); */ /* oy += dyp*(dxp*sbxy[mm+1] + acy); */ /* oz += dyp*(dxp*sbxy[mm+2] + acz); */ /* interpolate electric and magnetic fields for first particle */ nn = ll[0]; v_at = _mm_shuffle_ps(v_amx,v_amx,0); a = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); e = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,0); nn += 4; a = _mm_add_ps(a,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); e = _mm_add_ps(e,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,0); a = _mm_mul_ps(a,v_at); e = _mm_mul_ps(e,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,0); a = _mm_add_ps(a,_mm_mul_ps(v_dx,v_at)); e = _mm_add_ps(e,_mm_mul_ps(v_dy,v_at)); 
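/* note: the _mm_shuffle_ps immediates 0, 85, 170 and 255 used for the
   four particles are broadcast masks (0b00000000, 0b01010101,
   0b10101010, 0b11111111), replicating lane 0, 1, 2 or 3 of
   v_amx/v_dxp/v_amy/v_dyp across a full vector, so each particle's
   interpolation weight can scale an entire 4-component field row at
   a time */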
/* interpolate electric and magnetic fields for second particle */ nn = ll[1]; v_at = _mm_shuffle_ps(v_amx,v_amx,85); b = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); f = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,85); nn += 4; b = _mm_add_ps(b,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); f = _mm_add_ps(f,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,85); b = _mm_mul_ps(b,v_at); f = _mm_mul_ps(f,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,85); b = _mm_add_ps(b,_mm_mul_ps(v_dx,v_at)); f = _mm_add_ps(f,_mm_mul_ps(v_dy,v_at)); /* interpolate electric and magnetic fields for third particle */ nn = ll[2]; v_at = _mm_shuffle_ps(v_amx,v_amx,170); c = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); g = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,170); nn += 4; c = _mm_add_ps(c,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); g = _mm_add_ps(g,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,170); c = _mm_mul_ps(c,v_at); g = _mm_mul_ps(g,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,170); c = _mm_add_ps(c,_mm_mul_ps(v_dx,v_at)); g = _mm_add_ps(g,_mm_mul_ps(v_dy,v_at)); /* interpolate electric and magnetic fields for fourth particle */ nn = ll[3]; v_at = _mm_shuffle_ps(v_amx,v_amx,255); d = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); h = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,255); nn += 4; d = _mm_add_ps(d,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); h = _mm_add_ps(h,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,255); d = _mm_mul_ps(d,v_at); h = _mm_mul_ps(h,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,255); d = _mm_add_ps(d,_mm_mul_ps(v_dx,v_at)); h = _mm_add_ps(h,_mm_mul_ps(v_dy,v_at)); /* transpose so a,b,c,d contain electric fields for each particle */ _MM_TRANSPOSE4_PS(a,b,c,d); /* transpose so e,f,g,h contain magnetic fields for each particle */ _MM_TRANSPOSE4_PS(e,f,g,h); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm_mul_ps(a,v_qtmh); v_dy = _mm_mul_ps(b,v_qtmh); v_dz = _mm_mul_ps(c,v_qtmh); /* half acceleration */ /* acx = ppart[j+2*nppmx+npoff] + dx; */ /* acy = ppart[j+3*nppmx+npoff] + dy; */ /* acz = ppart[j+4*nppmx+npoff] + dz; */ a = _mm_add_ps(v_dx,_mm_load_ps(&ppart[j+2*nppmx+npoff])); b = _mm_add_ps(v_dy,_mm_load_ps(&ppart[j+3*nppmx+npoff])); c = _mm_add_ps(v_dz,_mm_load_ps(&ppart[j+4*nppmx+npoff])); /* find inverse gamma */ /* p2 = acx*acx + acy*acy + acz*acz; */ v_at = _mm_add_ps(_mm_mul_ps(a,a),_mm_mul_ps(b,b)); v_at = _mm_add_ps(v_at,_mm_mul_ps(c,c)); /* gami = 1.0f/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_gami = _mm_rsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* full accuracy calculation */ v_gami = 
_mm_sqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); v_gami = _mm_div_ps(v_one,v_gami); /* full accuracy calculation with SVML */ /* v_gami = _mm_invsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* time-centered kinetic energy */ /* sum1 += gami*p2/(1.0f + gami); */ v_at = _mm_mul_ps(v_gami,v_at); v_at = _mm_div_ps(v_at,_mm_add_ps(v_one,v_gami)); /* convert to double precision before accumulating */ v_d = _mm_cvtps_pd(v_at); v_sum1 = _mm_add_pd(v_sum1,v_d); v_it = _mm_srli_si128((__m128i)v_at,8); v_d = _mm_cvtps_pd((__m128)v_it); v_sum1 = _mm_add_pd(v_sum1,v_d); /* renormalize magnetic field */ /* qtmg = qtmh*gami; */ v_at = _mm_mul_ps(v_qtmh,v_gami); /* calculate cyclotron frequency */ /* omxt = qtmg*ox; */ /* omyt = qtmg*oy; */ /* omzt = qtmg*oz; */ e = _mm_mul_ps(v_at,e); f = _mm_mul_ps(v_at,f); g = _mm_mul_ps(v_at,g); /* calculate rotation matrix */ /* vx = omxt*omxt; */ v_vx = _mm_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm_add_ps(_mm_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm_div_ps(v_two,_mm_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm_mul_ps(v_half,_mm_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm_mul_ps(_mm_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = _mm_mul_ps(_mm_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm_mul_ps(_mm_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = _mm_add_ps(v_vx,_mm_mul_ps(_mm_add_ps(h,g),b)); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm_add_ps(v_vy,_mm_mul_ps(_mm_sub_ps(h,g),a)); /* omt = omxt*omzt; */ h = _mm_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm_add_ps(v_vx,_mm_mul_ps(_mm_sub_ps(h,f),c)); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm_add_ps(v_vz,_mm_mul_ps(_mm_add_ps(h,f),a)); /* omt = omyt*omzt; */ h = _mm_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm_add_ps(v_vy,_mm_mul_ps(_mm_add_ps(h,e),c)); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm_add_ps(v_vz,_mm_mul_ps(_mm_sub_ps(h,e),b)); /* new momentum */ /* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */ /* vy = dy + (rot4*acx + rot5*acy + rot6*acz)*anorm; */ /* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */ v_vx = _mm_add_ps(v_dx,_mm_mul_ps(v_vx,d)); v_vy = _mm_add_ps(v_dy,_mm_mul_ps(v_vy,d)); v_vz = _mm_add_ps(v_dz,_mm_mul_ps(v_vz,d)); /* update inverse gamma */ /* p2 = vx*vx + vy*vy + vz*vz; */ v_at = _mm_mul_ps(v_vx,v_vx); v_at = _mm_add_ps(v_at,_mm_mul_ps(v_vy,v_vy)); v_at = _mm_add_ps(v_at,_mm_mul_ps(v_vz,v_vz)); /* dtg = dtc/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_at = _mm_rsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* v_at = _mm_mul_ps(v_dtc,v_at); */ /* full accuracy calculation */ v_at = _mm_sqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); v_at = _mm_div_ps(v_dtc,v_at); /* full accuracy calculation with SVML */ /* v_at = _mm_invsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* v_at = _mm_mul_ps(v_dtc,v_at); */ /* new position */ /* dx = x + vx*dtg; */ /* dy = y + vy*dtg; */ v_dx = _mm_add_ps(v_x,_mm_mul_ps(v_vx,v_at)); v_dy = _mm_add_ps(v_y,_mm_mul_ps(v_vy,v_at)); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ v_at = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dx,v_edgerx)); v_x = _mm_and_ps(v_at,v_x); v_dx = _mm_add_ps(_mm_andnot_ps(v_at,v_dx),v_x); v_dxp = 
_mm_and_ps(v_at,v_vx); v_vx = _mm_sub_ps(_mm_andnot_ps(v_at,v_vx),v_dxp); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* vy = -vy; */ /* } */ v_at = _mm_cmplt_ps(v_dy,v_edgely); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dy,v_edgery)); v_y = _mm_and_ps(v_at,v_y); v_dy = _mm_add_ps(_mm_andnot_ps(v_at,v_dy),v_y); v_dyp = _mm_and_ps(v_at,v_vy); v_vy = _mm_sub_ps(_mm_andnot_ps(v_at,v_vy),v_dyp); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* vx = -vx; */ /* } */ v_at = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dx,v_edgerx)); v_x = _mm_and_ps(v_at,v_x); v_dx = _mm_add_ps(_mm_andnot_ps(v_at,v_dx),v_x); v_dxp = _mm_and_ps(v_at,v_vx); v_vx = _mm_sub_ps(_mm_andnot_ps(v_at,v_vx),v_dxp); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ _mm_store_ps(&ppart[j+npoff],v_dx); _mm_store_ps(&ppart[j+nppmx+npoff],v_dy); /* set new momentum */ /* ppart[j+2*nppmx+npoff] = vx; */ /* ppart[j+3*nppmx+npoff] = vy; */ /* ppart[j+4*nppmx+npoff] = vz; */ _mm_store_ps(&ppart[j+2*nppmx+npoff],v_vx); _mm_store_ps(&ppart[j+3*nppmx+npoff],v_vy); _mm_store_ps(&ppart[j+4*nppmx+npoff],v_vz); } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; nn = x; mm = y; dxp = x - (float) nn; dyp = y - (float) mm; nm = 4*(nn - noff + mxv*(mm - moff)); amx = 1.0f - dxp; amy = 1.0f - dyp; /* find electric field */ nn = nm; dx = amx*sfxy[nn]; dy = amx*sfxy[nn+1]; dz = amx*sfxy[nn+2]; mm = nn + 4; dx = amy*(dxp*sfxy[mm] + dx); dy = amy*(dxp*sfxy[mm+1] + dy); dz = amy*(dxp*sfxy[mm+2] + dz); nn += 4*mxv; acx = amx*sfxy[nn]; acy = amx*sfxy[nn+1]; acz = amx*sfxy[nn+2]; mm = nn + 4; dx += dyp*(dxp*sfxy[mm] + acx); dy += dyp*(dxp*sfxy[mm+1] + acy); dz += dyp*(dxp*sfxy[mm+2] + acz); /* find magnetic field */ nn = nm; ox = amx*sbxy[nn]; oy = amx*sbxy[nn+1]; oz = amx*sbxy[nn+2]; mm = nn + 4; ox = amy*(dxp*sbxy[mm] + ox); oy = amy*(dxp*sbxy[mm+1] + oy); oz = amy*(dxp*sbxy[mm+2] + oz); nn += 4*mxv; acx = amx*sbxy[nn]; acy = amx*sbxy[nn+1]; acz = amx*sbxy[nn+2]; mm = nn + 4; ox += dyp*(dxp*sbxy[mm] + acx); oy += dyp*(dxp*sbxy[mm+1] + acy); oz += dyp*(dxp*sbxy[mm+2] + acz); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+2*nppmx+npoff] + dx; acy = ppart[j+3*nppmx+npoff] + dy; acz = ppart[j+4*nppmx+npoff] + dz; /* find inverse gamma */ p2 = acx*acx + acy*acy + acz*acz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* renormalize magnetic field */ qtmg = qtmh*gami; /* time-centered kinetic energy */ sum1 += gami*p2/(1.0 + gami); /* calculate cyclotron frequency */ omxt = qtmg*ox; omyt = qtmg*oy; omzt = qtmg*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new momentum */ vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx; vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy; vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz; /* update inverse gamma */ p2 = vx*vx + vy*vy + vz*vz; dtg = dtc/sqrtf(1.0f + p2*ci2); /* new position */ dx = x + vx*dtg; dy = y + vy*dtg; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; vx = 
-vx;
         }
         if ((dy < edgely) || (dy >= edgery)) {
            dy = y;
            vy = -vy;
         }
      }
/* mixed reflecting/periodic boundary conditions */
      else if (ipbc==3) {
         if ((dx < edgelx) || (dx >= edgerx)) {
            dx = x;
            vx = -vx;
         }
      }
/* set new position */
      ppart[j+npoff] = dx;
      ppart[j+nppmx+npoff] = dy;
/* set new momentum */
      ppart[j+2*nppmx+npoff] = vx;
      ppart[j+3*nppmx+npoff] = vy;
      ppart[j+4*nppmx+npoff] = vz;
   }
/* sum2 += sum1; */
   _mm_store_pd(&dd[0],v_sum1);
   for (j = 1; j < 2; j++) {
      dd[0] += dd[j];
   }
   sum2 += (sum1 + dd[0]);
}
/* normalize kinetic energy */
   *ek += sum2;
   return;
#undef MXV
#undef MYV
}

/*--------------------------------------------------------------------*/
void csse2grbppushf23lt(float ppart[], float fxy[], float bxy[],
                        int kpic[], int ncl[], int ihole[], float qbm,
                        float dt, float dtc, float ci, float *ek,
                        int idimp, int nppmx, int nx, int ny, int mx,
                        int my, int nxv, int nyv, int mx1, int mxy1,
                        int ntmax, int *irc) {
/* for 2-1/2d code, this subroutine updates particle co-ordinates and
   velocities using leap-frog scheme in time and first-order linear
   interpolation in space, for relativistic particles with magnetic
   field and periodic boundary conditions, using the Boris Mover.
   also determines list of particles which are leaving this tile
   vector/OpenMP version using guard cells
   particles stored in segmented array
   131 flops/particle, 4 divides, 2 sqrts, 25 loads, 5 stores
   input: all except ncl, ihole, irc, output: ppart, ncl, ihole, irc, ek
   momentum equations used are:
   px(t+dt/2) = rot(1)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(2)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(3)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fx(x(t),y(t))*dt
   py(t+dt/2) = rot(4)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(5)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(6)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fy(x(t),y(t))*dt
   pz(t+dt/2) = rot(7)*(px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt) +
      rot(8)*(py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt) +
      rot(9)*(pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt) +
      .5*(q/m)*fz(x(t),y(t))*dt
   where q/m is charge/mass, and the rotation matrix is given by:
   rot[0] = (1 - (om*dt/2)**2 + 2*(omx*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[1] = 2*(omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[2] = 2*(-omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[3] = 2*(-omz*dt/2 + (omx*dt/2)*(omy*dt/2))/(1 + (om*dt/2)**2)
   rot[4] = (1 - (om*dt/2)**2 + 2*(omy*dt/2)**2)/(1 + (om*dt/2)**2)
   rot[5] = 2*(omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[6] = 2*(omy*dt/2 + (omx*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[7] = 2*(-omx*dt/2 + (omy*dt/2)*(omz*dt/2))/(1 + (om*dt/2)**2)
   rot[8] = (1 - (om*dt/2)**2 + 2*(omz*dt/2)**2)/(1 + (om*dt/2)**2)
   and om**2 = omx**2 + omy**2 + omz**2
   the rotation matrix is determined by:
   omx = (q/m)*bx(x(t),y(t))*gami, omy = (q/m)*by(x(t),y(t))*gami, and
   omz = (q/m)*bz(x(t),y(t))*gami,
   where gami = 1./sqrt(1.+(px(t)*px(t)+py(t)*py(t)+pz(t)*pz(t))*ci*ci)
   position equations used are:
   x(t+dt) = x(t) + px(t+dt/2)*dtg
   y(t+dt) = y(t) + py(t+dt/2)*dtg
   where dtg = dtc/sqrt(1.+(px(t+dt/2)*px(t+dt/2)+py(t+dt/2)*py(t+dt/2)+
   pz(t+dt/2)*pz(t+dt/2))*ci*ci)
   fx(x(t),y(t)), fy(x(t),y(t)), and fz(x(t),y(t))
   bx(x(t),y(t)), by(x(t),y(t)), and bz(x(t),y(t))
   are approximated by interpolation from the nearest grid points:
   fx(x,y) = (1-dy)*((1-dx)*fx(n,m)+dx*fx(n+1,m)) + dy*((1-dx)*fx(n,m+1)
      + dx*fx(n+1,m+1))
   where n,m = leftmost grid points and dx = x-n, dy = y-m
   similarly for fy(x,y), fz(x,y), bx(x,y), by(x,y), bz(x,y)
   ppart[m][0][n] = position x of particle n in tile m
ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = x momentum of particle n in tile m ppart[m][3][n] = y momentum of particle n in tile m ppart[m][4][n] = z momentum of particle n in tile m fxy[k][j][0] = x component of force/charge at grid (j,k) fxy[k][j][1] = y component of force/charge at grid (j,k) fxy[k][j][2] = z component of force/charge at grid (j,k) that is, convolution of electric field over particle shape bxy[k][j][0] = x component of magnetic field at grid (j,k) bxy[k][j][1] = y component of magnetic field at grid (j,k) bxy[k][j][2] = z component of magnetic field at grid (j,k) that is, the convolution of magnetic field over particle shape kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = destination of particle leaving hole ihole[k][0][0] = ih, number of holes left (error, if negative) qbm = particle charge/mass ratio dt = time interval between successive calculations dtc = time interval between successive co-ordinate calculations ci = reciprocal of velocity of light kinetic energy/mass at time t is also calculated, using ek = gami*sum((px(t-dt/2) + .5*(q/m)*fx(x(t),y(t))*dt)**2 + (py(t-dt/2) + .5*(q/m)*fy(x(t),y(t))*dt)**2 + (pz(t-dt/2) + .5*(q/m)*fz(x(t),y(t))*dt)**2)/(1. + gami) idimp = size of phase space = 5 nppmx = maximum number of particles in tile nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y nxv = first dimension of field arrays, must be >= nx+1 nyv = second dimension of field arrays, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 optimized version requires SSE2, ppart, fxy, and bxy need to be 16 byte aligned nppmx needs to be a multiple of 4, fxy, bxy need to have 4 components local data */ #define MXV 33 #define MYV 33 int noff, moff, npoff, npp, mxv; int i, j, k, nps, ih, nh, nn, mm, nm, kk; float qtmh, ci2, dxp, dyp, amx, amy, dx, dy, dz, ox, oy, oz; float acx, acy, acz, p2, gami, qtmg, dtg, omxt, omyt, omzt, omt; float anorm, rot1, rot2, rot3, rot4, rot5, rot6, rot7, rot8, rot9; float anx, any, edgelx, edgely, edgerx, edgery; float x, y, vx, vy, vz; double sum1, sum2; __m128i v_noff, v_moff, v_mxv; __m128i v_nn, v_mm, v_it; __m128 v_qtmh, v_ci2, v_dt, v_dtc, v_one, v_two, v_half; __m128 v_dxp, v_dyp, v_amx, v_amy, v_gami, v_st, v_at; __m128 v_x, v_y, v_dx, v_dy, v_dz, v_vx, v_vy, v_vz; __m128 v_anx, v_any, v_edgelx, v_edgely, v_edgerx, v_edgery; __m128 v_zero, v_three, v_six; __m128 a, b, c, d, e, f, g, h; __m128d v_sum1, v_d; __attribute__((aligned(16))) unsigned int ll[4], lm[8]; __attribute__((aligned(16))) unsigned long jj[1]; __attribute__((aligned(16))) double dd[2]; __attribute__((aligned(16))) float sfxy[4*MXV*MYV], sbxy[4*MXV*MYV]; /* __attribute__((aligned(16))) float sfxy[4*(mx+1)*(my+1)]; */ /* __attribute__((aligned(16))) float sbxy[4*(mx+1)*(my+1)]; */ mxv = mx + 1; qtmh = 0.5f*qbm*dt; ci2 = ci*ci; anx = (float) nx; any = (float) ny; sum2 = 0.0; v_mxv = _mm_set1_epi32(mxv); v_qtmh = _mm_set1_ps(qtmh); v_ci2 = _mm_set1_ps(ci2); v_dt = _mm_set1_ps(dt); v_dtc = _mm_set1_ps(dtc); v_anx = _mm_set1_ps(anx); v_any = _mm_set1_ps(any); v_zero = _mm_setzero_ps(); v_one = _mm_set1_ps(1.0f); v_two = _mm_set1_ps(2.0f); v_half = _mm_set1_ps(0.5f); v_three = 
_mm_set1_ps(3.0f); v_six = _mm_set1_ps(6.0f); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,noff,moff,npp,npoff,nps,nn,mm,nm,kk,ih,nh,x,y,vx,vy,vz, \ dxp,dyp,amx,amy,dx,dy,dz,ox,oy,oz,acx,acy,acz,omxt,omyt,omzt,omt,anorm, \ rot1,rot2,rot3,rot4,rot5,rot6,rot7,rot8,rot9,edgelx,edgely,edgerx, \ edgery,p2,gami,qtmg,dtg,sum1,v_noff,v_moff,v_nn,v_mm,v_it,v_x,v_y,v_vx, \ v_vy,v_vz,v_dxp,v_dyp,v_amx,v_amy,v_dx,v_dy,v_dz,v_gami,v_st,v_at, \ v_edgelx,v_edgely,v_edgerx,v_edgery,v_d,v_sum1,a,b,c,d,e,f,g,h,jj,ll, \ lm,dd,sfxy,sbxy) \ reduction(+:sum2) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm_set1_epi32(noff); v_moff = _mm_set1_epi32(moff); npp = kpic[k]; npoff = idimp*nppmx*k; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; v_edgelx = _mm_set1_ps(edgelx); v_edgely = _mm_set1_ps(edgely); v_edgerx = _mm_set1_ps(edgerx); v_edgery = _mm_set1_ps(edgery); ih = 0; nh = 0; nn += 1; mm += 1; /* load local fields from global array */ for (j = 0; j < mm; j++) { /* for (i = 0; i < nn; i++) { */ /* sfxy[4*(i+mxv*j)] = fxy[4*(i+noff+nxv*(j+moff))]; */ /* sfxy[1+4*(i+mxv*j)] = fxy[1+4*(i+noff+nxv*(j+moff))]; */ /* sfxy[2+4*(i+mxv*j)] = fxy[2+4*(i+noff+nxv*(j+moff))]; */ /* } */ for (i = 0; i < nn; i++) { v_at = _mm_loadu_ps(&fxy[4*(i+noff+nxv*(j+moff))]); _mm_storeu_ps(&sfxy[4*(i+mxv*j)],v_at); } } for (j = 0; j < mm; j++) { /* for (i = 0; i < nn; i++) { */ /* sbxy[4*(i+mxv*j)] = bxy[4*(i+noff+nxv*(j+moff))]; */ /* sbxy[1+4*(i+mxv*j)] = bxy[1+4*(i+noff+nxv*(j+moff))]; */ /* sbxy[2+4*(i+mxv*j)] = bxy[2+4*(i+noff+nxv*(j+moff))]; */ /* } */ for (i = 0; i < nn; i++) { v_at = _mm_loadu_ps(&bxy[4*(i+noff+nxv*(j+moff))]); _mm_storeu_ps(&sbxy[4*(i+mxv*j)],v_at); } } /* clear counters */ /* for (j = 0; j < 8; j++) { */ /* ncl[j+8*k] = 0; */ /* } */ memset((void*)&ncl[8*k],0,8*sizeof(int)); nps = 4*(npp/4); sum1 = 0.0; v_sum1 = _mm_set1_pd(0.0); /* vector loop over particles in blocks of 4 */ for (j = 0; j < nps; j+=4) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ v_x = _mm_load_ps(&ppart[j+npoff]); v_y = _mm_load_ps(&ppart[j+nppmx+npoff]); /* nn = x; */ /* mm = y; */ v_nn = _mm_cvttps_epi32(v_x); v_mm = _mm_cvttps_epi32(v_y); /* dxp = x - (float) nn; */ v_dxp = _mm_sub_ps(v_x,_mm_cvtepi32_ps(v_nn)); /* dyp = y - (float) mm; */ v_dyp = _mm_sub_ps(v_y,_mm_cvtepi32_ps(v_mm)); /* nm = 4*(nn - noff + mxv*(mm - moff)); */ v_nn = _mm_sub_epi32(v_nn,v_noff); v_mm = _mm_sub_epi32(v_mm,v_moff); v_it = _mm_mul_epu32(v_mxv,_mm_srli_si128(v_mm,4)); v_mm = _mm_mul_epu32(v_mm,v_mxv); v_mm = _mm_add_epi32(v_mm,_mm_slli_si128(v_it,4)); v_nn = _mm_slli_epi32(_mm_add_epi32(v_nn,v_mm),2); /* amx = 1.0f - dxp; */ /* amy = 1.0f - dyp; */ v_amx = _mm_sub_ps(v_one,v_dxp); v_amy = _mm_sub_ps(v_one,v_dyp); _mm_store_si128((__m128i *)ll,v_nn); /* find electric field */ /* nn = nm; */ /* dx = amx*sfxy[nn]; */ /* dy = amx*sfxy[nn+1]; */ /* dz = amx*sfxy[nn+2]; */ /* mm = nn + 4; */ /* dx = amy*(dxp*sfxy[mm] + dx); */ /* dy = amy*(dxp*sfxy[mm+1] + dy); */ /* dz = amy*(dxp*sfxy[mm+2] + dz); */ /* nn += 4*mxv; */ /* acx = amx*sfxy[nn]; */ /* acy = amx*sfxy[nn+1]; */ /* acz = amx*sfxy[nn+2]; */ /* mm = nn + 4; */ /* dx += dyp*(dxp*sfxy[mm] + acx); */ /* dy += dyp*(dxp*sfxy[mm+1] + acy); */ /* dz += dyp*(dxp*sfxy[mm+2] + acz); */ /* find magnetic 
field */ /* nn = nm; */ /* ox = amx*sbxy[nn]; */ /* oy = amx*sbxy[nn+1]; */ /* oz = amx*sbxy[nn+2]; */ /* mm = nn + 4; */ /* ox = amy*(dxp*sbxy[mm] + ox); */ /* oy = amy*(dxp*sbxy[mm+1] + oy); */ /* oz = amy*(dxp*sbxy[mm+2] + oz); */ /* nn += 4*mxv; */ /* acx = amx*sbxy[nn]; */ /* acy = amx*sbxy[nn+1]; */ /* acz = amx*sbxy[nn+2]; */ /* mm = nn + 4; */ /* ox += dyp*(dxp*sbxy[mm] + acx); */ /* oy += dyp*(dxp*sbxy[mm+1] + acy); */ /* oz += dyp*(dxp*sbxy[mm+2] + acz); */ /* interpolate electric and magnetic fields for first particle */ nn = ll[0]; v_at = _mm_shuffle_ps(v_amx,v_amx,0); a = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); e = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,0); nn += 4; a = _mm_add_ps(a,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); e = _mm_add_ps(e,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,0); a = _mm_mul_ps(a,v_at); e = _mm_mul_ps(e,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,0); a = _mm_add_ps(a,_mm_mul_ps(v_dx,v_at)); e = _mm_add_ps(e,_mm_mul_ps(v_dy,v_at)); /* interpolate electric and magnetic fields for second particle */ nn = ll[1]; v_at = _mm_shuffle_ps(v_amx,v_amx,85); b = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); f = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,85); nn += 4; b = _mm_add_ps(b,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); f = _mm_add_ps(f,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,85); b = _mm_mul_ps(b,v_at); f = _mm_mul_ps(f,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,85); b = _mm_add_ps(b,_mm_mul_ps(v_dx,v_at)); f = _mm_add_ps(f,_mm_mul_ps(v_dy,v_at)); /* interpolate electric and magnetic fields for third particle */ nn = ll[2]; v_at = _mm_shuffle_ps(v_amx,v_amx,170); c = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); g = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,170); nn += 4; c = _mm_add_ps(c,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); g = _mm_add_ps(g,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,170); c = _mm_mul_ps(c,v_at); g = _mm_mul_ps(g,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,170); c = _mm_add_ps(c,_mm_mul_ps(v_dx,v_at)); g = _mm_add_ps(g,_mm_mul_ps(v_dy,v_at)); /* interpolate electric and magnetic fields for fourth particle */ nn = ll[3]; v_at = _mm_shuffle_ps(v_amx,v_amx,255); d = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn])); h = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn])); mm = nn + 4*mxv; v_dx = _mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm])); v_dy = _mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm])); v_at = _mm_shuffle_ps(v_dxp,v_dxp,255); nn += 4; d = _mm_add_ps(d,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[nn]))); h = _mm_add_ps(h,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[nn]))); mm += 4; v_dx = _mm_add_ps(v_dx,_mm_mul_ps(v_at,_mm_load_ps(&sfxy[mm]))); v_dy = 
_mm_add_ps(v_dy,_mm_mul_ps(v_at,_mm_load_ps(&sbxy[mm]))); v_at = _mm_shuffle_ps(v_amy,v_amy,255); d = _mm_mul_ps(d,v_at); h = _mm_mul_ps(h,v_at); v_at = _mm_shuffle_ps(v_dyp,v_dyp,255); d = _mm_add_ps(d,_mm_mul_ps(v_dx,v_at)); h = _mm_add_ps(h,_mm_mul_ps(v_dy,v_at)); /* transpose so a,b,c,d contain electric fields for each particle */ _MM_TRANSPOSE4_PS(a,b,c,d); /* transpose so e,f,g,h contain magnetic fields for each particle */ _MM_TRANSPOSE4_PS(e,f,g,h); /* calculate half impulse */ /* dx *= qtmh; */ /* dy *= qtmh; */ /* dz *= qtmh; */ v_dx = _mm_mul_ps(a,v_qtmh); v_dy = _mm_mul_ps(b,v_qtmh); v_dz = _mm_mul_ps(c,v_qtmh); /* half acceleration */ /* acx = ppart[j+2*nppmx+npoff] + dx; */ /* acy = ppart[j+3*nppmx+npoff] + dy; */ /* acz = ppart[j+4*nppmx+npoff] + dz; */ a = _mm_add_ps(v_dx,_mm_load_ps(&ppart[j+2*nppmx+npoff])); b = _mm_add_ps(v_dy,_mm_load_ps(&ppart[j+3*nppmx+npoff])); c = _mm_add_ps(v_dz,_mm_load_ps(&ppart[j+4*nppmx+npoff])); /* find inverse gamma */ /* p2 = acx*acx + acy*acy + acz*acz; */ v_at = _mm_add_ps(_mm_mul_ps(a,a),_mm_mul_ps(b,b)); v_at = _mm_add_ps(v_at,_mm_mul_ps(c,c)); /* gami = 1.0f/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_gami = _mm_rsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* full accuracy calculation */ v_gami = _mm_sqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); v_gami = _mm_div_ps(v_one,v_gami); /* full accuracy calculation with SVML */ /* v_gami = _mm_invsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* time-centered kinetic energy */ /* sum1 += gami*p2/(1.0f + gami); */ v_at = _mm_mul_ps(v_gami,v_at); v_at = _mm_div_ps(v_at,_mm_add_ps(v_one,v_gami)); /* convert to double precision before accumulating */ v_d = _mm_cvtps_pd(v_at); v_sum1 = _mm_add_pd(v_sum1,v_d); v_it = _mm_srli_si128((__m128i)v_at,8); v_d = _mm_cvtps_pd((__m128)v_it); v_sum1 = _mm_add_pd(v_sum1,v_d); /* renormalize magnetic field */ /* qtmg = qtmh*gami; */ v_at = _mm_mul_ps(v_qtmh,v_gami); /* calculate cyclotron frequency */ /* omxt = qtmg*ox; */ /* omyt = qtmg*oy; */ /* omzt = qtmg*oz; */ e = _mm_mul_ps(v_at,e); f = _mm_mul_ps(v_at,f); g = _mm_mul_ps(v_at,g); /* calculate rotation matrix */ /* vx = omxt*omxt; */ v_vx = _mm_mul_ps(e,e); /* vy = omyt*omyt; */ v_vy = _mm_mul_ps(f,f); /* vz = omzt*omzt; */ v_vz = _mm_mul_ps(g,g); /* omt = omxt*omxt + omyt*omyt + omzt*omzt; */ v_at = _mm_add_ps(_mm_add_ps(v_vx,v_vy),v_vz); /* anorm = 2.0f/(1.0f + omt); */ d = _mm_div_ps(v_two,_mm_add_ps(v_one,v_at)); /* omt = 0.5f*(1.0f - omt); */ h = _mm_mul_ps(v_half,_mm_sub_ps(v_one,v_at)); /* vx = (omt + vx)*acx; */ v_vx = _mm_mul_ps(_mm_add_ps(h,v_vx),a); /* vy = (omt + vy)*acy; */ v_vy = _mm_mul_ps(_mm_add_ps(h,v_vy),b); /* vz = (omt + vz)*acz; */ v_vz = _mm_mul_ps(_mm_add_ps(h,v_vz),c); /* omt = omxt*omyt; */ h = _mm_mul_ps(e,f); /* vx = vx + (omzt + omt)*acy; */ v_vx = _mm_add_ps(v_vx,_mm_mul_ps(_mm_add_ps(h,g),b)); /* vy = vy + (omt - omzt)*acx; */ v_vy = _mm_add_ps(v_vy,_mm_mul_ps(_mm_sub_ps(h,g),a)); /* omt = omxt*omzt; */ h = _mm_mul_ps(e,g); /* vx = vx + (omt - omyt)*acz; */ v_vx = _mm_add_ps(v_vx,_mm_mul_ps(_mm_sub_ps(h,f),c)); /* vz = vz + (omt + omyt)*acx; */ v_vz = _mm_add_ps(v_vz,_mm_mul_ps(_mm_add_ps(h,f),a)); /* omt = omyt*omzt; */ h = _mm_mul_ps(f,g); /* vy = vy + (omt + omxt)*acz; */ v_vy = _mm_add_ps(v_vy,_mm_mul_ps(_mm_add_ps(h,e),c)); /* vz = vz + (omt - omxt)*acy; */ v_vz = _mm_add_ps(v_vz,_mm_mul_ps(_mm_sub_ps(h,e),b)); /* new momentum */ /* vx = dx + (rot1*acx + rot2*acy + rot3*acz)*anorm; */ /* vy = dy + (rot4*acx + rot5*acy + 
rot6*acz)*anorm; */ /* vz = dz + (rot7*acx + rot8*acy + rot9*acz)*anorm; */ v_vx = _mm_add_ps(v_dx,_mm_mul_ps(v_vx,d)); v_vy = _mm_add_ps(v_dy,_mm_mul_ps(v_vy,d)); v_vz = _mm_add_ps(v_dz,_mm_mul_ps(v_vz,d)); /* update inverse gamma */ /* p2 = vx*vx + vy*vy + vz*vz; */ v_at = _mm_mul_ps(v_vx,v_vx); v_at = _mm_add_ps(v_at,_mm_mul_ps(v_vy,v_vy)); v_at = _mm_add_ps(v_at,_mm_mul_ps(v_vz,v_vz)); /* dtg = dtc/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_at = _mm_rsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* v_at = _mm_mul_ps(v_dtc,v_at); */ /* full accuracy calculation */ v_at = _mm_sqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); v_at = _mm_div_ps(v_dtc,v_at); /* full accuracy calculation with SVML */ /* v_at = _mm_invsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* v_at = _mm_mul_ps(v_dtc,v_at); */ /* new position */ /* dx = x + vx*dtg; */ /* dy = y + vy*dtg; */ v_dx = _mm_add_ps(v_x,_mm_mul_ps(v_vx,v_at)); v_dy = _mm_add_ps(v_y,_mm_mul_ps(v_vy,v_at)); /* find particles going out of bounds */ mm = 0; v_st = v_zero; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* dx -= anx; */ /* mm = 2; */ /* } */ v_x = _mm_cmpge_ps(v_dx,v_edgerx); v_y = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_x,v_y); v_it = _mm_srli_si128((__m128i)v_at,8); v_it = _mm_add_epi64((__m128i)v_at,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); /* execute if either test result is true for any particle */ if (jj[0] != 0) { v_st = _mm_and_ps(v_two,v_x); v_x = _mm_and_ps(v_x,_mm_cmpge_ps(v_dx,v_anx)); v_dx = _mm_sub_ps(v_dx,_mm_and_ps(v_anx,v_x)); /* if (dx < edgelx) { */ /* if (dx < 0.0f) { */ /* dx += anx; */ /* if (dx < anx) */ /* mm = 1; */ /* else */ /* dx = 0.0; */ /* } */ /* else { */ /* mm = 1; */ /* } */ /* } */ v_at = _mm_and_ps(v_one,v_y); v_x = _mm_and_ps(v_y,_mm_cmplt_ps(v_dx,v_zero)); v_dx = _mm_add_ps(v_dx,_mm_and_ps(v_anx,v_x)); v_y = _mm_cmplt_ps(v_dx,v_anx); v_dx = _mm_and_ps(v_dx,v_y); v_st = _mm_add_ps(v_st,_mm_and_ps(v_at,v_y)); } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* dy -= any; */ /* mm += 6; */ /* } */ v_y = _mm_cmpge_ps(v_dy,v_edgery); v_x = _mm_cmplt_ps(v_dy,v_edgely); v_at = _mm_or_ps(v_x,v_y); v_it = _mm_srli_si128((__m128i)v_at,8); v_it = _mm_add_epi64((__m128i)v_at,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); /* execute if either test result is true for any particle */ if (jj[0] != 0) { v_st = _mm_add_ps(v_st,_mm_and_ps(v_six,v_y)); v_y = _mm_and_ps(v_y,_mm_cmpge_ps(v_dy,v_any)); v_dy = _mm_sub_ps(v_dy,_mm_and_ps(v_any,v_y)); /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* mm += 3; */ /* else */ /* dy = 0.0; */ /* } */ /* else { */ /* mm += 3; */ /* } */ /* } */ v_at = _mm_and_ps(v_three,v_x); v_y = _mm_and_ps(v_x,_mm_cmplt_ps(v_dy,v_zero)); v_dy = _mm_add_ps(v_dy,_mm_and_ps(v_any,v_y)); v_x = _mm_cmplt_ps(v_dy,v_any); v_dy = _mm_and_ps(v_dy,v_x); v_st = _mm_add_ps(v_st,_mm_and_ps(v_at,v_x)); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ _mm_store_ps(&ppart[j+npoff],v_dx); _mm_store_ps(&ppart[j+nppmx+npoff],v_dy); /* set new momentum */ /* ppart[j+2*nppmx+npoff] = vx; */ /* ppart[j+3*nppmx+npoff] = vy; */ /* ppart[j+4*nppmx+npoff] = vz; */ _mm_store_ps(&ppart[j+2*nppmx+npoff],v_vx); _mm_store_ps(&ppart[j+3*nppmx+npoff],v_vy); 
_mm_store_ps(&ppart[j+4*nppmx+npoff],v_vz); /* increment counters */ /* if (mm > 0) { */ /* ncl[mm+8*k-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*k)] = j + 1; */ /* ihole[1+2*(ih+(ntmax+1)*k)] = mm; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm_store_si128((__m128i *)ll,_mm_cvttps_epi32(v_st)); /* remove zero ist values and left shift data */ kk = 0; memset((void*)lm,0,8*sizeof(int)); for (i = 0; i < 4; i++) { mm = ll[i]; if (mm > 0) { lm[2*kk] = j + i + 1; lm[1+2*kk] = mm; ncl[mm+8*k-1] += 1; kk += 1; } } if (kk > 0) { if ((ih+kk) > ntmax) { nh = 1; } else { v_it = _mm_load_si128((__m128i *)lm); _mm_storeu_si128((__m128i *)&ihole[2*(ih+1+(ntmax+1)*k)],v_it); if (kk > 2) { v_it = _mm_load_si128((__m128i *)&lm[4]); _mm_storeu_si128((__m128i *)&ihole[2*(ih+3+(ntmax+1)*k)],v_it); } } ih += kk; } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; nn = x; mm = y; dxp = x - (float) nn; dyp = y - (float) mm; nm = 4*(nn - noff + mxv*(mm - moff)); amx = 1.0f - dxp; amy = 1.0f - dyp; /* find electric field */ nn = nm; dx = amx*sfxy[nn]; dy = amx*sfxy[nn+1]; dz = amx*sfxy[nn+2]; mm = nn + 4; dx = amy*(dxp*sfxy[mm] + dx); dy = amy*(dxp*sfxy[mm+1] + dy); dz = amy*(dxp*sfxy[mm+2] + dz); nn += 4*mxv; acx = amx*sfxy[nn]; acy = amx*sfxy[nn+1]; acz = amx*sfxy[nn+2]; mm = nn + 4; dx += dyp*(dxp*sfxy[mm] + acx); dy += dyp*(dxp*sfxy[mm+1] + acy); dz += dyp*(dxp*sfxy[mm+2] + acz); /* find magnetic field */ nn = nm; ox = amx*sbxy[nn]; oy = amx*sbxy[nn+1]; oz = amx*sbxy[nn+2]; mm = nn + 4; ox = amy*(dxp*sbxy[mm] + ox); oy = amy*(dxp*sbxy[mm+1] + oy); oz = amy*(dxp*sbxy[mm+2] + oz); nn += 4*mxv; acx = amx*sbxy[nn]; acy = amx*sbxy[nn+1]; acz = amx*sbxy[nn+2]; mm = nn + 4; ox += dyp*(dxp*sbxy[mm] + acx); oy += dyp*(dxp*sbxy[mm+1] + acy); oz += dyp*(dxp*sbxy[mm+2] + acz); /* calculate half impulse */ dx *= qtmh; dy *= qtmh; dz *= qtmh; /* half acceleration */ acx = ppart[j+2*nppmx+npoff] + dx; acy = ppart[j+3*nppmx+npoff] + dy; acz = ppart[j+4*nppmx+npoff] + dz; /* find inverse gamma */ p2 = acx*acx + acy*acy + acz*acz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* renormalize magnetic field */ qtmg = qtmh*gami; /* time-centered kinetic energy */ sum1 += gami*p2/(1.0f + gami); /* calculate cyclotron frequency */ omxt = qtmg*ox; omyt = qtmg*oy; omzt = qtmg*oz; /* calculate rotation matrix */ omt = omxt*omxt + omyt*omyt + omzt*omzt; anorm = 2.0f/(1.0f + omt); omt = 0.5f*(1.0f - omt); rot4 = omxt*omyt; rot7 = omxt*omzt; rot8 = omyt*omzt; rot1 = omt + omxt*omxt; rot5 = omt + omyt*omyt; rot9 = omt + omzt*omzt; rot2 = omzt + rot4; rot4 -= omzt; rot3 = -omyt + rot7; rot7 += omyt; rot6 = omxt + rot8; rot8 -= omxt; /* new momentum */ vx = (rot1*acx + rot2*acy + rot3*acz)*anorm + dx; vy = (rot4*acx + rot5*acy + rot6*acz)*anorm + dy; vz = (rot7*acx + rot8*acy + rot9*acz)*anorm + dz; /* update inverse gamma */ p2 = vx*vx + vy*vy + vz*vz; dtg = dtc/sqrtf(1.0f + p2*ci2); /* new position */ dx = x + vx*dtg; dy = y + vy*dtg; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx -= anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy -= any; mm 
+= 6; } else if (dy < edgely) { if (dy < 0.0) { dy += any; if (dy < any) mm += 3; else dy = 0.0; } else { mm += 3; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; /* set new momentum */ ppart[j+2*nppmx+npoff] = vx; ppart[j+3*nppmx+npoff] = vy; ppart[j+4*nppmx+npoff] = vz; /* increment counters */ if (mm > 0) { ncl[mm+8*k-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*k)] = j + 1; ihole[1+2*(ih+(ntmax+1)*k)] = mm; } else { nh = 1; } } } /* sum2 += sum1; */ _mm_store_pd(&dd[0],v_sum1); for (j = 1; j < 2; j++) { dd[0] += dd[j]; } sum2 += (sum1 + dd[0]); /* set error and end of file flag */ /* ihole overflow */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*k] = ih; } /* normalize kinetic energy */ *ek += sum2; return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void csse2gppost2lt(float ppart[], float q[], int kpic[], float qm, int nppmx, int idimp, int mx, int my, int nxv, int nyv, int mx1, int mxy1) { /* for 2d code, this subroutine calculates particle charge density using first-order linear interpolation, periodic boundaries OpenMP/vector version using guard cells data deposited in tiles particles stored segmented array 17 flops/particle, 6 loads, 4 stores input: all, output: q charge density is approximated by values at the nearest grid points q(n,m)=qm*(1.-dx)*(1.-dy) q(n+1,m)=qm*dx*(1.-dy) q(n,m+1)=qm*(1.-dx)*dy q(n+1,m+1)=qm*dx*dy where n,m = leftmost grid points and dx = x-n, dy = y-m ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m q[k][j] = charge density at grid point j,k kpic = number of particles per tile qm = charge on particle, in units of e nppmx = maximum number of particles in tile idimp = size of phase space = 4 mx/my = number of grids in sorting cell in x/y nxv = first dimension of charge array, must be >= nx+1 nyv = second dimension of charge array, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 requires SSE2, ppart needs to be 16 byte aligned nppmx needs to be a multiple of 4 local data */ #define MXV 33 #define MYV 33 int noff, moff, npoff, npp, nps, mxv; int i, j, k, nn, mm, it; float x, y, dxp, dyp, amx, amy; __m128i v_noff, v_moff, v_mxv; __m128i v_nn, v_mm, v_it; __m128 v_qm, v_one, v_m; __m128 v_x, v_y, v_dxp, v_dyp, v_amx, v_amy; __m128 a, b, c, d; __attribute__((aligned(16))) unsigned int ll[4]; __attribute__((aligned(16))) float sq[MXV*MYV]; /* __attribute__((aligned(16))) float sq[(mx+1)*(my+1)]; */ mxv = mx + 1; v_mxv = _mm_set1_epi32(mxv); v_qm = _mm_set1_ps(qm); v_one = _mm_set1_ps(1.0f); v_m = _mm_castsi128_ps(_mm_set_epi32(-1,-1,-1,0)); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,noff,moff,npp,nps,npoff,nn,mm,it,x,y,dxp,dyp,amx,amy, \ v_noff,v_moff,v_nn,v_mm,v_it,v_x,v_y,v_dxp,v_dyp,v_amx,v_amy,a,b,c,d, \ ll,sq) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm_set1_epi32(noff); v_moff = _mm_set1_epi32(moff); npp = kpic[k]; nps = 4*(npp/4); npoff = idimp*nppmx*k; /* zero out local accumulator */ /* for (j = 0; j < mxv*(my+1); j++) { */ /* sq[j] = 0.0f; */ /* } */ memset((void*)sq,0,mxv*(my+1)*sizeof(float)); /* loop over particles in tile in groups of 4 */ for (j = 0; j < nps; j+=4) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = 
ppart[j+nppmx+npoff]; */ v_x = _mm_load_ps(&ppart[j+npoff]); v_y = _mm_load_ps(&ppart[j+nppmx+npoff]); /* nn = x; */ /* mm = y; */ v_nn = _mm_cvttps_epi32(v_x); v_mm = _mm_cvttps_epi32(v_y); /* dxp = qm*(x - (float) nn); */ v_dxp = _mm_sub_ps(v_x,_mm_cvtepi32_ps(v_nn)); v_dxp = _mm_mul_ps(v_dxp,v_qm); /* dyp = y - (float) mm; */ v_dyp = _mm_sub_ps(v_y,_mm_cvtepi32_ps(v_mm)); /* nn = nn - noff + mxv*(mm - moff); */ v_nn = _mm_sub_epi32(v_nn,v_noff); v_mm = _mm_sub_epi32(v_mm,v_moff); v_it = _mm_mul_epu32(v_mxv,_mm_srli_si128(v_mm,4)); v_mm = _mm_mul_epu32(v_mm,v_mxv); v_mm = _mm_add_epi32(v_mm,_mm_slli_si128(v_it,4)); v_nn = _mm_add_epi32(v_nn,v_mm); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ v_amx = _mm_sub_ps(v_qm,v_dxp); v_amy = _mm_sub_ps(v_one,v_dyp); /* calculate weights, for lower left/right, upper left/right */ a = _mm_mul_ps(v_amx,v_amy); b = _mm_mul_ps(v_dxp,v_amy); c = _mm_mul_ps(v_amx,v_dyp); d = _mm_mul_ps(v_dxp,v_dyp); _mm_store_si128((__m128i *)ll,v_nn); /* transpose so a,b,c,d contain the 4 weights for each of 4 particles */ _MM_TRANSPOSE4_PS(a,b,c,d); /* deposit charge within tile to local accumulator */ /* x = sq[nn] + amx*amy; */ /* y = sq[nn+1] + dxp*amy; */ /* sq[nn] = x; */ /* sq[nn+1] = y; */ /* nn += mxv; */ /* x = sq[nn] + amx*dyp; */ /* y = sq[nn+1] + dxp*dyp; */ /* sq[nn] = x; */ /* sq[nn+1] = y; */ /* deposit for first particle */ mm = ll[0]; v_x = _mm_loadl_pi(v_x,(__m64 *)&sq[mm]); v_x = _mm_loadh_pi(v_x,(__m64 *)&sq[mm+mxv]); v_x = _mm_add_ps(v_x,a); _mm_storel_pi((__m64 *)&sq[mm],v_x); _mm_storeh_pi((__m64 *)&sq[mm+mxv],v_x); /* deposit for second particle */ mm = ll[1]; v_y = _mm_loadl_pi(v_y,(__m64 *)&sq[mm]); v_y = _mm_loadh_pi(v_y,(__m64 *)&sq[mm+mxv]); v_y = _mm_add_ps(v_y,b); _mm_storel_pi((__m64 *)&sq[mm],v_y); _mm_storeh_pi((__m64 *)&sq[mm+mxv],v_y); /* deposit for third particle */ mm = ll[2]; v_x = _mm_loadl_pi(v_x,(__m64 *)&sq[mm]); v_x = _mm_loadh_pi(v_x,(__m64 *)&sq[mm+mxv]); v_x = _mm_add_ps(v_x,c); _mm_storel_pi((__m64 *)&sq[mm],v_x); _mm_storeh_pi((__m64 *)&sq[mm+mxv],v_x); /* deposit for fourth particle */ mm = ll[3]; v_y = _mm_loadl_pi(v_y,(__m64 *)&sq[mm]); v_y = _mm_loadh_pi(v_y,(__m64 *)&sq[mm+mxv]); v_y = _mm_add_ps(v_y,d); _mm_storel_pi((__m64 *)&sq[mm],v_y); _mm_storeh_pi((__m64 *)&sq[mm+mxv],v_y); } /* loop over remaining particles in tile */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; nn = x; mm = y; dxp = qm*(x - (float) nn); dyp = y - (float) mm; nn = nn - noff + mxv*(mm - moff); amx = qm - dxp; amy = 1.0f - dyp; /* deposit charge within tile to local accumulator */ x = sq[nn] + amx*amy; y = sq[nn+1] + dxp*amy; sq[nn] = x; sq[nn+1] = y; nn += mxv; x = sq[nn] + amx*dyp; y = sq[nn+1] + dxp*dyp; sq[nn] = x; sq[nn+1] = y; } /* deposit charge to interior points in global array */ nn = nxv - noff; mm = nyv - moff; nn = mx < nn ? mx : nn; mm = my < mm ? my : mm; nps = 4*(nn/4); for (j = 1; j < mm; j++) { /* vector loop over elements in blocks of 4 */ /* for (i = 1; i < nn; i++) { */ /* q[i+noff+nxv*(j+moff)] += sq[i+mxv*j]; */ /* } */ for (i = 0; i < nps; i+=4) { v_x = _mm_loadu_ps(&q[i+noff+nxv*(j+moff)]); v_y = _mm_loadu_ps(&sq[i+mxv*j]); /* zero out first element for i = 0 */ if (i==0) v_y = _mm_and_ps(v_y,v_m); v_x = _mm_add_ps(v_x,v_y); _mm_storeu_ps(&q[i+noff+nxv*(j+moff)],v_x); } /* loop over remaining elements */ it = 1 > nps ?
1 : nps; for (i = it; i < nn; i++) { q[i+noff+nxv*(j+moff)] += sq[i+mxv*j]; } } /* deposit charge to edge points in global array */ mm = nyv - moff; mm = my+1 < mm ? my+1 : mm; for (i = 1; i < nn; i++) { #pragma omp atomic q[i+noff+nxv*moff] += sq[i]; if (mm > my) { #pragma omp atomic q[i+noff+nxv*(mm+moff-1)] += sq[i+mxv*(mm-1)]; } } nn = nxv - noff; nn = mx+1 < nn ? mx+1 : nn; for (j = 0; j < mm; j++) { #pragma omp atomic q[noff+nxv*(j+moff)] += sq[mxv*j]; if (nn > mx) { #pragma omp atomic q[nn+noff-1+nxv*(j+moff)] += sq[nn-1+mxv*j]; } } } return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void csse2gjppost2lt(float ppart[], float cu[], int kpic[], float qm, float dt, int nppmx, int idimp, int nx, int ny, int mx, int my, int nxv, int nyv, int mx1, int mxy1, int ipbc) { /* for 2-1/2d code, this subroutine calculates particle current density using first-order linear interpolation in addition, particle positions are advanced a half time-step vector/OpenMP version using guard cells data deposited in tiles particles stored segmented array 41 flops/particle, 17 loads, 14 stores input: all, output: ppart, cu current density is approximated by values at the nearest grid points cu(i,n,m)=qci*(1.-dx)*(1.-dy) cu(i,n+1,m)=qci*dx*(1.-dy) cu(i,n,m+1)=qci*(1.-dx)*dy cu(i,n+1,m+1)=qci*dx*dy where n,m = leftmost grid points and dx = x-n, dy = y-m and qci = qm*vi, where i = x,y,z ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = x velocity of particle n in tile m ppart[m][3][n] = y velocity of particle n in tile m ppart[m][4][n] = z velocity of particle n in tile m cu[k][j][i] = ith component of current density at grid point j,k kpic = number of particles per tile qm = charge on particle, in units of e dt = time interval between successive calculations nppmx = maximum number of particles in tile idimp = size of phase space = 5 nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y nxv = first dimension of current array, must be >= nx+1 nyv = second dimension of current array, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,2d periodic,2d reflecting,mixed reflecting/periodic) requires SSE2, ppart and cu need to be 16 byte aligned nppmx needs to be a multiple of 4, cu needs to have 4 components local data */ #define MXV 33 #define MYV 33 int noff, moff, npoff, npp, mxv; int i, j, k, nps, nn, mm; float edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy; float x, y, dx, dy, vx, vy, vz; __m128i v_noff, v_moff, v_mxv; __m128i v_nn, v_mm, v_it; __m128 v_qm, v_dt, v_one; __m128 v_dxp, v_dyp, v_amx, v_amy, v_at; __m128 v_x, v_y, v_dx, v_dy, v_vx, v_vy; __m128 v_edgelx, v_edgely, v_edgerx, v_edgery; __m128 a, b, c, d, va, vb, vc, vd; __attribute__((aligned(16))) unsigned int ll[4]; __attribute__((aligned(16))) unsigned long kk[1]; __attribute__((aligned(16))) float scu[4*MXV*MYV]; /* __attribute__((aligned(16))) float scu[4*(mx+1)*(my+1)]; */ mxv = mx + 1; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgerx = (float) nx; edgery = (float) ny; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } else if (ipbc==3) { edgelx = 1.0f; edgerx = (float) (nx-1); } v_mxv = _mm_set1_epi32(mxv); v_qm = _mm_set1_ps(qm); v_one = _mm_set1_ps(1.0f); v_dt = _mm_set1_ps(dt); v_edgelx = 
_mm_set1_ps(edgelx); v_edgely = _mm_set1_ps(edgely); v_edgerx = _mm_set1_ps(edgerx); v_edgery = _mm_set1_ps(edgery); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,noff,moff,npp,npoff,nps,nn,mm,x,y,dxp,dyp,amx,amy,dx,dy, \ vx,vy,vz,v_noff,v_moff,v_nn,v_mm,v_it,v_x,v_y,v_vx,v_vy,v_dxp,v_dyp, \ v_amx,v_amy,v_dx,v_dy,v_at,a,b,c,d,va,vb,vc,vd,ll,kk,scu) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm_set1_epi32(noff); v_moff = _mm_set1_epi32(moff); npp = kpic[k]; nps = 4*(npp/4); npoff = idimp*nppmx*k; /* zero out local accumulator */ /* for (j = 0; j < 4*mxv*(my+1); j++) { */ /* scu[j] = 0.0f; */ /* } */ memset((void*)scu,0,4*mxv*(my+1)*sizeof(float)); /* vector loop over particles in blocks of 4 */ for (j = 0; j < nps; j+=4) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ v_x = _mm_load_ps(&ppart[j+npoff]); v_y = _mm_load_ps(&ppart[j+nppmx+npoff]); /* nn = x; */ /* mm = y; */ v_nn = _mm_cvttps_epi32(v_x); v_mm = _mm_cvttps_epi32(v_y); /* dxp = qm*(x - (float) nn); */ v_dxp = _mm_sub_ps(v_x,_mm_cvtepi32_ps(v_nn)); v_dxp = _mm_mul_ps(v_dxp,v_qm); /* dyp = y - (float) mm; */ v_dyp = _mm_sub_ps(v_y,_mm_cvtepi32_ps(v_mm)); /* nm = 4*(nn - noff + mxv*(mm - moff)); */ v_nn = _mm_sub_epi32(v_nn,v_noff); v_mm = _mm_sub_epi32(v_mm,v_moff); v_it = _mm_mul_epu32(v_mxv,_mm_srli_si128(v_mm,4)); v_mm = _mm_mul_epu32(v_mm,v_mxv); v_mm = _mm_add_epi32(v_mm,_mm_slli_si128(v_it,4)); v_nn = _mm_slli_epi32(_mm_add_epi32(v_nn,v_mm),2); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ v_amx = _mm_sub_ps(v_qm,v_dxp); v_amy = _mm_sub_ps(v_one,v_dyp); /* calculate weights, for lower left/right, upper left/right */ a = _mm_mul_ps(v_amx,v_amy); b = _mm_mul_ps(v_dxp,v_amy); c = _mm_mul_ps(v_amx,v_dyp); d = _mm_mul_ps(v_dxp,v_dyp); _mm_store_si128((__m128i *)ll,v_nn); /* deposit current */ /* vx = ppart[j+2*nppmx+npoff]; */ /* vy = ppart[j+3*nppmx+npoff]; */ /* vz = ppart[j+4*nppmx+npoff]; */ v_vx = _mm_load_ps(&ppart[j+2*nppmx+npoff]); v_vy = _mm_load_ps(&ppart[j+3*nppmx+npoff]); va = v_vx; vb = v_vy; vc = _mm_load_ps(&ppart[j+4*nppmx+npoff]); vd = _mm_setzero_ps(); /* transpose so va,vb,vc,vd contain the 3 velocities plus zero */ /* for each of 4 particles */ _MM_TRANSPOSE4_PS(va,vb,vc,vd); /* dx = amx*amy; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dy = dxp*amy; */ /* mm = nn + 4; */ /* scu[mm] += vx*dy; */ /* scu[mm+1] += vy*dy; */ /* scu[mm+2] += vz*dy; */ /* dx = amx*dyp; */ /* nn += 4*mxv; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dy = dxp*dyp; */ /* mm = nn + 4; */ /* scu[mm] += vx*dy; */ /* scu[mm+1] += vy*dy; */ /* scu[mm+2] += vz*dy; */ /* deposit for first particle */ mm = ll[0]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(va,_mm_shuffle_ps(a,a,0))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(va,_mm_shuffle_ps(b,b,0))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(va,_mm_shuffle_ps(c,c,0))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(va,_mm_shuffle_ps(d,d,0))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for second particle */ mm = ll[1]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vb,_mm_shuffle_ps(a,a,85)));
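/* note: _mm_shuffle_ps(x,x,imm) with imm = 0, 85, 170 or 255 (0x00, */
/* 0x55, 0xAA, 0xFF) broadcasts lane 0, 1, 2 or 3 of a weight vector, */
/* so each particle's scalar weight multiplies all 4 components of its */
/* current deposit in a single _mm_mul_ps */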
_mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vb,_mm_shuffle_ps(b,b,85))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vb,_mm_shuffle_ps(c,c,85))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vb,_mm_shuffle_ps(d,d,85))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for third particle */ mm = ll[2]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vc,_mm_shuffle_ps(a,a,170))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vc,_mm_shuffle_ps(b,b,170))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vc,_mm_shuffle_ps(c,c,170))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vc,_mm_shuffle_ps(d,d,170))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for fourth particle */ mm = ll[3]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vd,_mm_shuffle_ps(a,a,255))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vd,_mm_shuffle_ps(b,b,255))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vd,_mm_shuffle_ps(c,c,255))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vd,_mm_shuffle_ps(d,d,255))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* advance position half a time-step */ /* dx = x + vx*dt; */ /* dy = y + vy*dt; */ v_dx = _mm_add_ps(v_x,_mm_mul_ps(v_vx,v_dt)); v_dy = _mm_add_ps(v_y,_mm_mul_ps(v_vy,v_dt)); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* ppart[j+2*nppmx+npoff] = -vx; */ /* } */ v_at = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dx,v_edgerx)); v_x = _mm_and_ps(v_at,v_x); v_dx = _mm_add_ps(_mm_andnot_ps(v_at,v_dx),v_x); v_dxp = _mm_and_ps(v_at,v_vx); v_vx = _mm_sub_ps(_mm_andnot_ps(v_at,v_vx),v_dxp); /* write output if test result is true for any particle */ v_mm = _mm_srli_si128((__m128i)v_at,8); v_mm = _mm_add_epi64((__m128i)v_at,v_mm); _mm_storel_epi64((__m128i *)&kk[0],v_mm); if (kk[0] != 0) _mm_store_ps(&ppart[j+2*nppmx+npoff],v_vx); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* ppart[j+3*nppmx+npoff] = -vy; */ /* } */ v_at = _mm_cmplt_ps(v_dy,v_edgely); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dy,v_edgery)); v_y = _mm_and_ps(v_at,v_y); v_dy = _mm_add_ps(_mm_andnot_ps(v_at,v_dy),v_y); v_dyp = _mm_and_ps(v_at,v_vy); v_vy = _mm_sub_ps(_mm_andnot_ps(v_at,v_vy),v_dyp); /* write output if test result is true for any particle */ v_mm = _mm_srli_si128((__m128i)v_at,8); v_mm = _mm_add_epi64((__m128i)v_at,v_mm); _mm_storel_epi64((__m128i *)&kk[0],v_mm); if (kk[0] != 0) _mm_store_ps(&ppart[j+3*nppmx+npoff],v_vy); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* ppart[j+2*nppmx+npoff] = -vx; */ /* } */ v_at = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dx,v_edgerx)); v_x = _mm_and_ps(v_at,v_x); v_dx = _mm_add_ps(_mm_andnot_ps(v_at,v_dx),v_x); v_dxp = _mm_and_ps(v_at,v_vx); v_vx = _mm_sub_ps(_mm_andnot_ps(v_at,v_vx),v_dxp); /* write output if test result is true for any particle */ v_mm = _mm_srli_si128((__m128i)v_at,8); v_mm = _mm_add_epi64((__m128i)v_at,v_mm); 
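/* note: each lane of a _mm_cmplt_ps/_mm_cmpge_ps result is all ones */
/* when true, so adding the upper 64 bits of v_at onto the lower 64 */
/* and storing the low half yields a nonzero kk[0] if and only if at */
/* least one of the 4 particles crossed a boundary; the store of the */
/* reversed velocities below is then skipped in the common case */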
_mm_storel_epi64((__m128i *)&kk[0],v_mm); if (kk[0] != 0) _mm_store_ps(&ppart[j+2*nppmx+npoff],v_vx); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ _mm_store_ps(&ppart[j+npoff],v_dx); _mm_store_ps(&ppart[j+nppmx+npoff],v_dy); } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; nn = x; mm = y; dxp = qm*(x - (float) nn); dyp = y - (float) mm; nn = 4*(nn - noff + mxv*(mm - moff)); amx = qm - dxp; amy = 1.0f - dyp; /* deposit current */ dx = amx*amy; dy = dxp*amy; vx = ppart[j+2*nppmx+npoff]; vy = ppart[j+3*nppmx+npoff]; vz = ppart[j+4*nppmx+npoff]; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = amx*dyp; mm = nn + 4; scu[mm] += vx*dy; scu[mm+1] += vy*dy; scu[mm+2] += vz*dy; dy = dxp*dyp; nn += 4*mxv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; mm = nn + 4; scu[mm] += vx*dy; scu[mm+1] += vy*dy; scu[mm+2] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[j+2*nppmx+npoff] = -vx; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[j+3*nppmx+npoff] = -vy; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[j+2*nppmx+npoff] = -vx; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; } /* deposit current to interior points in global array */ nn = nxv - noff; mm = nyv - moff; nn = mx < nn ? mx : nn; mm = my < mm ? my : mm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { /* cu[4*(i+noff+nxv*(j+moff))] += scu[4*(i+mxv*j)]; */ /* cu[1+4*(i+noff+nxv*(j+moff))] += scu[1+4*(i+mxv*j)]; */ /* cu[2+4*(i+noff+nxv*(j+moff))] += scu[2+4*(i+mxv*j)]; */ v_x = _mm_loadu_ps(&cu[4*(i+noff+nxv*(j+moff))]); v_y = _mm_loadu_ps(&scu[4*(i+mxv*j)]); v_x = _mm_add_ps(v_x,v_y); _mm_storeu_ps(&cu[4*(i+noff+nxv*(j+moff))],v_x); } } /* deposit current to edge points in global array */ mm = nyv - moff; mm = my+1 < mm ? my+1 : mm; for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff)] += scu[4*i]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff)] += scu[1+4*i]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff)] += scu[2+4*i]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1))] += scu[4*(i+mxv*(mm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1))] += scu[1+4*(i+mxv*(mm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1))] += scu[2+4*(i+mxv*(mm-1))]; } } nn = nxv - noff; nn = mx+1 < nn ? 
mx+1 : nn; for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff))] += scu[4*mxv*j]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff))] += scu[1+4*mxv*j]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff))] += scu[2+4*mxv*j]; if (nn > mx) { #pragma omp atomic cu[4*(nn+noff-1+nxv*(j+moff))] += scu[4*((nn-1)+mxv*j)]; #pragma omp atomic cu[1+4*(nn+noff-1+nxv*(j+moff))] += scu[1+4*((nn-1)+mxv*j)]; #pragma omp atomic cu[2+4*(nn+noff-1+nxv*(j+moff))] += scu[2+4*((nn-1)+mxv*j)]; } } } return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void csse2gjppostf2lt(float ppart[], float cu[], int kpic[], int ncl[], int ihole[], float qm, float dt, int nppmx, int idimp, int nx, int ny, int mx, int my, int nxv, int nyv, int mx1, int mxy1, int ntmax, int *irc) { /* for 2-1/2d code, this subroutine calculates particle current density using first-order linear interpolation in addition, particle positions are advanced a half time-step with periodic boundary conditions. also determines list of particles which are leaving this tile vector/OpenMP version using guard cells data deposited in tiles particles stored segmented array 41 flops/particle, 17 loads, 14 stores input: all except ncl, ihole, irc, output: ppart, cu, ncl, ihole, irc current density is approximated by values at the nearest grid points cu(i,n,m)=qci*(1.-dx)*(1.-dy) cu(i,n+1,m)=qci*dx*(1.-dy) cu(i,n,m+1)=qci*(1.-dx)*dy cu(i,n+1,m+1)=qci*dx*dy where n,m = leftmost grid points and dx = x-n, dy = y-m and qci = qm*vi, where i = x,y,z ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = x velocity of particle n in tile m ppart[m][3][n] = y velocity of particle n in tile m ppart[m][4][n] = z velocity of particle n in tile m cu[k][j][i] = ith component of current density at grid point j,k kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = destination of particle leaving hole ihole[k][0][0] = ih, number of holes left (error, if negative) qm = charge on particle, in units of e dt = time interval between successive calculations nppmx = maximum number of particles in tile idimp = size of phase space = 5 nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y nxv = first dimension of current array, must be >= nx+1 nyv = second dimension of current array, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 optimized version requires SSE2, ppart and cu need to be 16 byte aligned nppmx needs to be a multiple of 4, cu needs to have 4 components local data */ #define MXV 33 #define MYV 33 int noff, moff, npoff, npp, mxv; int i, j, k, nps, ih, nh, nn, mm, kk; float dxp, dyp, amx, amy; float x, y, dx, dy, vx, vy, vz; float anx, any, edgelx, edgely, edgerx, edgery; __m128i v_noff, v_moff, v_mxv; __m128i v_nn, v_mm, v_it; __m128 v_qm, v_dt, v_one; __m128 v_dxp, v_dyp, v_amx, v_amy, v_st, v_at; __m128 v_x, v_y, v_dx, v_dy, v_vx, v_vy; __m128 v_anx, v_any, v_edgelx, v_edgely, v_edgerx, v_edgery; __m128 v_zero, v_two, v_three, v_six; __m128 a, b, c, d, va, vb, vc, vd; __attribute__((aligned(16))) unsigned int ll[4], lm[8]; __attribute__((aligned(16))) unsigned long jj[1]; 
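/* lm packs the (location,destination) pairs of up to 4 departing */
/* particles so they can be copied to ihole with at most two 128-bit */
/* stores; jj receives the folded 64-bit any-lane-true test result; */
/* destinations use the same 1..8 edge encoding counted in ncl */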
__attribute__((aligned(16))) float scu[4*MXV*MYV]; /* __attribute__((aligned(16))) float scu[4*(mx+1)*(my+1)]; */ mxv = mx + 1; anx = (float) nx; any = (float) ny; v_mxv = _mm_set1_epi32(mxv); v_qm = _mm_set1_ps(qm); v_anx = _mm_set1_ps(anx); v_any = _mm_set1_ps(any); v_zero = _mm_setzero_ps(); v_one = _mm_set1_ps(1.0f); v_dt = _mm_set1_ps(dt); v_two = _mm_set1_ps(2.0f); v_three = _mm_set1_ps(3.0f); v_six = _mm_set1_ps(6.0f); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,noff,moff,npp,npoff,nps,nn,mm,kk,ih,nh,x,y,dxp,dyp,amx, \ amy,dx,dy,vx,vy,vz,edgelx,edgely,edgerx,edgery,v_noff,v_moff,v_nn,v_mm, \ v_it,v_x,v_y,v_vx,v_vy,v_dxp,v_dyp,v_amx,v_amy,v_dx,v_dy,v_at,v_st, \ v_edgelx,v_edgely,v_edgerx,v_edgery,a,b,c,d,va,vb,vc,vd,jj,ll,lm,scu) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm_set1_epi32(noff); v_moff = _mm_set1_epi32(moff); npp = kpic[k]; nps = 4*(npp/4); npoff = idimp*nppmx*k; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; v_edgelx = _mm_set1_ps(edgelx); v_edgely = _mm_set1_ps(edgely); v_edgerx = _mm_set1_ps(edgerx); v_edgery = _mm_set1_ps(edgery); ih = 0; nh = 0; nn += 1; mm += 1; /* zero out local accumulator */ /* for (j = 0; j < 4*mxv*(my+1); j++) { */ /* scu[j] = 0.0f; */ /* } */ memset((void*)scu,0,4*mxv*(my+1)*sizeof(float)); /* clear counters */ /* for (j = 0; j < 8; j++) { */ /* ncl[j+8*k] = 0; */ /* } */ memset((void*)&ncl[8*k],0,8*sizeof(int)); /* vector loop over particles in blocks of 4 */ for (j = 0; j < nps; j+=4) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ v_x = _mm_load_ps(&ppart[j+npoff]); v_y = _mm_load_ps(&ppart[j+nppmx+npoff]); /* nn = x; */ /* mm = y; */ v_nn = _mm_cvttps_epi32(v_x); v_mm = _mm_cvttps_epi32(v_y); /* dxp = qm*(x - (float) nn); */ v_dxp = _mm_sub_ps(v_x,_mm_cvtepi32_ps(v_nn)); v_dxp = _mm_mul_ps(v_dxp,v_qm); /* dyp = y - (float) mm; */ v_dyp = _mm_sub_ps(v_y,_mm_cvtepi32_ps(v_mm)); /* nm = 4*(nn - noff + mxv*(mm - moff)); */ v_nn = _mm_sub_epi32(v_nn,v_noff); v_mm = _mm_sub_epi32(v_mm,v_moff); v_it = _mm_mul_epu32(v_mxv,_mm_srli_si128(v_mm,4)); v_mm = _mm_mul_epu32(v_mm,v_mxv); v_mm = _mm_add_epi32(v_mm,_mm_slli_si128(v_it,4)); v_nn = _mm_slli_epi32(_mm_add_epi32(v_nn,v_mm),2); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ v_amx = _mm_sub_ps(v_qm,v_dxp); v_amy = _mm_sub_ps(v_one,v_dyp); /* calculate weights, for lower left/right, upper left/right */ a = _mm_mul_ps(v_amx,v_amy); b = _mm_mul_ps(v_dxp,v_amy); c = _mm_mul_ps(v_amx,v_dyp); d = _mm_mul_ps(v_dxp,v_dyp); _mm_store_si128((__m128i *)ll,v_nn); /* deposit current */ /* vx = ppart[j+2*nppmx+npoff]; */ /* vy = ppart[j+3*nppmx+npoff]; */ /* vz = ppart[j+4*nppmx+npoff]; */ v_vx = _mm_load_ps(&ppart[j+2*nppmx+npoff]); v_vy = _mm_load_ps(&ppart[j+3*nppmx+npoff]); va = v_vx; vb = v_vy; vc = _mm_load_ps(&ppart[j+4*nppmx+npoff]); vd = _mm_setzero_ps(); /* transpose so va,vb,vc,vd contain the 3 velocities plus zero */ /* for each of 4 particles */ _MM_TRANSPOSE4_PS(va,vb,vc,vd); /* dx = amx*amy; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dy = dxp*amy; */ /* mm = nn + 4; */ /* scu[mm] += vx*dy; */ /* scu[mm+1] += vy*dy; */ /* scu[mm+2] += vz*dy; */ /* dx = amx*dyp; */ /* nn += 4*mxv; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] +=
vz*dx; */ /* dy = dxp*dyp; */ /* mm = nn + 4; */ /* scu[mm] += vx*dy; */ /* scu[mm+1] += vy*dy; */ /* scu[mm+2] += vz*dy; */ /* deposit for first particle */ mm = ll[0]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(va,_mm_shuffle_ps(a,a,0))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(va,_mm_shuffle_ps(b,b,0))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(va,_mm_shuffle_ps(c,c,0))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(va,_mm_shuffle_ps(d,d,0))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for second particle */ mm = ll[1]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vb,_mm_shuffle_ps(a,a,85))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vb,_mm_shuffle_ps(b,b,85))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vb,_mm_shuffle_ps(c,c,85))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vb,_mm_shuffle_ps(d,d,85))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for third particle */ mm = ll[2]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vc,_mm_shuffle_ps(a,a,170))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vc,_mm_shuffle_ps(b,b,170))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vc,_mm_shuffle_ps(c,c,170))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vc,_mm_shuffle_ps(d,d,170))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for fourth particle */ mm = ll[3]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vd,_mm_shuffle_ps(a,a,255))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vd,_mm_shuffle_ps(b,b,255))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vd,_mm_shuffle_ps(c,c,255))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vd,_mm_shuffle_ps(d,d,255))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* advance position half a time-step */ /* dx = x + vx*dt; */ /* dy = y + vy*dt; */ v_dx = _mm_add_ps(v_x,_mm_mul_ps(v_vx,v_dt)); v_dy = _mm_add_ps(v_y,_mm_mul_ps(v_vy,v_dt)); /* find particles going out of bounds */ mm = 0; v_st = v_zero; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* dx -= anx; */ /* mm = 2; */ /* } */ v_x = _mm_cmpge_ps(v_dx,v_edgerx); v_y = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_x,v_y); v_it = _mm_srli_si128((__m128i)v_at,8); v_it = _mm_add_epi64((__m128i)v_at,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); /* execute if either test result is true for any particle */ if (jj[0] != 0) { v_st = _mm_and_ps(v_two,v_x); v_x = _mm_and_ps(v_x,_mm_cmpge_ps(v_dx,v_anx)); v_dx = _mm_sub_ps(v_dx,_mm_and_ps(v_anx,v_x)); /* if (dx < edgelx) { */ /* if (dx < 0.0f) { */ /* dx += anx; */ /* if (dx < anx) */ /* mm = 1; */ /* else */ /* dx = 0.0; */ /* } */ /* else { */ /* mm = 1; */ /* } */ /* } */ v_at = 
_mm_and_ps(v_one,v_y); v_x = _mm_and_ps(v_y,_mm_cmplt_ps(v_dx,v_zero)); v_dx = _mm_add_ps(v_dx,_mm_and_ps(v_anx,v_x)); v_y = _mm_cmplt_ps(v_dx,v_anx); v_dx = _mm_and_ps(v_dx,v_y); v_st = _mm_add_ps(v_st,_mm_and_ps(v_at,v_y)); } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* dy -= any; */ /* mm += 6; */ /* } */ v_y = _mm_cmpge_ps(v_dy,v_edgery); v_x = _mm_cmplt_ps(v_dy,v_edgely); v_at = _mm_or_ps(v_x,v_y); v_it = _mm_srli_si128((__m128i)v_at,8); v_it = _mm_add_epi64((__m128i)v_at,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); /* execute if either test result is true for any particle */ if (jj[0] != 0) { v_st = _mm_add_ps(v_st,_mm_and_ps(v_six,v_y)); v_y = _mm_and_ps(v_y,_mm_cmpge_ps(v_dy,v_any)); v_dy = _mm_sub_ps(v_dy,_mm_and_ps(v_any,v_y)); /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* mm += 3; */ /* else */ /* dy = 0.0; */ /* } */ /* else { */ /* mm += 3; */ /* } */ /* } */ v_at = _mm_and_ps(v_three,v_x); v_y = _mm_and_ps(v_x,_mm_cmplt_ps(v_dy,v_zero)); v_dy = _mm_add_ps(v_dy,_mm_and_ps(v_any,v_y)); v_x = _mm_cmplt_ps(v_dy,v_any); v_dy = _mm_and_ps(v_dy,v_x); v_st = _mm_add_ps(v_st,_mm_and_ps(v_at,v_x)); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ _mm_store_ps(&ppart[j+npoff],v_dx); _mm_store_ps(&ppart[j+nppmx+npoff],v_dy); /* increment counters */ /* if (mm > 0) { */ /* ncl[mm+8*k-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*k)] = j + 1; */ /* ihole[1+2*(ih+(ntmax+1)*k)] = mm; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm_store_si128((__m128i *)ll,_mm_cvttps_epi32(v_st)); /* remove zero ist values and left shift data */ kk = 0; memset((void*)lm,0,8*sizeof(int)); for (i = 0; i < 4; i++) { mm = ll[i]; if (mm > 0) { lm[2*kk] = j + i + 1; lm[1+2*kk] = mm; ncl[mm+8*k-1] += 1; kk += 1; } } if (kk > 0) { if ((ih+kk) > ntmax) { nh = 1; } else { v_it = _mm_load_si128((__m128i *)lm); _mm_storeu_si128((__m128i *)&ihole[2*(ih+1+(ntmax+1)*k)],v_it); if (kk > 2) { v_it = _mm_load_si128((__m128i *)&lm[4]); _mm_storeu_si128((__m128i *)&ihole[2*(ih+3+(ntmax+1)*k)],v_it); } } ih += kk; } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; nn = x; mm = y; dxp = qm*(x - (float) nn); dyp = y - (float) mm; nn = 4*(nn - noff + mxv*(mm - moff)); amx = qm - dxp; amy = 1.0f - dyp; /* deposit current */ dx = amx*amy; dy = dxp*amy; vx = ppart[j+2*nppmx+npoff]; vy = ppart[j+3*nppmx+npoff]; vz = ppart[j+4*nppmx+npoff]; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = amx*dyp; mm = nn + 4; scu[mm] += vx*dy; scu[mm+1] += vy*dy; scu[mm+2] += vz*dy; dy = dxp*dyp; nn += 4*mxv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; mm = nn + 4; scu[mm] += vx*dy; scu[mm+1] += vy*dy; scu[mm+2] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx -= anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy -= any; mm += 6; } else if (dy < edgely) { if (dy < 0.0) { dy += any; if (dy < any) mm += 3; else dy = 0.0; } else { mm += 3; } } /* set new 
position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; /* increment counters */ if (mm > 0) { ncl[mm+8*k-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*k)] = j + 1; ihole[1+2*(ih+(ntmax+1)*k)] = mm; } else { nh = 1; } } } /* deposit current to interior points in global array */ nn = nxv - noff; mm = nyv - moff; nn = mx < nn ? mx : nn; mm = my < mm ? my : mm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { /* cu[4*(i+noff+nxv*(j+moff))] += scu[4*(i+mxv*j)]; */ /* cu[1+4*(i+noff+nxv*(j+moff))] += scu[1+4*(i+mxv*j)]; */ /* cu[2+4*(i+noff+nxv*(j+moff))] += scu[2+4*(i+mxv*j)]; */ v_x = _mm_loadu_ps(&cu[4*(i+noff+nxv*(j+moff))]); v_y = _mm_loadu_ps(&scu[4*(i+mxv*j)]); v_x = _mm_add_ps(v_x,v_y); _mm_storeu_ps(&cu[4*(i+noff+nxv*(j+moff))],v_x); } } /* deposit current to edge points in global array */ mm = nyv - moff; mm = my+1 < mm ? my+1 : mm; for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff)] += scu[4*i]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff)] += scu[1+4*i]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff)] += scu[2+4*i]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1))] += scu[4*(i+mxv*(mm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1))] += scu[1+4*(i+mxv*(mm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1))] += scu[2+4*(i+mxv*(mm-1))]; } } nn = nxv - noff; nn = mx+1 < nn ? mx+1 : nn; for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff))] += scu[4*mxv*j]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff))] += scu[1+4*mxv*j]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff))] += scu[2+4*mxv*j]; if (nn > mx) { #pragma omp atomic cu[4*(nn+noff-1+nxv*(j+moff))] += scu[4*((nn-1)+mxv*j)]; #pragma omp atomic cu[1+4*(nn+noff-1+nxv*(j+moff))] += scu[1+4*((nn-1)+mxv*j)]; #pragma omp atomic cu[2+4*(nn+noff-1+nxv*(j+moff))] += scu[2+4*((nn-1)+mxv*j)]; } } /* set error and end of file flag */ /* ihole overflow */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*k] = ih; } return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void csse2grjppost2lt(float ppart[], float cu[], int kpic[], float qm, float dt, float ci, int nppmx, int idimp, int nx, int ny, int mx, int my, int nxv, int nyv, int mx1, int mxy1, int ipbc) { /* for 2-1/2d code, this subroutine calculates particle current density using first-order linear interpolation in addition, particle positions are advanced a half time-step vector/OpenMP version using guard cells data deposited in tiles particles stored segmented array 47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores input: all, output: ppart, cu current density is approximated by values at the nearest grid points cu(i,n,m)=qci*(1.-dx)*(1.-dy) cu(i,n+1,m)=qci*dx*(1.-dy) cu(i,n,m+1)=qci*(1.-dx)*dy cu(i,n+1,m+1)=qci*dx*dy where n,m = leftmost grid points and dx = x-n, dy = y-m and qci = qm*pi*gami, where i = x,y,z where gami = 1./sqrt(1.+sum(pi**2)*ci*ci) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = x momentum of particle n in tile m ppart[m][3][n] = y momentum of particle n in tile m ppart[m][4][n] = z momentum of particle n in tile m cu[k][j][i] = ith component of current density at grid point j,k kpic = number of particles per tile qm = charge on particle, in units of e dt = time interval between successive calculations ci = reciprocal of velocity of light nppmx = maximum number of particles in tile idimp = size of phase space = 5 nx/ny = system length in x/y direction mx/my
= number of grids in sorting cell in x/y nxv = first dimension of current array, must be >= nx+1 nyv = second dimension of current array, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 ipbc = particle boundary condition = (0,1,2,3) = (none,2d periodic,2d reflecting,mixed reflecting/periodic) requires SSE2, ppart and cu need to be 16 byte aligned nppmx needs to be a multiple of 4, cu needs to have 4 components local data */ #define MXV 33 #define MYV 33 int noff, moff, npoff, npp, mxv; int i, j, k, nps, nn, mm; float ci2, edgelx, edgely, edgerx, edgery, dxp, dyp, amx, amy; float x, y, dx, dy, vx, vy, vz, ux, uy, uz, p2, gami; __m128i v_noff, v_moff, v_mxv; __m128i v_nn, v_mm, v_it; __m128 v_qm, v_dt, v_one, v_ci2; __m128 v_dxp, v_dyp, v_amx, v_amy, v_at; __m128 v_x, v_y, v_dx, v_dy, v_vx, v_vy, v_ux, v_uy; __m128 v_edgelx, v_edgely, v_edgerx, v_edgery; __m128 a, b, c, d, va, vb, vc, vd; __attribute__((aligned(16))) unsigned int ll[4]; __attribute__((aligned(16))) unsigned long kk[1]; __attribute__((aligned(16))) float scu[4*MXV*MYV]; /* __attribute__((aligned(16))) float scu[4*(mx+1)*(my+1)]; */ mxv = mx + 1; ci2 = ci*ci; /* set boundary values */ edgelx = 0.0f; edgely = 0.0f; edgerx = (float) nx; edgery = (float) ny; if (ipbc==2) { edgelx = 1.0f; edgely = 1.0f; edgerx = (float) (nx-1); edgery = (float) (ny-1); } else if (ipbc==3) { edgelx = 1.0f; edgerx = (float) (nx-1); } v_mxv = _mm_set1_epi32(mxv); v_qm = _mm_set1_ps(qm); v_one = _mm_set1_ps(1.0f); v_dt = _mm_set1_ps(dt); v_ci2 = _mm_set1_ps(ci2); v_edgelx = _mm_set1_ps(edgelx); v_edgely = _mm_set1_ps(edgely); v_edgerx = _mm_set1_ps(edgerx); v_edgery = _mm_set1_ps(edgery); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,noff,moff,npp,npoff,nps,nn,mm,x,y,dxp,dyp,amx,amy,dx,dy, \ vx,vy,vz,ux,uy,uz,p2,gami,v_noff,v_moff,v_nn,v_mm,v_it,v_x,v_y,v_vx, \ v_vy,v_ux,v_uy,v_dxp,v_dyp,v_amx,v_amy,v_dx,v_dy,v_at,a,b,c,d,va,vb, \ vc,vd,ll,kk,scu) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm_set1_epi32(noff); v_moff = _mm_set1_epi32(moff); npp = kpic[k]; nps = 4*(npp/4); npoff = idimp*nppmx*k; /* zero out local accumulator */ /* for (j = 0; j < 4*mxv*(my+1); j++) { */ /* scu[j] = 0.0f; */ /* } */ memset((void*)scu,0,4*mxv*(my+1)*sizeof(float)); /* vector loop over particles in blocks of 4 */ for (j = 0; j < nps; j+=4) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ v_x = _mm_load_ps(&ppart[j+npoff]); v_y = _mm_load_ps(&ppart[j+nppmx+npoff]); /* nn = x; */ /* mm = y; */ v_nn = _mm_cvttps_epi32(v_x); v_mm = _mm_cvttps_epi32(v_y); /* dxp = qm*(x - (float) nn); */ v_dxp = _mm_sub_ps(v_x,_mm_cvtepi32_ps(v_nn)); v_dxp = _mm_mul_ps(v_dxp,v_qm); /* dyp = y - (float) mm; */ v_dyp = _mm_sub_ps(v_y,_mm_cvtepi32_ps(v_mm)); /* find inverse gamma */ /* ux = ppart[j+2*nppmx+npoff]; */ /* uy = ppart[j+3*nppmx+npoff]; */ /* uz = ppart[j+4*nppmx+npoff]; */ v_ux = _mm_load_ps(&ppart[j+2*nppmx+npoff]); v_uy = _mm_load_ps(&ppart[j+3*nppmx+npoff]); vc = _mm_load_ps(&ppart[j+4*nppmx+npoff]); /* p2 = ux*ux + uy*uy + uz*uz; */ v_at = _mm_mul_ps(v_ux,v_ux); v_at = _mm_add_ps(v_at,_mm_mul_ps(v_uy,v_uy)); v_at = _mm_add_ps(v_at,_mm_mul_ps(vc,vc)); /* gami = 1.0f/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_at = _mm_rsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* full 
accuracy calculation */ v_at = _mm_sqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); v_at = _mm_div_ps(v_one,v_at); /* full accuracy calculation with SVML */ /* v_at = _mm_invsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* calculate weights */ /* nm = 4*(nn - noff + mxv*(mm - moff)); */ v_nn = _mm_sub_epi32(v_nn,v_noff); v_mm = _mm_sub_epi32(v_mm,v_moff); v_it = _mm_mul_epu32(v_mxv,_mm_srli_si128(v_mm,4)); v_mm = _mm_mul_epu32(v_mm,v_mxv); v_mm = _mm_add_epi32(v_mm,_mm_slli_si128(v_it,4)); v_nn = _mm_slli_epi32(_mm_add_epi32(v_nn,v_mm),2); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ v_amx = _mm_sub_ps(v_qm,v_dxp); v_amy = _mm_sub_ps(v_one,v_dyp); /* calculate weights, for lower left/right, upper left/right */ a = _mm_mul_ps(v_amx,v_amy); b = _mm_mul_ps(v_dxp,v_amy); c = _mm_mul_ps(v_amx,v_dyp); d = _mm_mul_ps(v_dxp,v_dyp); _mm_store_si128((__m128i *)ll,v_nn); /* deposit current */ /* vx = ux*gami; */ /* vy = uy*gami; */ /* vz = uz*gami; */ v_vx = _mm_mul_ps(v_ux,v_at); v_vy = _mm_mul_ps(v_uy,v_at); va = v_vx; vb = v_vy; vc = _mm_mul_ps(vc,v_at); vd = _mm_setzero_ps(); /* transpose so va,vb,vc,vd contain the 3 velocities plus zero */ /* for each of 4 particles */ _MM_TRANSPOSE4_PS(va,vb,vc,vd); /* dx = amx*amy; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dy = dxp*amy; */ /* mm = nn + 4; */ /* scu[mm] += vx*dy; */ /* scu[mm+1] += vy*dy; */ /* scu[mm+2] += vz*dy; */ /* dx = amx*dyp; */ /* nn += 4*mxv; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dy = dxp*dyp; */ /* mm = nn + 4; */ /* scu[mm] += vx*dy; */ /* scu[mm+1] += vy*dy; */ /* scu[mm+2] += vz*dy; */ /* deposit for first particle */ mm = ll[0]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(va,_mm_shuffle_ps(a,a,0))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(va,_mm_shuffle_ps(b,b,0))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(va,_mm_shuffle_ps(c,c,0))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(va,_mm_shuffle_ps(d,d,0))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for second particle */ mm = ll[1]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vb,_mm_shuffle_ps(a,a,85))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vb,_mm_shuffle_ps(b,b,85))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vb,_mm_shuffle_ps(c,c,85))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vb,_mm_shuffle_ps(d,d,85))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for third particle */ mm = ll[2]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vc,_mm_shuffle_ps(a,a,170))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vc,_mm_shuffle_ps(b,b,170))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vc,_mm_shuffle_ps(c,c,170))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vc,_mm_shuffle_ps(d,d,170))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for fourth particle */ mm = ll[3]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vd,_mm_shuffle_ps(a,a,255))); _mm_store_ps(&scu[mm],v_dx); v_dy =
_mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vd,_mm_shuffle_ps(b,b,255))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vd,_mm_shuffle_ps(c,c,255))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vd,_mm_shuffle_ps(d,d,255))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* advance position half a time-step */ /* dx = x + vx*dt; */ /* dy = y + vy*dt; */ v_dx = _mm_add_ps(v_x,_mm_mul_ps(v_vx,v_dt)); v_dy = _mm_add_ps(v_y,_mm_mul_ps(v_vy,v_dt)); /* reflecting boundary conditions */ if (ipbc==2) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* ppart[j+2*nppmx+npoff] = -ux; */ /* } */ v_at = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dx,v_edgerx)); v_x = _mm_and_ps(v_at,v_x); v_dx = _mm_add_ps(_mm_andnot_ps(v_at,v_dx),v_x); v_dxp = _mm_and_ps(v_at,v_ux); v_ux = _mm_sub_ps(_mm_andnot_ps(v_at,v_ux),v_dxp); /* write output if test result is true for any particle */ v_mm = _mm_srli_si128((__m128i)v_at,8); v_mm = _mm_add_epi64((__m128i)v_at,v_mm); _mm_storel_epi64((__m128i *)&kk[0],v_mm); if (kk[0] != 0) _mm_store_ps(&ppart[j+2*nppmx+npoff],v_ux); /* if ((dy < edgely) || (dy >= edgery)) { */ /* dy = y; */ /* ppart[j+3*nppmx+npoff] = -uy; */ /* } */ v_at = _mm_cmplt_ps(v_dy,v_edgely); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dy,v_edgery)); v_y = _mm_and_ps(v_at,v_y); v_dy = _mm_add_ps(_mm_andnot_ps(v_at,v_dy),v_y); v_dyp = _mm_and_ps(v_at,v_uy); v_uy = _mm_sub_ps(_mm_andnot_ps(v_at,v_uy),v_dyp); /* write output if test result is true for any particle */ v_mm = _mm_srli_si128((__m128i)v_at,8); v_mm = _mm_add_epi64((__m128i)v_at,v_mm); _mm_storel_epi64((__m128i *)&kk[0],v_mm); if (kk[0] != 0) _mm_store_ps(&ppart[j+3*nppmx+npoff],v_uy); } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { /* if ((dx < edgelx) || (dx >= edgerx)) { */ /* dx = x; */ /* ppart[j+2*nppmx+npoff] = -ux; */ /* } */ v_at = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_at,_mm_cmpge_ps(v_dx,v_edgerx)); v_x = _mm_and_ps(v_at,v_x); v_dx = _mm_add_ps(_mm_andnot_ps(v_at,v_dx),v_x); v_dxp = _mm_and_ps(v_at,v_ux); v_ux = _mm_sub_ps(_mm_andnot_ps(v_at,v_ux),v_dxp); /* write output if test result is true for any particle */ v_mm = _mm_srli_si128((__m128i)v_at,8); v_mm = _mm_add_epi64((__m128i)v_at,v_mm); _mm_storel_epi64((__m128i *)&kk[0],v_mm); if (kk[0] != 0) _mm_store_ps(&ppart[j+2*nppmx+npoff],v_ux); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ _mm_store_ps(&ppart[j+npoff],v_dx); _mm_store_ps(&ppart[j+nppmx+npoff],v_dy); } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; nn = x; mm = y; dxp = qm*(x - (float) nn); dyp = y - (float) mm; /* find inverse gamma */ ux = ppart[j+2*nppmx+npoff]; uy = ppart[j+3*nppmx+npoff]; uz = ppart[j+4*nppmx+npoff]; p2 = ux*ux + uy*uy + uz*uz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* calculate weights */ nn = 4*(nn - noff + mxv*(mm - moff)); amx = qm - dxp; amy = 1.0f - dyp; /* deposit current */ dx = amx*amy; dy = dxp*amy; vx = ux*gami; vy = uy*gami; vz = uz*gami; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = amx*dyp; mm = nn + 4; scu[mm] += vx*dy; scu[mm+1] += vy*dy; scu[mm+2] += vz*dy; dy = dxp*dyp; nn += 4*mxv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; mm = nn + 4; scu[mm] += vx*dy; scu[mm+1] += vy*dy; scu[mm+2] += vz*dy; /* advance position half a time-step */ dx 
= x + vx*dt; dy = y + vy*dt; /* reflecting boundary conditions */ if (ipbc==2) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[j+2*nppmx+npoff] = -ux; } if ((dy < edgely) || (dy >= edgery)) { dy = y; ppart[j+3*nppmx+npoff] = -uy; } } /* mixed reflecting/periodic boundary conditions */ else if (ipbc==3) { if ((dx < edgelx) || (dx >= edgerx)) { dx = x; ppart[j+2*nppmx+npoff] = -ux; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; } /* deposit current to interior points in global array */ nn = nxv - noff; mm = nyv - moff; nn = mx < nn ? mx : nn; mm = my < mm ? my : mm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { /* cu[4*(i+noff+nxv*(j+moff))] += scu[4*(i+mxv*j)]; */ /* cu[1+4*(i+noff+nxv*(j+moff))] += scu[1+4*(i+mxv*j)]; */ /* cu[2+4*(i+noff+nxv*(j+moff))] += scu[2+4*(i+mxv*j)]; */ v_x = _mm_loadu_ps(&cu[4*(i+noff+nxv*(j+moff))]); v_y = _mm_loadu_ps(&scu[4*(i+mxv*j)]); v_x = _mm_add_ps(v_x,v_y); _mm_storeu_ps(&cu[4*(i+noff+nxv*(j+moff))],v_x); } } /* deposit current to edge points in global array */ mm = nyv - moff; mm = my+1 < mm ? my+1 : mm; for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff)] += scu[4*i]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff)] += scu[1+4*i]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff)] += scu[2+4*i]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1))] += scu[4*(i+mxv*(mm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1))] += scu[1+4*(i+mxv*(mm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1))] += scu[2+4*(i+mxv*(mm-1))]; } } nn = nxv - noff; nn = mx+1 < nn ? mx+1 : nn; for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff))] += scu[4*mxv*j]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff))] += scu[1+4*mxv*j]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff))] += scu[2+4*mxv*j]; if (nn > mx) { #pragma omp atomic cu[4*(nn+noff-1+nxv*(j+moff))] += scu[4*((nn-1)+mxv*j)]; #pragma omp atomic cu[1+4*(nn+noff-1+nxv*(j+moff))] += scu[1+4*((nn-1)+mxv*j)]; #pragma omp atomic cu[2+4*(nn+noff-1+nxv*(j+moff))] += scu[2+4*((nn-1)+mxv*j)]; } } } return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void csse2grjppostf2lt(float ppart[], float cu[], int kpic[], int ncl[], int ihole[], float qm, float dt, float ci, int nppmx, int idimp, int nx, int ny, int mx, int my, int nxv, int nyv, int mx1, int mxy1, int ntmax, int *irc) { /* for 2-1/2d code, this subroutine calculates particle current density using first-order linear interpolation for relativistic particles in addition, particle positions are advanced a half time-step with periodic boundary conditions. 
also determines list of particles which are leaving this tile vector/OpenMP version using guard cells data deposited in tiles particles stored segmented array 47 flops/particle, 1 divide, 1 sqrt, 17 loads, 14 stores input: all except ncl, ihole, irc, output: ppart, cu, ncl, ihole, irc current density is approximated by values at the nearest grid points cu(i,n,m)=qci*(1.-dx)*(1.-dy) cu(i,n+1,m)=qci*dx*(1.-dy) cu(i,n,m+1)=qci*(1.-dx)*dy cu(i,n+1,m+1)=qci*dx*dy where n,m = leftmost grid points and dx = x-n, dy = y-m and qci = qm*pi*gami, where i = x,y,z where gami = 1./sqrt(1.+sum(pi**2)*ci*ci) ppart[m][0][n] = position x of particle n in tile m ppart[m][1][n] = position y of particle n in tile m ppart[m][2][n] = x momentum of particle n in tile m ppart[m][3][n] = y momentum of particle n in tile m ppart[m][4][n] = z momentum of particle n in tile m cu[k][j][i] = ith component of current density at grid point j,k kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = destination of particle leaving hole ihole[k][0][0] = ih, number of holes left (error, if negative) qm = charge on particle, in units of e dt = time interval between successive calculations ci = reciprocal of velocity of light nppmx = maximum number of particles in tile idimp = size of phase space = 5 nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y nxv = first dimension of current array, must be >= nx+1 nyv = second dimension of current array, must be >= ny+1 mx1 = (system length in x direction - 1)/mx + 1 mxy1 = mx1*my1, where my1 = (system length in y direction - 1)/my + 1 ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 optimized version requires SSE2, ppart and cu need to be 16 byte aligned nppmx needs to be a multiple of 4, cu needs to have 4 components local data */ #define MXV 33 #define MYV 33 int noff, moff, npoff, npp, mxv; int i, j, k, nps, ih, nh, nn, mm, kk; float ci2, dxp, dyp, amx, amy; float x, y, dx, dy, vx, vy, vz, ux, uy, uz, p2, gami; float anx, any, edgelx, edgely, edgerx, edgery; __m128i v_noff, v_moff, v_mxv; __m128i v_nn, v_mm, v_it; __m128 v_qm, v_dt, v_one, v_ci2; __m128 v_dxp, v_dyp, v_amx, v_amy, v_st, v_at; __m128 v_x, v_y, v_dx, v_dy, v_vx, v_vy, v_ux, v_uy; __m128 v_anx, v_any, v_edgelx, v_edgely, v_edgerx, v_edgery; __m128 v_zero, v_two, v_three, v_six; __m128 a, b, c, d, va, vb, vc, vd; __attribute__((aligned(16))) unsigned int ll[4], lm[8]; __attribute__((aligned(16))) unsigned long jj[1]; __attribute__((aligned(16))) float scu[4*MXV*MYV]; /* __attribute__((aligned(16))) float scu[4*(mx+1)*(my+1)]; */ mxv = mx + 1; ci2 = ci*ci; anx = (float) nx; any = (float) ny; v_mxv = _mm_set1_epi32(mxv); v_qm = _mm_set1_ps(qm); v_anx = _mm_set1_ps(anx); v_any = _mm_set1_ps(any); v_zero = _mm_setzero_ps(); v_one = _mm_set1_ps(1.0f); v_dt = _mm_set1_ps(dt); v_ci2 = _mm_set1_ps(ci2); v_two = _mm_set1_ps(2.0f); v_three = _mm_set1_ps(3.0f); v_six = _mm_set1_ps(6.0f); /* error if local array is too small */ /* if ((mx >= MXV) || (my >= MYV)) */ /* return; */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,noff,moff,npp,npoff,nps,nn,mm,kk,ih,nh,x,y,dxp,dyp,amx, \ amy,dx,dy,vx,vy,vz,ux,uy,uz,edgelx,edgely,edgerx,edgery,p2,gami,v_noff, \ v_moff,v_nn,v_mm,v_it,v_x,v_y,v_vx,v_vy,v_ux,v_uy,v_dxp,v_dyp,v_amx, \ 
v_amy,v_dx,v_dy,v_at,v_st,v_edgelx,v_edgely,v_edgerx,v_edgery,a,b,c,d, \ va,vb,vc,vd,jj,ll,lm,scu) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); v_noff = _mm_set1_epi32(noff); v_moff = _mm_set1_epi32(moff); npp = kpic[k]; nps = 4*(npp/4); npoff = idimp*nppmx*k; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? my : mm; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; v_edgelx = _mm_set1_ps(edgelx); v_edgely = _mm_set1_ps(edgely); v_edgerx = _mm_set1_ps(edgerx); v_edgery = _mm_set1_ps(edgery); ih = 0; nh = 0; nn += 1; mm += 1; /* zero out local accumulator */ /* for (j = 0; j < 4*mxv*(my+1); j++) { */ /* scu[j] = 0.0f; */ /* } */ memset((void*)scu,0,4*mxv*(my+1)*sizeof(float)); /* clear counters */ /* for (j = 0; j < 8; j++) { */ /* ncl[j+8*k] = 0; */ /* } */ memset((void*)&ncl[8*k],0,8*sizeof(int)); /* vector loop over particles in blocks of 4 */ for (j = 0; j < nps; j+=4) { /* find interpolation weights */ /* x = ppart[j+npoff]; */ /* y = ppart[j+nppmx+npoff]; */ v_x = _mm_load_ps(&ppart[j+npoff]); v_y = _mm_load_ps(&ppart[j+nppmx+npoff]); /* nn = x; */ /* mm = y; */ v_nn = _mm_cvttps_epi32(v_x); v_mm = _mm_cvttps_epi32(v_y); /* dxp = qm*(x - (float) nn); */ v_dxp = _mm_sub_ps(v_x,_mm_cvtepi32_ps(v_nn)); v_dxp = _mm_mul_ps(v_dxp,v_qm); /* dyp = y - (float) mm; */ v_dyp = _mm_sub_ps(v_y,_mm_cvtepi32_ps(v_mm)); /* find inverse gamma */ /* ux = ppart[j+2*nppmx+npoff]; */ /* uy = ppart[j+3*nppmx+npoff]; */ /* uz = ppart[j+4*nppmx+npoff]; */ v_ux = _mm_load_ps(&ppart[j+2*nppmx+npoff]); v_uy = _mm_load_ps(&ppart[j+3*nppmx+npoff]); vc = _mm_load_ps(&ppart[j+4*nppmx+npoff]); /* p2 = ux*ux + uy*uy + uz*uz; */ v_at = _mm_mul_ps(v_ux,v_ux); v_at = _mm_add_ps(v_at,_mm_mul_ps(v_uy,v_uy)); v_at = _mm_add_ps(v_at,_mm_mul_ps(vc,vc)); /* gami = 1.0f/sqrtf(1.0f + p2*ci2); */ /* approximate calculation */ /* v_at = _mm_rsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* full accuracy calculation */ v_at = _mm_sqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); v_at = _mm_div_ps(v_one,v_at); /* full accuracy calculation with SVML */ /* v_at = _mm_invsqrt_ps(_mm_add_ps(v_one,_mm_mul_ps(v_at,v_ci2))); */ /* calculate weights */ /* nm = 4*(nn - noff + mxv*(mm - moff)); */ v_nn = _mm_sub_epi32(v_nn,v_noff); v_mm = _mm_sub_epi32(v_mm,v_moff); v_it = _mm_mul_epu32(v_mxv,_mm_srli_si128(v_mm,4)); v_mm = _mm_mul_epu32(v_mm,v_mxv); v_mm = _mm_add_epi32(v_mm,_mm_slli_si128(v_it,4)); v_nn = _mm_slli_epi32(_mm_add_epi32(v_nn,v_mm),2); /* amx = qm - dxp; */ /* amy = 1.0f - dyp; */ v_amx = _mm_sub_ps(v_qm,v_dxp); v_amy = _mm_sub_ps(v_one,v_dyp); /* calculate weights, for lower left/right, upper left/right */ a = _mm_mul_ps(v_amx,v_amy); b = _mm_mul_ps(v_dxp,v_amy); c = _mm_mul_ps(v_amx,v_dyp); d = _mm_mul_ps(v_dxp,v_dyp); _mm_store_si128((__m128i *)ll,v_nn); /* deposit current */ /* vx = ux*gami; */ /* vy = uy*gami; */ /* vz = uz*gami; */ v_vx = _mm_mul_ps(v_ux,v_at); v_vy = _mm_mul_ps(v_uy,v_at); va = v_vx; vb = v_vy; vc = _mm_mul_ps(vc,v_at); vd = _mm_setzero_ps(); /* transpose so va,vb,vc,vd contain the 3 velocities plus zero */ /* for each of 4 particles */ _MM_TRANSPOSE4_PS(va,vb,vc,vd); /* dx = amx*amy; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dy = dxp*amy; */ /* mm = nn + 4; */ /* scu[mm] += vx*dy; */ /* scu[mm+1] += vy*dy; */ /* scu[mm+2] += vz*dy; */ /* dx = amx*dyp; */ /* nn += 4*nxv; */ /* scu[nn] += vx*dx; */ /* scu[nn+1] += vy*dx; */ /* scu[nn+2] += vz*dx; */ /* dy = dxp*dyp; */ /* mm 
= nn + 4; */ /* scu[mm] += vx*dy; */ /* scu[mm+1] += vy*dy; */ /* scu[mm+2] += vz*dy; */ /* deposit for first particle */ mm = ll[0]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(va,_mm_shuffle_ps(a,a,0))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(va,_mm_shuffle_ps(b,b,0))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(va,_mm_shuffle_ps(c,c,0))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(va,_mm_shuffle_ps(d,d,0))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for second particle */ mm = ll[1]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vb,_mm_shuffle_ps(a,a,85))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vb,_mm_shuffle_ps(b,b,85))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vb,_mm_shuffle_ps(c,c,85))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vb,_mm_shuffle_ps(d,d,85))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for third particle */ mm = ll[2]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vc,_mm_shuffle_ps(a,a,170))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vc,_mm_shuffle_ps(b,b,170))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vc,_mm_shuffle_ps(c,c,170))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vc,_mm_shuffle_ps(d,d,170))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* deposit for fourth particle */ mm = ll[3]; v_dx = _mm_load_ps(&scu[mm]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vd,_mm_shuffle_ps(a,a,255))); _mm_store_ps(&scu[mm],v_dx); v_dy = _mm_load_ps(&scu[mm+4]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vd,_mm_shuffle_ps(b,b,255))); _mm_store_ps(&scu[mm+4],v_dy); v_dx = _mm_load_ps(&scu[mm+4*mxv]); v_dx = _mm_add_ps(v_dx,_mm_mul_ps(vd,_mm_shuffle_ps(c,c,255))); _mm_store_ps(&scu[mm+4*mxv],v_dx); v_dy = _mm_load_ps(&scu[mm+4+4*mxv]); v_dy = _mm_add_ps(v_dy,_mm_mul_ps(vd,_mm_shuffle_ps(d,d,255))); _mm_store_ps(&scu[mm+4+4*mxv],v_dy); /* advance position half a time-step */ /* dx = x + vx*dt; */ /* dy = y + vy*dt; */ v_dx = _mm_add_ps(v_x,_mm_mul_ps(v_vx,v_dt)); v_dy = _mm_add_ps(v_y,_mm_mul_ps(v_vy,v_dt)); /* find particles going out of bounds */ mm = 0; v_st = v_zero; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* dx -= anx; */ /* mm = 2; */ /* } */ v_x = _mm_cmpge_ps(v_dx,v_edgerx); v_y = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_x,v_y); v_it = _mm_srli_si128((__m128i)v_at,8); v_it = _mm_add_epi64((__m128i)v_at,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); /* execute if either test result is true for any particle */ if (jj[0] != 0) { v_st = _mm_and_ps(v_two,v_x); v_x = _mm_and_ps(v_x,_mm_cmpge_ps(v_dx,v_anx)); v_dx = _mm_sub_ps(v_dx,_mm_and_ps(v_anx,v_x)); /* if (dx < edgelx) { */ /* if (dx < 0.0f) { */ /* dx += anx; */ /* if (dx < anx) */ /* mm = 1; */ /* else */ /* dx = 0.0; */ /* } */ /* else { */ /* mm = 1; */ /* } */ /* } */ v_at = _mm_and_ps(v_one,v_y); v_x = 
_mm_and_ps(v_y,_mm_cmplt_ps(v_dx,v_zero)); v_dx = _mm_add_ps(v_dx,_mm_and_ps(v_anx,v_x)); v_y = _mm_cmplt_ps(v_dx,v_anx); v_dx = _mm_and_ps(v_dx,v_y); v_st = _mm_add_ps(v_st,_mm_and_ps(v_at,v_y)); } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* dy -= any; */ /* mm += 6; */ /* } */ v_y = _mm_cmpge_ps(v_dy,v_edgery); v_x = _mm_cmplt_ps(v_dy,v_edgely); v_at = _mm_or_ps(v_x,v_y); v_it = _mm_srli_si128((__m128i)v_at,8); v_it = _mm_add_epi64((__m128i)v_at,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); /* execute if either test result is true for any particle */ if (jj[0] != 0) { v_st = _mm_add_ps(v_st,_mm_and_ps(v_six,v_y)); v_y = _mm_and_ps(v_y,_mm_cmpge_ps(v_dy,v_any)); v_dy = _mm_sub_ps(v_dy,_mm_and_ps(v_any,v_y)); /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* mm += 3; */ /* else */ /* dy = 0.0; */ /* } */ /* else { */ /* mm += 3; */ /* } */ /* } */ v_at = _mm_and_ps(v_three,v_x); v_y = _mm_and_ps(v_x,_mm_cmplt_ps(v_dy,v_zero)); v_dy = _mm_add_ps(v_dy,_mm_and_ps(v_any,v_y)); v_x = _mm_cmplt_ps(v_dy,v_any); v_dy = _mm_and_ps(v_dy,v_x); v_st = _mm_add_ps(v_st,_mm_and_ps(v_at,v_x)); } /* set new position */ /* ppart[j+npoff] = dx; */ /* ppart[j+nppmx+npoff] = dy; */ _mm_store_ps(&ppart[j+npoff],v_dx); _mm_store_ps(&ppart[j+nppmx+npoff],v_dy); /* increment counters */ /* if (mm > 0) { */ /* ncl[mm+8*k-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*k)] = j + 1; */ /* ihole[1+2*(ih+(ntmax+1)*k)] = mm; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm_store_si128((__m128i *)ll,_mm_cvttps_epi32(v_st)); /* remove zero ist values and left shift data */ kk = 0; memset((void*)lm,0,8*sizeof(int)); for (i = 0; i < 4; i++) { mm = ll[i]; if (mm > 0) { lm[2*kk] = j + i + 1; lm[1+2*kk] = mm; ncl[mm+8*k-1] += 1; kk += 1; } } if (kk > 0) { if ((ih+kk) > ntmax) { nh = 1; } else { v_it = _mm_load_si128((__m128i *)lm); _mm_storeu_si128((__m128i *)&ihole[2*(ih+1+(ntmax+1)*k)],v_it); if (kk > 2) { v_it = _mm_load_si128((__m128i *)&lm[4]); _mm_storeu_si128((__m128i *)&ihole[2*(ih+3+(ntmax+1)*k)],v_it); } } ih += kk; } } /* loop over remaining particles */ for (j = nps; j < npp; j++) { /* find interpolation weights */ x = ppart[j+npoff]; y = ppart[j+nppmx+npoff]; nn = x; mm = y; dxp = qm*(x - (float) nn); dyp = y - (float) mm; /* find inverse gamma */ ux = ppart[j+2*nppmx+npoff]; uy = ppart[j+3*nppmx+npoff]; uz = ppart[j+4*nppmx+npoff]; p2 = ux*ux + uy*uy + uz*uz; gami = 1.0f/sqrtf(1.0f + p2*ci2); /* calculate weights */ nn = 4*(nn - noff + mxv*(mm - moff)); amx = qm - dxp; amy = 1.0f - dyp; /* deposit current */ dx = amx*amy; dy = dxp*amy; vx = ux*gami; vy = uy*gami; vz = uz*gami; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; dx = amx*dyp; mm = nn + 4; scu[mm] += vx*dy; scu[mm+1] += vy*dy; scu[mm+2] += vz*dy; dy = dxp*dyp; nn += 4*mxv; scu[nn] += vx*dx; scu[nn+1] += vy*dx; scu[nn+2] += vz*dx; mm = nn + 4; scu[mm] += vx*dy; scu[mm+1] += vy*dy; scu[mm+2] += vz*dy; /* advance position half a time-step */ dx = x + vx*dt; dy = y + vy*dt; /* find particles going out of bounds */ mm = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* mm = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) dx -= anx; mm = 2; } else if (dx < edgelx) { if (dx < 0.0f) { dx += anx; if (dx < anx) mm = 1; else dx = 0.0; } else { mm = 1; } } if (dy >= edgery) { if (dy >= any) dy -= any; mm += 6; } else 
if (dy < edgely) { if (dy < 0.0) { dy += any; if (dy < any) mm += 3; else dy = 0.0; } else { mm += 3; } } /* set new position */ ppart[j+npoff] = dx; ppart[j+nppmx+npoff] = dy; /* increment counters */ if (mm > 0) { ncl[mm+8*k-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+(ntmax+1)*k)] = j + 1; ihole[1+2*(ih+(ntmax+1)*k)] = mm; } else { nh = 1; } } } /* deposit current to interior points in global array */ nn = nxv - noff; mm = nyv - moff; nn = mx < nn ? mx : nn; mm = my < mm ? my : mm; for (j = 1; j < mm; j++) { for (i = 1; i < nn; i++) { /* cu[4*(i+noff+nxv*(j+moff))] += scu[4*(i+mxv*j)]; */ /* cu[1+4*(i+noff+nxv*(j+moff))] += scu[1+4*(i+mxv*j)]; */ /* cu[2+4*(i+noff+nxv*(j+moff))] += scu[2+4*(i+mxv*j)]; */ v_x = _mm_loadu_ps(&cu[4*(i+noff+nxv*(j+moff))]); v_y = _mm_loadu_ps(&scu[4*(i+mxv*j)]); v_x = _mm_add_ps(v_x,v_y); _mm_storeu_ps(&cu[4*(i+noff+nxv*(j+moff))],v_x); } } /* deposit current to edge points in global array */ mm = nyv - moff; mm = my+1 < mm ? my+1 : mm; for (i = 1; i < nn; i++) { #pragma omp atomic cu[4*(i+noff+nxv*moff)] += scu[4*i]; #pragma omp atomic cu[1+4*(i+noff+nxv*moff)] += scu[1+4*i]; #pragma omp atomic cu[2+4*(i+noff+nxv*moff)] += scu[2+4*i]; if (mm > my) { #pragma omp atomic cu[4*(i+noff+nxv*(mm+moff-1))] += scu[4*(i+mxv*(mm-1))]; #pragma omp atomic cu[1+4*(i+noff+nxv*(mm+moff-1))] += scu[1+4*(i+mxv*(mm-1))]; #pragma omp atomic cu[2+4*(i+noff+nxv*(mm+moff-1))] += scu[2+4*(i+mxv*(mm-1))]; } } nn = nxv - noff; nn = mx+1 < nn ? mx+1 : nn; for (j = 0; j < mm; j++) { #pragma omp atomic cu[4*(noff+nxv*(j+moff))] += scu[4*mxv*j]; #pragma omp atomic cu[1+4*(noff+nxv*(j+moff))] += scu[1+4*mxv*j]; #pragma omp atomic cu[2+4*(noff+nxv*(j+moff))] += scu[2+4*mxv*j]; if (nn > mx) { #pragma omp atomic cu[4*(nn+noff-1+nxv*(j+moff))] += scu[4*((nn-1)+mxv*j)]; #pragma omp atomic cu[1+4*(nn+noff-1+nxv*(j+moff))] += scu[1+4*((nn-1)+mxv*j)]; #pragma omp atomic cu[2+4*(nn+noff-1+nxv*(j+moff))] += scu[2+4*((nn-1)+mxv*j)]; } } /* set error and end of file flag */ /* ihole overflow */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*(ntmax+1)*k] = ih; } return; #undef MXV #undef MYV } /*--------------------------------------------------------------------*/ void csse2pporder2lt(float ppart[], float ppbuff[], int kpic[], int ncl[], int ihole[], int idimp, int nppmx, int nx, int ny, int mx, int my, int mx1, int my1, int npbmx, int ntmax, int *irc) { /* this subroutine sorts particles by x,y grid in tiles of mx, my linear interpolation, with periodic boundary conditions tiles are assumed to be arranged in 2D linear memory the algorithm has 3 steps. first, one finds particles leaving tile and stores their number in each direction, location, and destination in ncl and ihole. second, a prefix scan of ncl is performed and departing particles are buffered in ppbuff in direction order. finally, we copy the incoming particles from other tiles into ppart.
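for reference, step 2's exclusive prefix scan in scalar form (the commented-out loop that the SSE2 code in the body replaces) is: isum = 0; for (j = 0; j < 8; j++) { ist = ncl[j+8*k]; ncl[j+8*k] = isum; isum += ist; } which turns the per-direction counts into starting offsets into ppbuff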
input: all except ppbuff, ncl, ihole, irc output: ppart, ppbuff, kpic, ncl, ihole, irc ppart[k][0][n] = position x of particle n in tile k ppart[k][1][n] = position y of particle n in tile k ppbuff[k][i][n] = i co-ordinate of particle n in tile k kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = direction destination of particle leaving hole all for tile k ihole[k][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 4 nppmx = maximum number of particles in tile nx/ny = system length in x/y direction mx/my = number of grids in sorting cell in x/y mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 requires SSE2, ppart, ppbuff need to be 16 byte aligned nppmx, npbmx need to be a multiple of 4 local data */ int mxy1, noff, moff, npoff, npp, nps, nboff, ncoff; int i, j, k, ii, kx, ky, ih, nh, ist, nn, mm, isum; int ip, in, j1, j2, kxl, kxr, kk, kl, kr; float anx, any, edgelx, edgely, edgerx, edgery, dx, dy; __m128i v_it, v_is, v_in, v_m1, v_m2; __m128 v_dx, v_dy, v_st, v_at, v_x, v_y; __m128 v_anx, v_any, v_edgelx, v_edgely, v_edgerx, v_edgery; __m128 v_zero, v_one, v_two, v_three, v_six; __attribute__((aligned(16))) unsigned int ll[8], lm[8]; __attribute__((aligned(16))) unsigned long jj[1]; int ks[8]; mxy1 = mx1*my1; anx = (float) nx; any = (float) ny; /* find and count particles leaving tiles and determine destination */ /* update ppart, ihole, ncl */ v_anx = _mm_set1_ps(anx); v_any = _mm_set1_ps(any); v_zero = _mm_setzero_ps(); v_one = _mm_set1_ps(1.0f); v_two = _mm_set1_ps(2.0f); v_three = _mm_set1_ps(3.0f); v_six = _mm_set1_ps(6.0f); /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,noff,moff,npp,nps,npoff,nn,mm,ih,nh,ist,kk,dx,dy, \ edgelx,edgely,edgerx,edgery,v_it,v_edgelx,v_edgely,v_edgerx,v_edgery,\ v_dx,v_dy,v_st,v_at,v_x,v_y,jj,ll,lm) for (k = 0; k < mxy1; k++) { noff = k/mx1; moff = my*noff; noff = mx*(k - mx1*noff); npp = kpic[k]; /* nps = 4*(npp/4); */ nps = (npp >> 2) << 2; npoff = idimp*nppmx*k; nn = nx - noff; nn = mx < nn ? mx : nn; mm = ny - moff; mm = my < mm ? 
my : mm; ih = 0; nh = 0; edgelx = noff; edgerx = noff + nn; edgely = moff; edgery = moff + mm; noff = (ntmax+1)*k; v_edgelx = _mm_set1_ps(edgelx); v_edgely = _mm_set1_ps(edgely); v_edgerx = _mm_set1_ps(edgerx); v_edgery = _mm_set1_ps(edgery); /* clear counters */ /* for (j = 0; j < 8; j++) { */ /* ncl[j+8*k] = 0; */ /* } */ memset((void*)&ncl[8*k],0,8*sizeof(int)); /* loop over particles in tile in groups of 4 */ for (j = 0; j < nps; j+=4) { /* dx = ppart[j+npoff]; */ /* dy = ppart[j+nppmx+npoff]; */ v_dx = _mm_load_ps(&ppart[j+npoff]); v_dy = _mm_load_ps(&ppart[j+nppmx+npoff]); /* find particles going out of bounds */ /* ist = 0; */ v_st = v_zero; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* ist = direction particle is going */ /* if (dx >= edgerx) { */ /* if (dx >= anx) */ /* ppart[j+npoff] = dx - anx; */ /* ist = 2; */ /* } */ v_x = _mm_cmpge_ps(v_dx,v_edgerx); v_y = _mm_cmplt_ps(v_dx,v_edgelx); v_at = _mm_or_ps(v_x,v_y); v_it = _mm_srli_si128((__m128i)v_at,8); v_it = _mm_add_epi64((__m128i)v_at,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); /* execute if either test result is true for any particle */ if (jj[0] != 0) { v_st = _mm_and_ps(v_two,v_x); v_x = _mm_and_ps(v_x,_mm_cmpge_ps(v_dx,v_anx)); /* write output if test result is true for any particle */ v_it = _mm_srli_si128((__m128i)v_x,8); v_it = _mm_add_epi64((__m128i)v_x,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); if (jj[0] != 0) { v_x = _mm_sub_ps(v_dx,_mm_and_ps(v_anx,v_x)); _mm_store_ps(&ppart[j+npoff],v_x); } /* if (dx < edgelx) { */ /* if (dx < 0.0) { */ /* dx += anx; */ /* if (dx < anx) */ /* ist += 1; */ /* else */ /* dx = 0.0; */ /* ppart[j+npoff] = dx; */ /* } */ /* else { */ /* ist += 1; */ /* } */ /* } */ v_at = _mm_and_ps(v_one,v_y); v_x = _mm_and_ps(v_y,_mm_cmplt_ps(v_dx,v_zero)); /* write output if test result is true for any particle */ v_it = _mm_srli_si128((__m128i)v_x,8); v_it = _mm_add_epi64((__m128i)v_x,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); if (jj[0] != 0) { v_x = _mm_add_ps(v_dx,_mm_and_ps(v_anx,v_x)); v_y = _mm_cmplt_ps(v_x,v_anx); v_at = _mm_and_ps(v_at,v_y); v_x = _mm_and_ps(v_x,v_y); _mm_store_ps(&ppart[j+npoff],v_x); } v_st = _mm_add_ps(v_st,v_at); } /* if (dy >= edgery) { */ /* if (dy >= any) */ /* ppart[j+nppmx+npoff] = dy - any; */ /* ist += 6; */ /* } */ v_y = _mm_cmpge_ps(v_dy,v_edgery); v_x = _mm_cmplt_ps(v_dy,v_edgely); v_at = _mm_or_ps(v_x,v_y); v_it = _mm_srli_si128((__m128i)v_at,8); v_it = _mm_add_epi64((__m128i)v_at,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); /* execute if either test result is true for any particle */ if (jj[0] != 0) { v_st = _mm_add_ps(v_st,_mm_and_ps(v_six,v_y)); v_y = _mm_and_ps(v_y,_mm_cmpge_ps(v_dy,v_any)); /* write output if test result is true for any particle */ v_it = _mm_srli_si128((__m128i)v_y,8); v_it = _mm_add_epi64((__m128i)v_y,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); if (jj[0] != 0) { v_y = _mm_sub_ps(v_dy,_mm_and_ps(v_any,v_y)); _mm_store_ps(&ppart[j+nppmx+npoff],v_y); } /* if (dy < edgely) { */ /* if (dy < 0.0) { */ /* dy += any; */ /* if (dy < any) */ /* ist += 3; */ /* else */ /* dy = 0.0; */ /* ppart[j+nppmx+npoff] = dy; */ /* } */ /* else { */ /* ist += 3; */ /* } */ /* } */ v_at = _mm_and_ps(v_three,v_x); v_y = _mm_and_ps(v_x,_mm_cmplt_ps(v_dy,v_zero)); /* write output if test result is true for any particle */ v_it = _mm_srli_si128((__m128i)v_y,8); v_it = 
_mm_add_epi64((__m128i)v_y,v_it); _mm_storel_epi64((__m128i *)&jj[0],v_it); if (jj[0] != 0) { v_y = _mm_add_ps(v_dy,_mm_and_ps(v_any,v_y)); v_x = _mm_cmplt_ps(v_y,v_any); v_at = _mm_and_ps(v_at,v_x); v_y = _mm_and_ps(v_y,v_x); _mm_store_ps(&ppart[j+nppmx+npoff],v_y); } v_st = _mm_add_ps(v_st,v_at); } /* increment counters */ /* if (ist > 0) { */ /* ncl[ist+8*k-1] += 1; */ /* ih += 1; */ /* if (ih <= ntmax) { */ /* ihole[2*(ih+(ntmax+1)*k)] = j + 1; */ /* ihole[1+2*(ih+(ntmax+1)*k)] = ist; */ /* } */ /* else { */ /* nh = 1; */ /* } */ /* } */ _mm_store_si128((__m128i *)ll,_mm_cvttps_epi32(v_st)); /* remove zero ist values and left shift data */ kk = 0; memset((void*)lm,0,8*sizeof(int)); for (i = 0; i < 4; i++) { ist = ll[i]; if (ist > 0) { lm[2*kk] = j + i + 1; lm[1+2*kk] = ist; ncl[ist+8*k-1] += 1; kk += 1; } } if (kk > 0) { if ((ih+kk) > ntmax) { nh = 1; } else { v_it = _mm_load_si128((__m128i *)lm); _mm_storeu_si128((__m128i *)&ihole[2*(ih+1+noff)],v_it); if (kk > 2) { v_it = _mm_load_si128((__m128i *)&lm[4]); _mm_storeu_si128((__m128i *)&ihole[2*(ih+3+noff)],v_it); } } ih += kk; } } /* loop over remaining particles in tile */ for (j = nps; j < npp; j++) { dx = ppart[j+npoff]; dy = ppart[j+nppmx+npoff]; /* find particles going out of bounds */ ist = 0; /* count how many particles are going in each direction in ncl */ /* save their address and destination in ihole */ /* use periodic boundary conditions and check for roundoff error */ /* ist = direction particle is going */ if (dx >= edgerx) { if (dx >= anx) ppart[j+npoff] = dx - anx; ist = 2; } else if (dx < edgelx) { if (dx < 0.0) { dx += anx; if (dx < anx) ist = 1; else dx = 0.0; ppart[j+npoff] = dx; } else { ist = 1; } } if (dy >= edgery) { if (dy >= any) ppart[j+nppmx+npoff] = dy - any; ist += 6; } else if (dy < edgely) { if (dy < 0.0) { dy += any; if (dy < any) ist += 3; else dy = 0.0; ppart[j+nppmx+npoff] = dy; } else { ist += 3; } } if (ist > 0) { ncl[ist+8*k-1] += 1; ih += 1; if (ih <= ntmax) { ihole[2*(ih+noff)] = j + 1; ihole[1+2*(ih+noff)] = ist; } else { nh = 1; } } } /* set error and end of file flag */ if (nh > 0) { *irc = ih; ih = -ih; } ihole[2*noff] = ih; } /* ihole overflow */ if (*irc > 0) return; /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ v_m1 = _mm_set_epi32(0,-1,0,-1); v_m2 = _mm_set_epi32(0,-1,-1,0); #pragma omp parallel for \ private(i,j,k,noff,npoff,nboff,isum,ist,nh,nps,ip,j1,ii,kk, \ v_it,v_is,v_in,lm) for (k = 0; k < mxy1; k++) { npoff = idimp*nppmx*k; nboff = idimp*npbmx*k; noff = (ntmax+1)*k; /* find address offset for ordered ppbuff array */ isum = 0; /* for (j = 0; j < 8; j++) { */ /* ist = ncl[j+8*k]; */ /* ncl[j+8*k] = isum; */ /* isum += ist; */ /* } */ /* perform exclusive prefix scan */ v_is = _mm_setzero_si128(); for (i = 0; i < 8; i+=4) { v_it = _mm_load_si128((__m128i *)&ncl[i+8*k]); /* save last entry */ v_in = _mm_srli_si128(v_it,12); /* shift and add last entry from previous read */ v_it = _mm_add_epi32(v_is,_mm_slli_si128(v_it,4)); /* first pass */ v_is = _mm_slli_si128(_mm_and_si128(v_it,v_m1),4); v_it = _mm_add_epi32(v_is,v_it); /* second pass */ v_is = _mm_shuffle_epi32(v_it,212); v_is = _mm_slli_si128(_mm_and_si128(v_is,v_m2),4); v_it = _mm_add_epi32(v_is,v_it); /* add last sum to next entry */ v_is = _mm_add_epi32(v_in,_mm_srli_si128(v_it,12)); _mm_store_si128((__m128i *)&ncl[i+8*k],v_it); } nh = ihole[2*noff]; /* nps = 4*(nh/4); */ nps = (nh >> 2) << 2; ip = 0; /* loop over particles leaving tile in groups of 4 */ for (j = 0; j < nps; j+=4) { /* 
buffer particles that are leaving tile, in direction order */ /* j1 = ihole[2*(j+1+noff)] - 1; */ /* ist = ihole[1+2*(j+1+noff)]; */ v_it = _mm_loadu_si128((__m128i *)&ihole[2*(j+1+noff)]); _mm_store_si128((__m128i *)lm,v_it); v_it = _mm_loadu_si128((__m128i *)&ihole[2*(j+3+noff)]); _mm_store_si128((__m128i *)&lm[4],v_it); for (kk = 0; kk < 4; kk++) { j1 = lm[2*kk] - 1; ist = lm[1+2*kk]; ii = ncl[ist+8*k-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } } else { ip = 1; } ncl[ist+8*k-1] = ii + 1; } } /* loop over remaining particles leaving tile */ for (j = nps; j < nh; j++) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+noff)] - 1; ist = ihole[1+2*(j+1+noff)]; ii = ncl[ist+8*k-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } } else { ip = 1; } ncl[ist+8*k-1] = ii + 1; } /* set error */ if (ip > 0) *irc = ncl[7+8*k]; } /* ppbuff overflow */ if (*irc > 0) return; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,ii,kk,npp,nps,noff,npoff,nboff,kx,ky,kl,kr,kxl,kxr,ih,nh, \ nn,mm,ncoff,ist,j1,j2,ip,in,v_it,v_is,v_in,v_x,ks,ll,lm) for (k = 0; k < mxy1; k++) { npp = kpic[k]; npoff = idimp*nppmx*k; noff = (ntmax+1)*k; ky = k/mx1; /* loop over tiles in y, assume periodic boundary conditions */ kk = ky*mx1; /* find tile above */ kl = ky - 1; if (kl < 0) kl += my1; kl = kl*mx1; /* find tile below */ kr = ky + 1; if (kr >= my1) kr -= my1; kr = kr*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1; if (kxl < 0) kxl += mx1; kxr = kx + 1; if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ ks[0] = kxr + kk; ks[1] = kxl + kk; ks[2] = kx + kr; ks[3] = kxr + kr; ks[4] = kxl + kr; ks[5] = kx + kl; ks[6] = kxr + kl; ks[7] = kxl + kl; /* loop over directions */ nh = ihole[2*noff]; ncoff = 0; ih = 0; ist = 0; j1 = 0; v_in = _mm_set1_epi32(1); for (ii = 0; ii < 8; ii++) { nboff = idimp*npbmx*ks[ii]; if (ii > 0) ncoff = ncl[ii-1+8*ks[ii]]; /* ip = number of particles coming from direction ii */ ip = ncl[ii+8*ks[ii]] - ncoff; /* nps = 4*(ip/4); */ nps = (ip >> 2) << 2; /* loop over particles in this direction in groups of 4 */ for (j = 0; j < nps; j+=4) { /* insert incoming particles into holes */ /* ih += 1; */ /* if (ih <= nh) { */ /* j1 = ihole[2*(ih+noff)] - 1; */ /* } */ if (ih < nh) { v_it = _mm_loadu_si128((__m128i *)&ihole[2*(ih+1+noff)]); _mm_store_si128((__m128i *)lm,_mm_sub_epi32(v_it,v_in)); } if ((ih+2) < nh) { v_is = _mm_loadu_si128((__m128i *)&ihole[2*(ih+3+noff)]); _mm_store_si128((__m128i *)&lm[4],_mm_sub_epi32(v_is,v_in)); } /* place overflow at end of array */ /* else { */ /* j1 = npp; */ /* npp += 1; */ /* } */ ih += 4; nn = ih - nh; if (nn >= 4) { for (kk = 0; kk < 4; kk++) { lm[2*kk] = npp + kk; } npp += 4; } else if (nn > 0) { nn = nn < 4 ? 
nn : 4; for (kk = 4-nn; kk < 4; kk++) { lm[2*kk] = npp; npp += 1; } } for (i = 0; i < idimp; i++) { /* if (j1 < nppmx) */ /* ppart[j1+nppmx*i+npoff] */ /* = ppbuff[j+ncoff+npbmx*i+nboff]; */ v_x = _mm_loadu_ps(&ppbuff[j+ncoff+npbmx*i+nboff]); for (kk = 0; kk < 4; kk++) { j1 = lm[2*kk]; if (j1 < nppmx) { _mm_store_ss(&ppart[j1+nppmx*i+npoff],v_x); v_x = (__m128)_mm_srli_si128((__m128i)v_x,4); } else { ist = 1; } } } } /* loop over remaining particles in this direction */ for (j = nps; j < ip; j++) { ih += 1; /* insert incoming particles into holes */ if (ih <= nh) { j1 = ihole[2*(ih+noff)] - 1; } /* place overflow at end of array */ else { j1 = npp; npp += 1; } if (j1 < nppmx) { for (i = 0; i < idimp; i++) { ppart[j1+nppmx*i+npoff] = ppbuff[j+ncoff+npbmx*i+nboff]; } } else { ist = 1; } } } /* set error */ if (ist > 0) *irc = j1+1; /* fill up remaining holes in particle array with particles from bottom */ if (ih < nh) { ip = nh - ih; ii = nh; ih += 1; /* move particles from end into remaining holes */ /* holes are processed in increasing order */ /* nps = 4*(ip/4); */ nps = (ip >> 2) << 2; /* loop over particles in groups of 4 */ for (j = 0; j < nps; j+=4) { /* nn = ihole[2*(ii+noff)] - 1; */ v_it = _mm_loadu_si128((__m128i *)&ihole[2*(ii-3+noff)]); _mm_store_si128((__m128i *)ll,_mm_sub_epi32(v_it,v_in)); v_is = _mm_loadu_si128((__m128i *)&ihole[2*(ii-1+noff)]); _mm_store_si128((__m128i *)&ll[4],_mm_sub_epi32(v_is,v_in)); /* j2 = ihole[2*(ih+(ntmax+1)*k)] - 1; */ v_it = _mm_loadu_si128((__m128i *)&ihole[2*(ih+noff)]); _mm_store_si128((__m128i *)lm,_mm_sub_epi32(v_it,v_in)); v_is = _mm_loadu_si128((__m128i *)&ihole[2*(ih+2+noff)]); _mm_store_si128((__m128i *)&lm[4],_mm_sub_epi32(v_is,v_in)); /* holes with locations greater than npp-ip do not need to be filled */ in = 0; mm = 0; nn = ll[6]; j2 = lm[0]; for (kk = 0; kk < 4; kk++) { j1 = npp - (j + kk) - 1; ll[2*kk+1] = nn; lm[2*kk+1] = j2; if (j1==nn) { in += 1; if (in < 4) nn = ll[6-2*in]; } else { mm += 1; if (mm < 4) j2 = lm[2*mm]; } } ii -= in; ih += mm; /* fill holes */ for (i = 0; i < idimp; i++) { ist = npp - j - 1; v_x = _mm_loadu_ps(&ppart[ist-3+nppmx*i+npoff]); v_x = _mm_shuffle_ps(v_x,v_x,27); /* j1 = npp - j - 1; */ /* if (j1==nn) { */ /* ii -= 1; */ /* nn = ihole[2*(ii+noff)] - 1; */ /* } */ for (kk = 0; kk < 4; kk++) { j1 = ist - kk; nn = ll[2*kk+1]; if (j1 != nn) { /* ppart[j2+nppmx*i+npoff] */ /* = ppart[j1+nppmx*i+npoff]; */ _mm_store_ss(&ppart[lm[2*kk+1]+nppmx*i+npoff],v_x); } v_x = (__m128)_mm_srli_si128((__m128i)v_x,4); } } } /* loop over remaining particles */ if (nps < ip) { nn = ihole[2*(ii+noff)] - 1; j2 = ihole[2*(ih+noff)] - 1; } for (j = nps; j < ip; j++) { j1 = npp - j - 1; if (j1==nn) { ii -= 1; nn = ihole[2*(ii+noff)] - 1; } else { for (i = 0; i < idimp; i++) { ppart[j2+nppmx*i+npoff] = ppart[j1+nppmx*i+npoff]; } ih += 1; j2 = ihole[2*(ih+(ntmax+1)*k)] - 1; } } npp -= ip; } kpic[k] = npp; } return; } /*--------------------------------------------------------------------*/ void csse2pporderf2lt(float ppart[], float ppbuff[], int kpic[], int ncl[], int ihole[], int idimp, int nppmx, int mx1, int my1, int npbmx, int ntmax, int *irc) { /* this subroutine sorts particles by x,y grid in tiles of mx, my linear interpolation, with periodic boundary conditions tiles are assumed to be arranged in 2D linear memory the algorithm has 2 steps. first, a prefix scan of ncl is performed and departing particles are buffered in ppbuff in direction order. then we copy the incoming particles from other tiles into ppart.
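as a scalar sketch of the buffering step (this is the remainder loop that follows the vector loop in the body below): j1 = ihole[2*(j+1+noff)] - 1; ist = ihole[1+2*(j+1+noff)]; ii = ncl[ist+8*k-1]; for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } ncl[ist+8*k-1] = ii + 1;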
it assumes that the number, location, and destination of particles leaving a tile have been previously stored in ncl and ihole by the cgppushf2lt procedure. input: all except ppbuff, irc output: ppart, ppbuff, kpic, ncl, irc ppart[k][0][n] = position x of particle n in tile k ppart[k][1][n] = position y of particle n in tile k ppbuff[k][i][n] = i co-ordinate of particle n in tile k kpic[k] = number of particles in tile k ncl[k][i] = number of particles going to destination i, tile k ihole[k][:][0] = location of hole in array left by departing particle ihole[k][:][1] = direction destination of particle leaving hole all for tile k ihole[k][0][0] = ih, number of holes left (error, if negative) idimp = size of phase space = 4 nppmx = maximum number of particles in tile mx1 = (system length in x direction - 1)/mx + 1 my1 = (system length in y direction - 1)/my + 1 npbmx = size of buffer array ppbuff ntmax = size of hole array for particles leaving tiles irc = maximum overflow, returned only if error occurs, when irc > 0 local data */ int mxy1, noff, npoff, npp, nps, nboff, ncoff; int i, j, k, ii, kx, ky, ih, nh, ist, nn, mm, isum; int ip, in, j1, j2, kxl, kxr, kk, kl, kr; __m128i v_it, v_is, v_in, v_m1, v_m2; __m128 v_x; __attribute__((aligned(16))) unsigned int ll[8], lm[8]; int ks[8]; mxy1 = mx1*my1; /* buffer particles that are leaving tile: update ppbuff, ncl */ /* loop over tiles */ v_m1 = _mm_set_epi32(0,-1,0,-1); v_m2 = _mm_set_epi32(0,-1,-1,0); #pragma omp parallel for \ private(i,j,k,noff,npoff,nboff,isum,ist,nh,nps,ip,j1,ii,kk, \ v_it,v_is,v_in,lm) for (k = 0; k < mxy1; k++) { npoff = idimp*nppmx*k; nboff = idimp*npbmx*k; noff = (ntmax+1)*k; /* find address offset for ordered ppbuff array */ isum = 0; /* for (j = 0; j < 8; j++) { */ /* ist = ncl[j+8*k]; */ /* ncl[j+8*k] = isum; */ /* isum += ist; */ /* } */ /* perform exclusive prefix scan */ v_is = _mm_setzero_si128(); for (i = 0; i < 8; i+=4) { v_it = _mm_load_si128((__m128i *)&ncl[i+8*k]); /* save last entry */ v_in = _mm_srli_si128(v_it,12); /* shift and add last entry from previous read */ v_it = _mm_add_epi32(v_is,_mm_slli_si128(v_it,4)); /* first pass */ v_is = _mm_slli_si128(_mm_and_si128(v_it,v_m1),4); v_it = _mm_add_epi32(v_is,v_it); /* second pass */ v_is = _mm_shuffle_epi32(v_it,212); v_is = _mm_slli_si128(_mm_and_si128(v_is,v_m2),4); v_it = _mm_add_epi32(v_is,v_it); /* add last sum to next entry */ v_is = _mm_add_epi32(v_in,_mm_srli_si128(v_it,12)); _mm_store_si128((__m128i *)&ncl[i+8*k],v_it); } nh = ihole[2*noff]; /* nps = 4*(nh/4); */ nps = (nh >> 2) << 2; ip = 0; /* loop over particles leaving tile in groups of 4 */ for (j = 0; j < nps; j+=4) { /* buffer particles that are leaving tile, in direction order */ /* j1 = ihole[2*(j+1+noff)] - 1; */ /* ist = ihole[1+2*(j+1+noff)]; */ v_it = _mm_loadu_si128((__m128i *)&ihole[2*(j+1+noff)]); _mm_store_si128((__m128i *)lm,v_it); v_it = _mm_loadu_si128((__m128i *)&ihole[2*(j+3+noff)]); _mm_store_si128((__m128i *)&lm[4],v_it); for (kk = 0; kk < 4; kk++) { j1 = lm[2*kk] - 1; ist = lm[1+2*kk]; ii = ncl[ist+8*k-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } } else { ip = 1; } ncl[ist+8*k-1] = ii + 1; } } /* loop over remaining particles leaving tile */ for (j = nps; j < nh; j++) { /* buffer particles that are leaving tile, in direction order */ j1 = ihole[2*(j+1+noff)] - 1; ist = ihole[1+2*(j+1+noff)]; ii = ncl[ist+8*k-1]; if (ii < npbmx) { for (i = 0; i < idimp; i++) { ppbuff[ii+npbmx*i+nboff] = ppart[j1+nppmx*i+npoff]; } 
} else { ip = 1; } ncl[ist+8*k-1] = ii + 1; } /* set error */ if (ip > 0) *irc = ncl[7+8*k]; } /* ppbuff overflow */ if (*irc > 0) return; /* copy incoming particles from buffer into ppart: update ppart, kpic */ /* loop over tiles */ #pragma omp parallel for \ private(i,j,k,ii,kk,npp,nps,noff,npoff,nboff,kx,ky,kl,kr,kxl,kxr,ih,nh, \ nn,mm,ncoff,ist,j1,j2,ip,in,v_it,v_is,v_in,v_x,ks,ll,lm) for (k = 0; k < mxy1; k++) { npp = kpic[k]; npoff = idimp*nppmx*k; noff = (ntmax+1)*k; ky = k/mx1; /* loop over tiles in y, assume periodic boundary conditions */ kk = ky*mx1; /* find tile above */ kl = ky - 1; if (kl < 0) kl += my1; kl = kl*mx1; /* find tile below */ kr = ky + 1; if (kr >= my1) kr -= my1; kr = kr*mx1; /* loop over tiles in x, assume periodic boundary conditions */ kx = k - ky*mx1; kxl = kx - 1; if (kxl < 0) kxl += mx1; kxr = kx + 1; if (kxr >= mx1) kxr -= mx1; /* find tile number for different directions */ ks[0] = kxr + kk; ks[1] = kxl + kk; ks[2] = kx + kr; ks[3] = kxr + kr; ks[4] = kxl + kr; ks[5] = kx + kl; ks[6] = kxr + kl; ks[7] = kxl + kl; /* loop over directions */ nh = ihole[2*noff]; ncoff = 0; ih = 0; ist = 0; j1 = 0; v_in = _mm_set1_epi32(1); for (ii = 0; ii < 8; ii++) { nboff = idimp*npbmx*ks[ii]; if (ii > 0) ncoff = ncl[ii-1+8*ks[ii]]; /* ip = number of particles coming from direction ii */ ip = ncl[ii+8*ks[ii]] - ncoff; /* nps = 4*(ip/4); */ nps = (ip >> 2) << 2; /* loop over particles in this direction in groups of 4 */ for (j = 0; j < nps; j+=4) { /* insert incoming particles into holes */ /* ih += 1; */ /* if (ih <= nh) { */ /* j1 = ihole[2*(ih+noff)] - 1; */ /* } */ if (ih < nh) { v_it = _mm_loadu_si128((__m128i *)&ihole[2*(ih+1+noff)]); _mm_store_si128((__m128i *)lm,_mm_sub_epi32(v_it,v_in)); } if ((ih+2) < nh) { v_is = _mm_loadu_si128((__m128i *)&ihole[2*(ih+3+noff)]); _mm_store_si128((__m128i *)&lm[4],_mm_sub_epi32(v_is,v_in)); } /* place overflow at end of array */ /* else { */ /* j1 = npp; */ /* npp += 1; */ /* } */ ih += 4; nn = ih - nh; if (nn >= 4) { for (kk = 0; kk < 4; kk++) { lm[2*kk] = npp + kk; } npp += 4; } else if (nn > 0) { nn = nn < 4 ? 
nn : 4; for (kk = 4-nn; kk < 4; kk++) { lm[2*kk] = npp; npp += 1; } } for (i = 0; i < idimp; i++) { /* if (j1 < nppmx) */ /* ppart[j1+nppmx*i+npoff] */ /* = ppbuff[j+ncoff+npbmx*i+nboff]; */ v_x = _mm_loadu_ps(&ppbuff[j+ncoff+npbmx*i+nboff]); for (kk = 0; kk < 4; kk++) { j1 = lm[2*kk]; if (j1 < nppmx) { _mm_store_ss(&ppart[j1+nppmx*i+npoff],v_x); v_x = (__m128)_mm_srli_si128((__m128i)v_x,4); } else { ist = 1; } } } } /* loop over remaining particles in this direction */ for (j = nps; j < ip; j++) { ih += 1; /* insert incoming particles into holes */ if (ih <= nh) { j1 = ihole[2*(ih+noff)] - 1; } /* place overflow at end of array */ else { j1 = npp; npp += 1; } if (j1 < nppmx) { for (i = 0; i < idimp; i++) { ppart[j1+nppmx*i+npoff] = ppbuff[j+ncoff+npbmx*i+nboff]; } } else { ist = 1; } } } /* set error */ if (ist > 0) *irc = j1+1; /* fill up remaining holes in particle array with particles from bottom */ if (ih < nh) { ip = nh - ih; ii = nh; ih += 1; /* move particles from end into remaining holes */ /* holes are processed in increasing order */ /* nps = 4*(ip/4); */ nps = (ip >> 2) << 2; /* loop over particles in groups of 4 */ for (j = 0; j < nps; j+=4) { /* nn = ihole[2*(ii+noff)] - 1; */ v_it = _mm_loadu_si128((__m128i *)&ihole[2*(ii-3+noff)]); _mm_store_si128((__m128i *)ll,_mm_sub_epi32(v_it,v_in)); v_is = _mm_loadu_si128((__m128i *)&ihole[2*(ii-1+noff)]); _mm_store_si128((__m128i *)&ll[4],_mm_sub_epi32(v_is,v_in)); /* j2 = ihole[2*(ih+(ntmax+1)*k)] - 1; */ v_it = _mm_loadu_si128((__m128i *)&ihole[2*(ih+noff)]); _mm_store_si128((__m128i *)lm,_mm_sub_epi32(v_it,v_in)); v_is = _mm_loadu_si128((__m128i *)&ihole[2*(ih+2+noff)]); _mm_store_si128((__m128i *)&lm[4],_mm_sub_epi32(v_is,v_in)); /* holes with locations greater than npp-ip do not need to be filled */ in = 0; mm = 0; nn = ll[6]; j2 = lm[0]; for (kk = 0; kk < 4; kk++) { j1 = npp - (j + kk) - 1; ll[2*kk+1] = nn; lm[2*kk+1] = j2; if (j1==nn) { in += 1; if (in < 4) nn = ll[6-2*in]; } else { mm += 1; if (mm < 4) j2 = lm[2*mm]; } } ii -= in; ih += mm; /* fill holes */ for (i = 0; i < idimp; i++) { ist = npp - j - 1; v_x = _mm_loadu_ps(&ppart[ist-3+nppmx*i+npoff]); v_x = _mm_shuffle_ps(v_x,v_x,27); /* j1 = npp - j - 1; */ /* if (j1==nn) { */ /* ii -= 1; */ /* nn = ihole[2*(ii+noff)] - 1; */ /* } */ for (kk = 0; kk < 4; kk++) { j1 = ist - kk; nn = ll[2*kk+1]; if (j1 != nn) { /* ppart[j2+nppmx*i+npoff] */ /* = ppart[j1+nppmx*i+npoff]; */ _mm_store_ss(&ppart[lm[2*kk+1]+nppmx*i+npoff],v_x); } v_x = (__m128)_mm_srli_si128((__m128i)v_x,4); } } } /* loop over remaining particles */ if (nps < ip) { nn = ihole[2*(ii+noff)] - 1; j2 = ihole[2*(ih+noff)] - 1; } for (j = nps; j < ip; j++) { j1 = npp - j - 1; if (j1==nn) { ii -= 1; nn = ihole[2*(ii+noff)] - 1; } else { for (i = 0; i < idimp; i++) { ppart[j2+nppmx*i+npoff] = ppart[j1+nppmx*i+npoff]; } ih += 1; j2 = ihole[2*(ih+(ntmax+1)*k)] - 1; } } npp -= ip; } kpic[k] = npp; } return; } /*--------------------------------------------------------------------*/ void csse2bguard2l(float bxy[], int nx, int ny, int nxe, int nye) { /* replicate extended periodic vector field bxy linear interpolation nx/ny = system length in x/y direction nxe = first dimension of field arrays, must be >= nx+1 nye = second dimension of field arrays, must be >= ny+1 requires SSE2, bxy needs to be 16 byte aligned local data */ int j, k, kk; /* copy edges of extended field */ for (k = 0; k < ny; k++) { kk = 4*nxe*k; bxy[4*nx+kk] = bxy[kk]; bxy[1+4*nx+kk] = bxy[1+kk]; bxy[2+4*nx+kk] = bxy[2+kk]; } kk = 4*nxe*ny; for (j = 0; j < nx;
j++) { /* bxy[4*j+kk] = bxy[4*j]; */ /* bxy[1+4*j+kk] = bxy[1+4*j]; */ /* bxy[2+4*j+kk] = bxy[2+4*j]; */ _mm_store_ps(&bxy[4*j+kk],_mm_load_ps(&bxy[4*j])); } bxy[4*nx+kk] = bxy[0]; bxy[1+4*nx+kk] = bxy[1]; bxy[2+4*nx+kk] = bxy[2]; return; } /*--------------------------------------------------------------------*/ void csse2acguard2l(float cu[], int nx, int ny, int nxe, int nye) { /* accumulate extended periodic vector field cu linear interpolation nx/ny = system length in x/y direction nxe = first dimension of field arrays, must be >= nx+1 nye = second dimension of field arrays, must be >= ny+1 requires SSE2, cu needs to be 16 byte aligned local data */ int j, k, kk; __m128 v_cu; /* accumulate edges of extended field */ for (k = 0; k < ny; k++) { kk = 4*nxe*k; cu[kk] += cu[4*nx+kk]; cu[1+kk] += cu[1+4*nx+kk]; cu[2+kk] += cu[2+4*nx+kk]; cu[4*nx+kk] = 0.0; cu[1+4*nx+kk] = 0.0; cu[2+4*nx+kk] = 0.0; } kk = 4*nxe*ny; for (j = 0; j < nx; j++) { /* cu[4*j] += cu[4*j+kk]; */ /* cu[1+4*j] += cu[1+4*j+kk]; */ /* cu[2+4*j] += cu[2+4*j+kk]; */ v_cu = _mm_add_ps(_mm_load_ps(&cu[4*j]),_mm_load_ps(&cu[4*j+kk])); _mm_store_ps(&cu[4*j],v_cu); /* cu[4*j+kk] = 0.0; */ /* cu[1+4*j+kk] = 0.0; */ /* cu[2+4*j+kk] = 0.0; */ _mm_store_ps(&cu[4*j+kk],_mm_setzero_ps()); } cu[0] += cu[4*nx+kk]; cu[1] += cu[1+4*nx+kk]; cu[2] += cu[2+4*nx+kk]; cu[4*nx+kk] = 0.0; cu[1+4*nx+kk] = 0.0; cu[2+4*nx+kk] = 0.0; return; } /*--------------------------------------------------------------------*/ void csse2aguard2l(float q[], int nx, int ny, int nxe, int nye) { /* accumulate extended periodic scalar field q linear interpolation nx/ny = system length in x/y direction nxe = first dimension of field arrays, must be >= nx+1 nye = second dimension of field arrays, must be >= ny+1 requires SSE2, q needs to be 16 byte aligned nxe*ny needs to be a multiple of 4 local data */ int j, k, nxs; __m128 v_q; nxs = 4*(nx/4); /* accumulate edges of extended field */ for (k = 0; k < ny; k++) { q[nxe*k] += q[nx+nxe*k]; q[nx+nxe*k] = 0.0; } /* vector loop over elements in blocks of 4 */ for (j = 0; j < nxs; j+=4) { v_q = _mm_add_ps(_mm_load_ps(&q[j]),_mm_load_ps(&q[j+nxe*ny])); _mm_store_ps(&q[j],v_q); _mm_store_ps(&q[j+nxe*ny],_mm_setzero_ps()); } /* loop over remaining elements */ for (j = nxs; j < nx; j++) { q[j] += q[j+nxe*ny]; q[j+nxe*ny] = 0.0; } q[0] += q[nx+nxe*ny]; q[nx+nxe*ny] = 0.0; return; } /*--------------------------------------------------------------------*/ void csse2mpois23(float complex q[], float complex fxy[], int isign, float complex ffc[], float ax, float ay, float affp, float *we, int nx, int ny, int nxvh, int nyv, int nxhd, int nyhd) { /* this subroutine solves 2-1/2d poisson's equation in fourier space for force/charge (or convolution of electric field over particle shape) with periodic boundary conditions. Zeros out z component. for isign = 0, input: isign,ax,ay,affp,nx,ny,nxvh,nyhd, output: ffc for isign /= 0, input: q,ffc,isign,nx,ny,nxvh,nyhd, output: fxy,we approximate flop count is: 26*nxc*nyc + 12*(nxc + nyc) where nxc = nx/2 - 1, nyc = ny/2 - 1 equation used is: fx[ky][kx] = -sqrt(-1)*kx*g[ky][kx]*s[ky][kx]*q[ky][kx], fy[ky][kx] = -sqrt(-1)*ky*g[ky][kx]*s[ky][kx]*q[ky][kx], fz[ky][kx] = zero, where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers, g[ky][kx] = (affp/(kx**2+ky**2))*s[ky][kx], s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for fx(kx=pi) = fy(kx=pi) = fx(ky=pi) = fy(ky=pi) = 0, and fx(kx=0,ky=0) = fy(kx=0,ky=0) = 0.
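as an illustrative scalar sketch of one interior mode (j,k), matching the remainder loop in the body: at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]); zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I; fxy[4*(j+kj)] = (at1*dnx*(float) j)*zt1; fxy[1+4*(j+kj)] = (dky*at1)*zt1; fxy[2+4*(j+kj)] = zero; here at1 = g*s and zt1 = -sqrt(-1)*q, so these lines realize the equations above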
q[k][j] = complex charge density for fourier mode (j,k) fxy[k][j][0] = x component of complex force/charge, fxy[k][j][1] = y component of complex force/charge, fxy[k][j][2] = zero, all for fourier mode (j,k) if isign = 0, form factor array is prepared if isign is not equal to 0, force/charge is calculated cimag(ffc[k][j]) = finite-size particle shape factor s for fourier mode (j,k) creal(ffc[k][j]) = potential green's function g for fourier mode (j,k) ax/ay = half-width of particle in x/y direction affp = normalization constant = nx*ny/np, where np=number of particles electric field energy is also calculated, using we = nx*ny*sum((affp/(kx**2+ky**2))*|q[ky][kx]*s[ky][kx]|**2) nx/ny = system length in x/y direction nxvh = first dimension of field arrays, must be >= nxh nyv = second dimension of field arrays, must be >= ny nxhd = first dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh requires SSE2, q, fxy, ffc need to be 16 byte aligned nxhd, nxvh need to be a multiple of 2 fxy needs to have 4 components local data */ int nxh, nyh, nxhs, j, k, k1, kk, kj, it; float dnx, dny, dkx, dky, at1, at2, at3, at4; float complex zero, zt1, zt2; double wp, sum1; __m128i v_j, v_it; __m128 v_dnx, v_dny, v_dky, v_at1, v_at2, v_at3, v_at4; __m128 v_zero, v_m, v_zt1, v_zt2, v_zt3, v_zt4; __m128d v_wp, v_d; __attribute__((aligned(16))) double dd[2]; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; nxhs = 2*(nxh/2); dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; zero = 0.0 + 0.0*_Complex_I; v_j = _mm_set_epi32(1,1,0,0); v_dnx = _mm_set1_ps(dnx); v_dny = _mm_set1_ps(dny); v_zero = _mm_set1_ps(0.0f); v_m = _mm_set_ps(1.0f,-1.0f,1.0f,-1.0f); if (isign != 0) goto L30; /* prepare form factor array */ for (k = 0; k < nyh; k++) { dky = dny*(float) k; kk = nxhd*k; at1 = dky*dky; at2 = pow((dky*ay),2); for (j = 0; j < nxh; j++) { dkx = dnx*(float) j; at3 = dkx*dkx + at1; at4 = exp(-0.5*(pow((dkx*ax),2) + at2)); if (at3==0.0) { ffc[j+kk] = affp + 1.0*_Complex_I; } else { ffc[j+kk] = (affp*at4/at3) + at4*_Complex_I; } } } return; /* calculate force/charge and sum field energy */ L30: sum1 = 0.0; /* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */ #pragma omp parallel for \ private(j,k,k1,kk,kj,at1,at2,at3,zt1,zt2,wp,v_it,v_dky,v_at1,v_at2, \ v_at3,v_at4,v_zt1,v_zt2,v_zt3,v_zt4,v_wp,v_d,dd) \ reduction(+:sum1) for (k = 1; k < nyh; k++) { /* dky = dny*(float) k; */ v_dky = _mm_mul_ps(v_dny,_mm_cvtepi32_ps(_mm_set1_epi32(k))); kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; wp = 0.0; v_wp = _mm_set1_pd(0.0); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]); */ v_at1 = _mm_load_ps((float *)&ffc[j+kk]); v_at1 = _mm_mul_ps(v_at1,_mm_shuffle_ps(v_at1,v_at1,177)); /* at2 = at1*dnx*(float) j; */ v_it = _mm_add_epi32(_mm_set1_epi32(j),v_j); v_at2 = _mm_mul_ps(v_dnx,_mm_cvtepi32_ps(v_it)); v_at2 = _mm_mul_ps(v_at1,v_at2); /* at3 = dky*at1; */ v_at3 = _mm_mul_ps(v_dky,v_at1); /* zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I; */ v_zt1 = _mm_load_ps((float *)&q[j+kj]); v_zt1 = _mm_mul_ps(v_zt1,v_m); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I; */ v_zt2 = _mm_load_ps((float *)&q[j+k1]); v_zt2 = _mm_mul_ps(v_zt2,v_m); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); /* zero out kx = 0 mode */ if (j==0) { v_at4 = _mm_castsi128_ps(_mm_set_epi32(-1,-1,0,0)); v_zt1 = _mm_and_ps(v_zt1,v_at4); v_zt2 = _mm_and_ps(v_zt2,v_at4); } /* fxy[4*(j+kj)] = at2*zt1; 
*/ /* fxy[1+4*(j+kj)] = at3*zt1; */ /* fxy[2+4*(j+kj)] = zero; */ v_at4 = _mm_mul_ps(v_at2,v_zt1); v_zt4 = _mm_mul_ps(v_at3,v_zt1); /* reorder write */ v_zt3 = _mm_shuffle_ps(v_at4,v_zt4,68); v_zt4 = _mm_shuffle_ps(v_at4,v_zt4,238); _mm_store_ps((float *)&fxy[4*(j+kj)],v_zt3); _mm_store_ps((float *)&fxy[2+4*(j+kj)],v_zero); _mm_store_ps((float *)&fxy[4*(j+1+kj)],v_zt4); _mm_store_ps((float *)&fxy[2+4*(j+1+kj)],v_zero); /* fxy[4*(j+k1)] = at2*zt2; */ /* fxy[1+4*(j+k1)] = -at3*zt2; */ /* fxy[2+4*(j+k1)] = zero; */ v_at4 = _mm_mul_ps(v_at2,v_zt2); v_zt4 = _mm_sub_ps(v_zero,_mm_mul_ps(v_at3,v_zt2)); /* reorder write */ v_zt3 = _mm_shuffle_ps(v_at4,v_zt4,68); v_zt4 = _mm_shuffle_ps(v_at4,v_zt4,238); _mm_store_ps((float *)&fxy[4*(j+k1)],v_zt3); _mm_store_ps((float *)&fxy[2+4*(j+k1)],v_zero); _mm_store_ps((float *)&fxy[4*(j+1+k1)],v_zt4); _mm_store_ps((float *)&fxy[2+4*(j+1+k1)],v_zero); /* at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1])); */ /* wp += (double) at1; */ v_at4 = _mm_mul_ps(v_zt1,v_zt1); v_at4 = _mm_add_ps(v_at4,_mm_mul_ps(v_zt2,v_zt2)); v_at4 = _mm_mul_ps(v_at1,v_at4); /* convert to double precision before accumulating */ v_d = _mm_cvtps_pd(v_at4); v_wp = _mm_add_pd(v_wp,v_d); v_it = _mm_srli_si128((__m128i)v_at4,8); v_d = _mm_cvtps_pd((__m128)v_it); v_wp = _mm_add_pd(v_wp,v_d); } /* loop over remaining elements */ it = 1 > nxhs ? 1 : nxhs; #pragma ivdep for (j = it; j < nxh; j++) { at1 = crealf(ffc[j+kk])*cimagf(ffc[j+kk]); at2 = at1*dnx*(float) j; at3 = dky*at1; zt1 = cimagf(q[j+kj]) - crealf(q[j+kj])*_Complex_I; zt2 = cimagf(q[j+k1]) - crealf(q[j+k1])*_Complex_I; fxy[4*(j+kj)] = at2*zt1; fxy[1+4*(j+kj)] = at3*zt1; fxy[2+4*(j+kj)] = zero; fxy[4*(j+k1)] = at2*zt2; fxy[1+4*(j+k1)] = -at3*zt2; fxy[2+4*(j+k1)] = zero; at1 = at1*(q[j+kj]*conjf(q[j+kj]) + q[j+k1]*conjf(q[j+k1])); wp += (double) at1; } /* sum1 += wp; */ _mm_store_pd(&dd[0],v_wp); for (j = 1; j < 2; j++) { dd[0] += dd[j]; } sum1 += (wp + dd[0]); } wp = 0.0; v_wp = _mm_set1_pd(0.0); /* mode numbers kx = 0, nx/2 */ #pragma ivdep for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = nxvh*k; k1 = nxvh*ny - kj; at1 = crealf(ffc[kk])*cimagf(ffc[kk]); at3 = at1*dny*(float) k; zt1 = cimagf(q[kj]) - crealf(q[kj])*_Complex_I; fxy[4*kj] = zero; fxy[1+4*kj] = at3*zt1; fxy[2+4*kj] = zero; fxy[4*k1] = zero; fxy[1+4*k1] = zero; fxy[2+4*k1] = zero; at1 = at1*(q[kj]*conjf(q[kj])); wp += (double) at1; } /* mode numbers ky = 0, ny/2 */ k1 = 4*nxvh*nyh; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhs; j+=2) { /* at1 = crealf(ffc[j])*cimagf(ffc[j]); */ v_at1 = _mm_load_ps((float *)&ffc[j]); v_at1 = _mm_mul_ps(v_at1,_mm_shuffle_ps(v_at1,v_at1,177)); /* at2 = at1*dnx*(float) j; */ v_it = _mm_add_epi32(_mm_set1_epi32(j),v_j); v_at2 = _mm_mul_ps(v_dnx,_mm_cvtepi32_ps(v_it)); v_at2 = _mm_mul_ps(v_at1,v_at2); /* zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; */ v_zt1 = _mm_load_ps((float *)&q[j]); v_zt1 = _mm_mul_ps(v_zt1,v_m); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* zero out kx = 0 mode */ if (j==0) { v_at4 = _mm_castsi128_ps(_mm_set_epi32(-1,-1,0,0)); v_zt1 = _mm_and_ps(v_zt1,v_at4); } /* fxy[4*j] = at2*zt1; */ /* fxy[1+4*j] = zero; */ /* fxy[2+4*j] = zero; */ v_at4 = _mm_mul_ps(v_at2,v_zt1); /* reorder write */ v_zt3 = _mm_shuffle_ps(v_at4,v_zero,68); v_zt4 = _mm_shuffle_ps(v_at4,v_zero,238); _mm_store_ps((float *)&fxy[4*j],v_zt3); _mm_store_ps((float *)&fxy[4*j+2],v_zero); _mm_store_ps((float *)&fxy[4*j+4],v_zt4); _mm_store_ps((float *)&fxy[4*j+6],v_zero); /* fxy[4*j+k1] = zero; */ /* fxy[1+4*j+k1] = zero; */ /* 
fxy[2+4*j+k1] = zero; */ _mm_store_ps((float *)&fxy[4*j+k1],v_zero); _mm_store_ps((float *)&fxy[4*j+2+k1],v_zero); _mm_store_ps((float *)&fxy[4*j+4+k1],v_zero); _mm_store_ps((float *)&fxy[4*j+6+k1],v_zero); /* at1 = at1*(q[j]*conjf(q[j])); */ /* wp += (double) at1; */ v_at4 = _mm_mul_ps(v_at1,_mm_mul_ps(v_zt1,v_zt1)); /* convert to double precision before accumulating */ v_d = _mm_cvtps_pd(v_at4); v_wp = _mm_add_pd(v_wp,v_d); v_it = _mm_srli_si128((__m128i)v_at4,8); v_d = _mm_cvtps_pd((__m128)v_it); v_wp = _mm_add_pd(v_wp,v_d); } /* loop over remaining elements */ it = 1 > nxhs ? 1 : nxhs; #pragma ivdep for (j = it; j < nxh; j++) { at1 = crealf(ffc[j])*cimagf(ffc[j]); at2 = at1*dnx*(float) j; zt1 = cimagf(q[j]) - crealf(q[j])*_Complex_I; fxy[4*j] = at2*zt1; fxy[1+4*j] = zero; fxy[2+4*j] = zero; fxy[4*j+k1] = zero; fxy[1+4*j+k1] = zero; fxy[2+4*j+k1] = zero; at1 = at1*(q[j]*conjf(q[j])); wp += (double) at1; } fxy[0] = zero; fxy[1] = zero; fxy[2] = zero; fxy[k1] = zero; fxy[1+k1] = zero; fxy[2+k1] = zero; sum1 += wp; /* *we = wp*(float) (nx*ny); */ _mm_store_pd(&dd[0],v_wp); for (j = 1; j < 2; j++) { dd[0] += dd[j]; } *we = (sum1 + dd[0])*(float) (nx*ny); return; } /*--------------------------------------------------------------------*/ void csse2mcuperp2(float complex cu[], int nx, int ny, int nxvh, int nyv) { /* this subroutine calculates the transverse current in fourier space input: all, output: cu approximate flop count is: 36*nxc*nyc and nxc*nyc divides where nxc = nx/2 - 1, nyc = ny/2 - 1 the transverse current is calculated using the equation: cux[ky][kx] = cux[ky][kx] -kx*(kx*cux[ky][kx]+ky*cuy[ky][kx])/(kx*kx+ky*ky) cuy[ky][kx] = cuy[ky][kx] -ky*(kx*cux[ky][kx]+ky*cuy[ky][kx])/(kx*kx+ky*ky) where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers, except for cux(kx=pi) = cuy(kx=pi) = 0, cux(ky=pi) = cuy(ky=pi) = 0, and cux(kx=0,ky=0) = cuy(kx=0,ky=0) = 0. cu[k][j][i] = complex current density for fourier mode (j,k) nx/ny = system length in x/y direction nxvh = first dimension of current array, must be >= nxh nyv = second dimension of current array, must be >= ny requires SSE2, cu needs to be 16 byte aligned cu needs to have 4 components local data */ int nxh, nyh, j, k, k1, kj; float dnx, dny; float complex zero; __m128 v_dnx, v_dny, v_dkx, v_dky, v_dky2, v_at1, v_zt1, v_zt2; __m128 v_zero, v_one, v_n, v_at; nxh = nx/2; nyh = 1 > ny/2 ? 
1 : ny/2; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; zero = 0.0 + 0.0*_Complex_I; v_dnx = _mm_set1_ps(dnx); v_dny = _mm_set1_ps(dny); v_zero = _mm_set1_ps(0.0f); v_one = _mm_set1_ps(1.0f); v_n = _mm_set_ps(-1.0f,-1.0f,1.0f,1.0f); /* calculate transverse part of current */ /* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */ #pragma omp parallel for private(j,k,k1,kj,v_dkx,v_dky,v_dky2,v_at1, \ v_zt1,v_zt2,v_at) for (k = 1; k < nyh; k++) { /* dky = dny*(float) k; */ v_dky = _mm_mul_ps(v_dny,_mm_cvtepi32_ps(_mm_set1_epi32(k))); /* dky2 = dky*dky; */ v_dky2 = _mm_mul_ps(v_dky,v_dky); kj = 4*nxvh*k; k1 = 4*nxvh*ny - kj; for (j = 1; j < nxh; j++) { /* dkx = dnx*(float) j; */ v_dkx = _mm_mul_ps(v_dnx,_mm_cvtepi32_ps(_mm_set1_epi32(j))); /* at1 = 1./(dkx*dkx + dky2); */ v_at1 = _mm_add_ps(_mm_mul_ps(v_dkx,v_dkx),v_dky2); v_at1 = _mm_div_ps(v_one,v_at1); /* zt1 = at1*(dkx*cu[4*j+kj] + dky*cu[1+4*j+kj]); */ v_dkx = _mm_movelh_ps(v_dkx,v_dky); v_zt2 = _mm_load_ps((float *)&cu[4*j+kj]); v_zt1 = _mm_mul_ps(v_at1,_mm_mul_ps(v_dkx,v_zt2)); v_at = _mm_movelh_ps(v_zt1,v_zt1); v_zt1 = _mm_add_ps(_mm_movehl_ps(v_zt1,v_zt1),v_at); /* cu[4*j+kj] -= dkx*zt1; */ /* cu[1+4*j+kj] -= dky*zt1; */ v_zt2 = _mm_sub_ps(v_zt2,_mm_mul_ps(v_dkx,v_zt1)); _mm_store_ps((float *)&cu[4*j+kj],v_zt2); /* zt1 = at1*(dkx*cu[4*j+k1] - dky*cu[1+4*j+k1]); */ v_dkx = _mm_mul_ps(v_dkx,v_n); v_zt2 = _mm_load_ps((float *)&cu[4*j+k1]); v_zt1 = _mm_mul_ps(v_at1,_mm_mul_ps(v_dkx,v_zt2)); v_at = _mm_movelh_ps(v_zt1,v_zt1); v_zt1 = _mm_add_ps(_mm_movehl_ps(v_zt1,v_zt1),v_at); /* cu[4*j+k1] -= dkx*zt1; */ /* cu[1+4*j+k1] += dky*zt1; */ v_zt2 = _mm_sub_ps(v_zt2,_mm_mul_ps(v_dkx,v_zt1)); _mm_store_ps((float *)&cu[4*j+k1],v_zt2); } /* mode numbers kx = 0, nx/2 */ cu[1+kj] = zero; cu[k1] = zero; cu[1+k1] = zero; } /* mode numbers ky = 0, ny/2 */ k1 = 4*nxvh*nyh; for (j = 1; j < nxh; j++) { cu[4*j] = zero; /* cu[4*j+k1] = zero; */ /* cu[1+4*j+k1] = zero; */ _mm_store_ps((float *)&cu[4*j+k1],v_zero); } /* cu[0] = zero; */ /* cu[1] = zero; */ _mm_store_ps((float *)&cu[0],v_zero); /* cu[k1] = zero; */ /* cu[1+k1] = zero; */ _mm_store_ps((float *)&cu[k1],v_zero); return; } /*--------------------------------------------------------------------*/ void csse2mibpois23(float complex cu[], float complex bxy[], float complex ffc[], float ci, float *wm, int nx, int ny, int nxvh, int nyv, int nxhd, int nyhd) { /* this subroutine solves 2-1/2d poisson's equation in fourier space for magnetic field, with periodic boundary conditions. input: cu,ffc,ci,nx,ny,nxv,nyhd, output: bxy,wm approximate flop count is: 90*nxc*nyc + 40*(nxc + nyc) where nxc = nx/2 - 1, nyc = ny/2 - 1 the magnetic field is calculated using the equations: bx[ky][kx] = ci*ci*sqrt(-1)*g[ky][kx]*ky*cuz[ky][kx], by[ky][kx] = -ci*ci*sqrt(-1)*g[ky][kx]*kx*cuz[ky][kx], bz[ky][kx] = ci*ci*sqrt(-1)*g[ky][kx]*(kx*cuy[ky][kx]-ky*cux[ky][kx]), where kx = 2pi*j/nx, ky = 2pi*k/ny, and j,k = fourier mode numbers, g[ky][kx] = (affp/(kx**2+ky**2))*s[ky][kx], s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), except for bx(kx=pi) = by(kx=pi) = bz(kx=pi) = bx(ky=pi) = by(ky=pi) = bz(ky=pi) = 0, and bx(kx=0,ky=0) = by(kx=0,ky=0) = bz(kx=0,ky=0) = 0. 
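as an illustrative scalar sketch for one interior mode (the commented scalar code that the vector loop below implements), with at1 = ci*ci*crealf(ffc[j+kk]), at2 = at1*dnx*(float) j, at3 = dky*at1, and zt1 = sqrt(-1)*cuz, zt2 = sqrt(-1)*cuy, zt3 = sqrt(-1)*cux: bxy[4*j+kj] = at3*zt1; bxy[1+4*j+kj] = -at2*zt1; bxy[2+4*j+kj] = at2*zt2 - at3*zt3;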
cu[k][j][i] = complex current density for fourier mode (j,k) bxy[k][j][i] = i component of complex magnetic field all for fourier mode (j,k) cimag(ffc[k][j]) = finite-size particle shape factor s for fourier mode (j,k) creal(ffc[k][j]) = potential green's function g for fourier mode (j,k) ci = reciprocal of velocity of light magnetic field energy is also calculated, using wm = nx*ny*sum((affp/(kx**2+ky**2))*ci*ci* |cu[ky][kx]*s[ky][kx]|**2), where affp = normalization constant = nx*ny/np, where np=number of particles this expression is valid only if the current is divergence-free nx/ny = system length in x/y direction nxvh = first dimension of field arrays, must be >= nxh nyv = second dimension of field arrays, must be >= ny nxhd = first dimension of form factor array, must be >= nxh nyhd = second dimension of form factor array, must be >= nyh requires SSE2, cu, bxy, ffc need to be 16 byte aligned nxhd, nxvh need to be a multiple of 2 cu, bxy need to have 4 components local data */ int nxh, nyh, j, k, k1, kk, kj; float dnx, dny, ci2, at1, at3; float complex zero, zt1, zt3; double wp, sum1; __m128i v_it; __m128 v_dnx, v_dny, v_dky, v_ci2, v_at1, v_at2, v_at3, v_at4; __m128 v_zero, v_n, v_m, v_zt1, v_zt2, v_zt3, v_zt4; __m128d v_wp, v_d; __attribute__((aligned(16))) double dd[2]; nxh = nx/2; nyh = 1 > ny/2 ? 1 : ny/2; dnx = 6.28318530717959/(float) nx; dny = 6.28318530717959/(float) ny; zero = 0.0 + 0.0*_Complex_I; ci2 = ci*ci; v_dnx = _mm_set1_ps(dnx); v_dny = _mm_set1_ps(dny); v_zero = _mm_set1_ps(0.0f); v_ci2 = _mm_set1_ps(ci2); v_n = _mm_set_ps(-1.0f,1.0f,-1.0f,1.0f); v_m = _mm_set_ps(1.0f,1.0f,-1.0f,-1.0f); /* calculate magnetic field and sum field energy */ sum1 = 0.0; /* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */ #pragma omp parallel for \ private(j,k,k1,kk,kj,v_it,v_dky,v_at1,v_at2,v_at3,v_at4,v_zt1,v_zt2, \ v_zt3,v_zt4,v_wp,v_d,dd) \ reduction(+:sum1) for (k = 1; k < nyh; k++) { /* dky = dny*(float) k; */ v_dky = _mm_mul_ps(v_dny,_mm_cvtepi32_ps(_mm_set1_epi32(k))); kk = nxhd*k; kj = 4*nxvh*k; k1 = 4*nxvh*ny - kj; v_wp = _mm_set1_pd(0.0); for (j = 1; j < nxh; j++) { /* at1 = ci2*crealf(ffc[j+kk]); */ v_at3 = _mm_loadl_pi(v_zero,(__m64 *)&ffc[j+kk]); v_at1 = _mm_mul_ps(v_ci2,_mm_shuffle_ps(v_at3,v_at3,0)); /* at2 = at1*dnx*(float) j; */ /* at3 = dky*at1; */ v_at2 = _mm_mul_ps(v_dnx,_mm_cvtepi32_ps(_mm_set1_epi32(j))); v_at2 = _mm_sub_ps(v_zero,v_at2); v_at2 = _mm_mul_ps(v_at1,_mm_movelh_ps(v_dky,v_at2)); /* at1 = at1*cimagf(ffc[j+kk]); */ v_at3 = _mm_movelh_ps(v_at3,v_at3); v_at1 = _mm_mul_ps(v_at1,_mm_shuffle_ps(v_at3,v_at3,245)); /* zt1 = -cimagf(cu[2+4*j+kj]) */ /* + crealf(cu[2+4*j+kj])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*j+kj]) */ /* + crealf(cu[1+4*j+kj])*_Complex_I; */ /* zt3 = -cimagf(cu[4*j+kj]) + crealf(cu[4*j+kj])*_Complex_I; */ v_zt3 = _mm_load_ps((float *)&cu[4*j+kj]); v_zt2 = _mm_mul_ps(v_zt3,v_n); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); v_zt4 = _mm_load_ps((float *)&cu[2+4*j+kj]); v_zt1 = _mm_mul_ps(v_zt4,v_n); v_zt1 = _mm_movelh_ps(v_zt1,v_zt1); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* bxy[4*j+kj] = at3*zt1; */ /* bxy[1+4*j+kj] = -at2*zt1; */ v_zt1 = _mm_mul_ps(v_at2,v_zt1); _mm_store_ps((float *)&bxy[4*j+kj],v_zt1); /* bxy[2+4*j+kj] = at2*zt2 - at3*zt3; */ v_zt2 = _mm_sub_ps(v_zero,_mm_mul_ps(v_at2,v_zt2)); v_at3 = _mm_movelh_ps(v_zt2,v_zt2); v_zt2 = _mm_add_ps(_mm_movehl_ps(v_zt2,v_zt2),v_at3); v_zt2 = _mm_movelh_ps(v_zt2,v_zero); _mm_store_ps((float *)&bxy[2+4*j+kj],v_zt2); /* at1 = at1*(cu[4*j+kj]*conjf(cu[4*j+kj]) */ /* + 
cu[1+4*j+kj]*conjf(cu[1+4*j+kj]) */ /* + cu[2+4*j+kj]*conjf(cu[2+4*j+kj])); */ /* wp += (double) at1; */ v_zt3 = _mm_mul_ps(v_zt3,v_zt3); v_zt4 = _mm_movelh_ps(v_zt4,v_zero); v_at4 = _mm_add_ps(v_zt3,_mm_mul_ps(v_zt4,v_zt4)); /* zt1 = -cimagf(cu[2+4*j+k1]) */ /* + crealf(cu[2+4*j+k1])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*j+k1]) */ /* + crealf(cu[1+4*j+k1])*_Complex_I; */ /* zt3 = -cimagf(cu[4*j+k1]) + crealf(cu[4*j+k1])*_Complex_I; */ v_zt3 = _mm_load_ps((float *)&cu[4*j+k1]); v_zt2 = _mm_mul_ps(v_zt3,v_n); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); v_zt4 = _mm_load_ps((float *)&cu[2+4*j+k1]); v_zt1 = _mm_movelh_ps(v_zt4,v_zt4); v_zt1 = _mm_mul_ps(v_zt1,v_n); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* bxy[4*j+k1] = -at3*zt1; */ /* bxy[1+4*j+k1] = -at2*zt1; */ v_at2 = _mm_mul_ps(v_at2,v_m); v_zt1 = _mm_mul_ps(v_at2,v_zt1); _mm_store_ps((float *)&bxy[4*j+k1],v_zt1); /* bxy[2+4*j+k1] = at2*zt2 + at3*zt3; */ v_zt2 = _mm_sub_ps(v_zero,_mm_mul_ps(v_at2,v_zt2)); v_at3 = _mm_movelh_ps(v_zt2,v_zt2); v_zt2 = _mm_add_ps(_mm_movehl_ps(v_zt2,v_zt2),v_at3); v_zt2 = _mm_movelh_ps(v_zt2,v_zero); _mm_store_ps((float *)&bxy[2+4*j+k1],v_zt2); /* at1 = at1*(cu[4*j+k1]*conjf(cu[4*j+k1]) */ /* + cu[1+4*j+k1]*conjf(cu[1+4*j+k1]) */ /* + cu[2+4*j+k1]*conjf(cu[2+4*j+k1])); */ /* wp += (double) at1; */ v_zt3 = _mm_mul_ps(v_zt3,v_zt3); v_zt4 = _mm_movelh_ps(v_zt4,v_zero); v_zt3 = _mm_add_ps(v_zt3,_mm_mul_ps(v_zt4,v_zt4)); v_at4 = _mm_mul_ps(v_at1,_mm_add_ps(v_at4,v_zt3)); /* convert to double precision before accumulating */ v_d = _mm_cvtps_pd(v_at4); v_wp = _mm_add_pd(v_wp,v_d); v_it = _mm_srli_si128((__m128i)v_at4,8); v_d = _mm_cvtps_pd((__m128)v_it); v_wp = _mm_add_pd(v_wp,v_d); } /* sum1 += wp; */ _mm_store_pd(&dd[0],v_wp); for (j = 1; j < 2; j++) { dd[0] += dd[j]; } sum1 += dd[0]; } wp = 0.0; v_wp = _mm_set1_pd(0.0); /* mode numbers kx = 0, nx/2 */ #pragma ivdep for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = 4*nxvh*k; k1 = 4*nxvh*ny - kj; at1 = ci2*crealf(ffc[kk]); at3 = at1*dny*(float) k; at1 = at1*cimagf(ffc[kk]); zt1 = -cimagf(cu[2+kj]) + crealf(cu[2+kj])*_Complex_I; zt3 = -cimagf(cu[kj]) + crealf(cu[kj])*_Complex_I; bxy[kj] = at3*zt1; bxy[1+kj] = zero; bxy[2+kj] = -at3*zt3; bxy[k1] = zero; bxy[1+k1] = zero; bxy[2+k1] = zero; at1 = at1*(cu[kj]*conjf(cu[kj]) + cu[1+kj]*conjf(cu[1+kj]) + cu[2+kj]*conjf(cu[2+kj])); wp += (double) at1; } sum1 += wp; /* mode numbers ky = 0, ny/2 */ k1 = 4*nxvh*nyh; for (j = 1; j < nxh; j++) { /* at1 = ci2*crealf(ffc[j]); */ v_at3 = _mm_loadl_pi(v_zero,(__m64 *)&ffc[j]); v_at1 = _mm_mul_ps(v_ci2,_mm_shuffle_ps(v_at3,v_at3,0)); /* at2 = at1*dnx*(float) j; */ v_at2 = _mm_mul_ps(v_dnx,_mm_cvtepi32_ps(_mm_set1_epi32(j))); v_at2 = _mm_sub_ps(v_zero,v_at2); v_at2 = _mm_mul_ps(v_at1,_mm_movelh_ps(v_zero,v_at2)); /* at1 = at1*cimagf(ffc[j]); */ v_at3 = _mm_movelh_ps(v_at3,v_at3); v_at1 = _mm_mul_ps(v_at1,_mm_shuffle_ps(v_at3,v_at3,245)); /* zt1 = -cimagf(cu[2+4*j]) + crealf(cu[2+4*j])*_Complex_I; */ /* zt2 = -cimagf(cu[1+4*j]) + crealf(cu[1+4*j])*_Complex_I; */ v_zt3 = _mm_load_ps((float *)&cu[4*j]); v_zt2 = _mm_mul_ps(v_zt3,v_n); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); v_zt4 = _mm_load_ps((float *)&cu[2+4*j]); v_zt1 = _mm_mul_ps(v_zt4,v_n); v_zt1 = _mm_movelh_ps(v_zt1,v_zt1); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* bxy[4*j] = zero; */ /* bxy[1+4*j] = -at2*zt1; */ v_zt1 = _mm_mul_ps(v_at2,v_zt1); _mm_store_ps((float *)&bxy[4*j],v_zt1); /* bxy[2+4*j] = at2*zt2; */ v_zt2 = _mm_sub_ps(v_zero,_mm_mul_ps(v_at2,v_zt2)); v_zt2 = _mm_movehl_ps(v_zt2,v_zt2); v_zt2 = 
_mm_movelh_ps(v_zt2,v_zero);
      _mm_store_ps((float *)&bxy[2+4*j],v_zt2);
/* bxy[4*j+k1] = zero; */
/* bxy[1+4*j+k1] = zero; */
/* bxy[2+4*j+k1] = zero; */
      _mm_store_ps((float *)&bxy[4*j+k1],v_zero);
      _mm_store_ps((float *)&bxy[2+4*j+k1],v_zero);
/* at1 = at1*(cu[4*j]*conjf(cu[4*j]) + cu[1+4*j]*conjf(cu[1+4*j]) */
/*      + cu[2+4*j]*conjf(cu[2+4*j]));                            */
/* wp += (double) at1; */
      v_zt3 = _mm_mul_ps(v_zt3,v_zt3);
      v_zt4 = _mm_movelh_ps(v_zt4,v_zero);
      v_at4 = _mm_add_ps(v_zt3,_mm_mul_ps(v_zt4,v_zt4));
      v_at4 = _mm_mul_ps(v_at1,v_at4);
/* convert to double precision before accumulating */
      v_d = _mm_cvtps_pd(v_at4);
      v_wp = _mm_add_pd(v_wp,v_d);
      v_it = _mm_srli_si128((__m128i)v_at4,8);
      v_d = _mm_cvtps_pd((__m128)v_it);
      v_wp = _mm_add_pd(v_wp,v_d);
   }
/* bxy[0] = zero; */
/* bxy[1] = zero; */
/* bxy[2] = zero; */
   _mm_store_ps((float *)&bxy[0],v_zero);
   _mm_store_ps((float *)&bxy[2],v_zero);
/* bxy[k1] = zero; */
/* bxy[1+k1] = zero; */
/* bxy[2+k1] = zero; */
   _mm_store_ps((float *)&bxy[k1],v_zero);
   _mm_store_ps((float *)&bxy[2+k1],v_zero);
/* *wm = wp*(float) (nx*ny); */
   _mm_store_pd(&dd[0],v_wp);
   for (j = 1; j < 2; j++) {
      dd[0] += dd[j];
   }
   *wm = (sum1 + dd[0])*(float) (nx*ny);
   return;
}
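/* Scalar sketch (illustrative, not called by the library) of the
   per-mode update csse2mibpois23 vectorizes: b = ci*ci*g*sqrt(-1)*(k x cu)
   for one fourier mode (kx,ky).  The function name is hypothetical; g and
   ci2 stand for crealf(ffc[j+kk]) and ci*ci.  Relies on <complex.h>,
   included earlier in this file. */
static void cibpois_mode_sketch(float complex b[3],
                                const float complex cu[3],
                                float dkx, float dky, float g, float ci2) {
   float at1 = ci2*g;
   float at2 = at1*dkx;
   float at3 = at1*dky;
/* multiplying by _Complex_I rotates each mode by 90 degrees */
   float complex zt1 = _Complex_I*cu[2];
   float complex zt2 = _Complex_I*cu[1];
   float complex zt3 = _Complex_I*cu[0];
   b[0] = at3*zt1;              /* bx =  ci2*g*i*ky*cuz            */
   b[1] = -at2*zt1;             /* by = -ci2*g*i*kx*cuz            */
   b[2] = at2*zt2 - at3*zt3;    /* bz =  ci2*g*i*(kx*cuy - ky*cux) */
}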
/*--------------------------------------------------------------------*/
void csse2mmaxwel2(float complex exy[], float complex bxy[],
                   float complex cu[], float complex ffc[], float ci,
                   float dt, float *wf, float *wm, int nx, int ny,
                   int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine solves 2-1/2d maxwell's equation in fourier space for
   transverse electric and magnetic fields with periodic boundary
   conditions.
   input: all, output: wf, wm, exy, bxy
   approximate flop count is: 286*nxc*nyc + 84*(nxc + nyc)
   where nxc = nx/2 - 1, nyc = ny/2 - 1
   the magnetic field is first updated half a step using the equations:
   bx[ky][kx] = bx[ky][kx] - .5*dt*sqrt(-1)*ky*ez[ky][kx]
   by[ky][kx] = by[ky][kx] + .5*dt*sqrt(-1)*kx*ez[ky][kx]
   bz[ky][kx] = bz[ky][kx] - .5*dt*sqrt(-1)*(kx*ey[ky][kx]-ky*ex[ky][kx])
   the electric field is then updated a whole step using the equations:
   ex[ky][kx] = ex[ky][kx] + c2*dt*sqrt(-1)*ky*bz[ky][kx]
                           - affp*dt*cux[ky][kx]*s[ky][kx]
   ey[ky][kx] = ey[ky][kx] - c2*dt*sqrt(-1)*kx*bz[ky][kx]
                           - affp*dt*cuy[ky][kx]*s[ky][kx]
   ez[ky][kx] = ez[ky][kx] + c2*dt*sqrt(-1)*(kx*by[ky][kx]-ky*bx[ky][kx])
                           - affp*dt*cuz[ky][kx]*s[ky][kx]
   the magnetic field is finally updated the remaining half step with
   the new electric field and the previous magnetic field equations.
   where kx = 2pi*j/nx, ky = 2pi*k/ny, c2 = 1./(ci*ci), and
   s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2),
   j,k = fourier mode numbers, except for
   ex(kx=pi) = ey(kx=pi) = ez(kx=pi) = 0,
   ex(ky=pi) = ey(ky=pi) = ez(ky=pi) = 0,
   ex(kx=0,ky=0) = ey(kx=0,ky=0) = ez(kx=0,ky=0) = 0,
   and similarly for bx, by, bz.
   cu[k][j][i] = complex current density
   exy[k][j][i] = complex transverse electric field
   bxy[k][j][i] = complex magnetic field
   for component i, all for fourier mode (j,k)
   creal(ffc[0][0]) = affp = normalization constant = nx*ny/np,
   where np = number of particles
   cimag(ffc[k][j]) = finite-size particle shape factor s,
   s[ky][kx] = exp(-((kx*ax)**2+(ky*ay)**2)/2), for fourier mode (j,k)
   ci = reciprocal of velocity of light
   dt = time interval between successive calculations
   transverse electric field energy is also calculated, using
   wf = nx*ny*sum((1/affp)*|exy[ky][kx]|**2)
   magnetic field energy is also calculated, using
   wm = nx*ny*sum((c2/affp)*|bxy[ky][kx]|**2)
   nx/ny = system length in x/y direction
   nxvh = first dimension of field arrays, must be >= nxh
   nyv = second dimension of field arrays, must be >= ny
   nxhd = first dimension of form factor array, must be >= nxh
   nyhd = second dimension of form factor array, must be >= nyh
   requires SSE2, cu, exy, bxy, ffc need to be 16 byte aligned
   nxhd, nxvh need to be a multiple of 2
   cu, exy, bxy need to have 4 components
local data                                                            */
   int nxh, nyh, j, k, k1, kk, kj;
   float dnx, dny, dth, c2, cdt, affp, anorm, dky, afdt, adt;
   float at1;
   float complex zero, zt1, zt3, zt4, zt6, zt7, zt9;
   double wp, ws, sum1, sum2;
   __m128i v_it;
   __m128 v_dnx, v_dkx, v_dny, v_dky, v_cdt, v_adt, v_afdt, v_dth;
   __m128 v_anorm, v_at1, v_at2, v_at3, v_at4;
   __m128 v_zero, v_n, v_m, v_zt1, v_zt2, v_zt4, v_zt6, v_zt7, v_zt9;
   __m128d v_wp, v_ws, v_d;
   __attribute__((aligned(16))) double dd[2];
   if (ci <= 0.0) return;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
   dnx = 6.28318530717959/(float) nx;
   dny = 6.28318530717959/(float) ny;
   dth = 0.5*dt;
   c2 = 1.0/(ci*ci);
   cdt = c2*dt;
   affp = creal(ffc[0]);
   adt = affp*dt;
   zero = 0.0 + 0.0*_Complex_I;
   anorm = 1.0/affp;
   v_dnx = _mm_set1_ps(dnx);
   v_dny = _mm_set1_ps(dny);
   v_zero = _mm_set1_ps(0.0f);
   v_cdt = _mm_set1_ps(cdt);
   v_adt = _mm_set1_ps(adt);
   v_dth = _mm_set1_ps(dth);
   v_anorm = _mm_set1_ps(anorm);
   v_n = _mm_set_ps(-1.0f,1.0f,-1.0f,1.0f);
   v_m = _mm_set_ps(1.0f,1.0f,-1.0f,-1.0f);
/* update electromagnetic field and sum field energies */
   sum1 = 0.0;
   sum2 = 0.0;
/* calculate the electromagnetic fields */
/* mode numbers 0 < kx < nx/2 and 0 < ky < ny/2 */
#pragma omp parallel for \
private(j,k,k1,kk,kj,v_it,v_dkx,v_dky,v_afdt,v_at1,v_at2,v_at3,v_at4, \
v_zt1,v_zt2,v_zt4,v_zt6,v_zt7,v_zt9,v_ws,v_wp,v_d,dd) \
reduction(+:sum1,sum2)
   for (k = 1; k < nyh; k++) {
/* dky = dny*(float) k; */
      v_dky = _mm_mul_ps(v_dny,_mm_cvtepi32_ps(_mm_set1_epi32(k)));
      kk = nxhd*k;
      kj = 4*nxvh*k;
      k1 = 4*nxvh*ny - kj;
      v_wp = _mm_set1_pd(0.0);
      v_ws = _mm_set1_pd(0.0);
      for (j = 1; j < nxh; j++) {
/* dkx = dnx*(float) j; */
         v_dkx = _mm_mul_ps(v_dnx,_mm_cvtepi32_ps(_mm_set1_epi32(j)));
         v_dkx = _mm_movelh_ps(_mm_sub_ps(v_zero,v_dky),v_dkx);
/* afdt = adt*cimagf(ffc[j+kk]); */
         v_afdt = _mm_loadl_pi(v_zero,(__m64 *)&ffc[j+kk]);
         v_afdt = _mm_movelh_ps(v_afdt,v_afdt);
         v_afdt = _mm_mul_ps(v_adt,_mm_shuffle_ps(v_afdt,v_afdt,245));
/* update magnetic field half time step, ky > 0 */
/* zt1 = -cimagf(exy[2+4*j+kj]) + crealf(exy[2+4*j+kj])*_Complex_I; */
/* zt2 = -cimagf(exy[1+4*j+kj]) + crealf(exy[1+4*j+kj])*_Complex_I; */
/* zt3 = -cimagf(exy[4*j+kj]) + crealf(exy[4*j+kj])*_Complex_I; */
         v_zt7 = _mm_load_ps((float *)&exy[4*j+kj]);
         v_zt2 = _mm_mul_ps(v_zt7,v_n);
         v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177);
         v_zt9 = _mm_load_ps((float *)&exy[2+4*j+kj]);
         v_zt1 = _mm_mul_ps(v_zt9,v_n);
         v_zt1 = _mm_movelh_ps(v_zt1,v_zt1);
         v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177);
/* zt4 = bxy[4*j+kj] - dth*(dky*zt1); */
/* zt5 = bxy[1+4*j+kj] + dth*(dkx*zt1); */
         v_zt4 = _mm_load_ps((float *)&bxy[4*j+kj]);
         v_zt1 = _mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt1));
         v_zt4 = _mm_add_ps(v_zt4,v_zt1);
/* zt6 = bxy[2+4*j+kj] - dth*(dkx*zt2 - dky*zt3); */
         v_zt6 = _mm_load_ps((float *)&bxy[2+4*j+kj]);
         v_zt2 =
_mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt2)); v_at1 = _mm_movelh_ps(v_zt2,v_zt2); v_zt2 = _mm_add_ps(_mm_movehl_ps(v_zt2,v_zt2),v_at1); v_zt6 = _mm_movelh_ps(_mm_sub_ps(v_zt6,v_zt2),v_zero); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt2 = _mm_mul_ps(v_zt4,v_n); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); v_zt1 = _mm_mul_ps(v_zt6,v_n); v_zt1 = _mm_movelh_ps(v_zt1,v_zt1); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* zt7 = exy[4*j+kj] + cdt*(dky*zt1) - afdt*cu[4*j+kj]; */ /* zt8 = exy[1+4*j+kj] - cdt*(dkx*zt1) - afdt*cu[1+4*j+kj]; */ v_at2 = _mm_load_ps((float *)&cu[4*j+kj]); v_zt1 = _mm_mul_ps(v_cdt,_mm_mul_ps(v_dkx,v_zt1)); v_zt1 = _mm_add_ps(_mm_mul_ps(v_afdt,v_at2),v_zt1); v_zt7 = _mm_sub_ps(v_zt7,v_zt1); /* zt9 = exy[2+4*j+kj] + cdt*(dkx*zt2 - dky*zt3) */ /* - afdt*cu[2+4*j+kj]; */ v_at2 = _mm_load_ps((float *)&cu[2+4*j+kj]); v_zt2 = _mm_mul_ps(v_cdt,_mm_mul_ps(v_dkx,v_zt2)); v_at1 = _mm_movelh_ps(v_zt2,v_zt2); v_zt2 = _mm_add_ps(_mm_movehl_ps(v_zt2,v_zt2),v_at1); v_zt2 = _mm_sub_ps(v_zt2,_mm_mul_ps(v_afdt,v_at2)); v_zt9 = _mm_movelh_ps(_mm_add_ps(v_zt9,v_zt2),v_zero); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt2 = _mm_mul_ps(v_zt7,v_n); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); v_zt1 = _mm_mul_ps(v_zt9,v_n); v_zt1 = _mm_movelh_ps(v_zt1,v_zt1); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* exy[4*j+kj] = zt7; */ /* exy[1+4*j+kj] = zt8; */ /* exy[2+4*j+kj] = zt9; */ _mm_store_ps((float *)&exy[4*j+kj],v_zt7); _mm_store_ps((float *)&exy[2+4*j+kj],v_zt9); /* at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */ /* ws += (double) at1; */ v_zt7 = _mm_mul_ps(v_zt7,v_zt7); v_at3 = _mm_add_ps(v_zt7,_mm_mul_ps(v_zt9,v_zt9)); /* zt4 -= dth*(dky*zt1); */ /* zt5 += dth*(dkx*zt1); */ v_zt1 = _mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt1)); v_zt4 = _mm_add_ps(v_zt4,v_zt1); /* zt6 -= dth*(dkx*zt2 - dky*zt3); */ v_zt2 = _mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt2)); v_at1 = _mm_movelh_ps(v_zt2,v_zt2); v_zt2 = _mm_add_ps(_mm_movehl_ps(v_zt2,v_zt2),v_at1); v_zt6 = _mm_movelh_ps(_mm_sub_ps(v_zt6,v_zt2),v_zero); /* bxy[4*j+kj] = zt4; */ /* bxy[1+4*j+kj] = zt5; */ /* bxy[2+4*j+kj] = zt6; */ _mm_store_ps((float *)&bxy[4*j+kj],v_zt4); _mm_store_ps((float *)&bxy[2+4*j+kj],v_zt6); /* at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */ /* wp += (double) at1; */ v_zt4 = _mm_mul_ps(v_zt4,v_zt4); v_at4 = _mm_add_ps(v_zt4,_mm_mul_ps(v_zt6,v_zt6)); /* update magnetic field half time step, ky < 0 */ v_dkx = _mm_mul_ps(v_dkx,v_m); /* zt1 = -cimagf(exy[2+4*j+k1]) */ /* + crealf(exy[2+4*j+k1])*_Complex_I; */ /* zt2 = -cimagf(exy[1+4*j+k1]) */ /* + crealf(exy[1+4*j+k1])*_Complex_I; */ /* zt3 = -cimagf(exy[4*j+k1]) + crealf(exy[4*j+k1])*_Complex_I; */ v_zt7 = _mm_load_ps((float *)&exy[4*j+k1]); v_zt2 = _mm_mul_ps(v_zt7,v_n); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); v_zt9 = _mm_load_ps((float *)&exy[2+4*j+k1]); v_zt1 = _mm_mul_ps(v_zt9,v_n); v_zt1 = _mm_movelh_ps(v_zt1,v_zt1); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* zt4 = bxy[4*j+k1] + dth*(dky*zt1); */ /* zt5 = bxy[1+4*j+k1] + dth*(dkx*zt1); */ v_zt4 = _mm_load_ps((float *)&bxy[4*j+k1]); v_zt1 = _mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt1)); v_zt4 = _mm_add_ps(v_zt4,v_zt1); /* zt6 = bxy[2+4*j+k1] - dth*(dkx*zt2 + dky*zt3); */ v_zt6 = 
_mm_load_ps((float *)&bxy[2+4*j+k1]); v_zt2 = _mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt2)); v_at1 = _mm_movelh_ps(v_zt2,v_zt2); v_zt2 = _mm_add_ps(_mm_movehl_ps(v_zt2,v_zt2),v_at1); v_zt6 = _mm_movelh_ps(_mm_sub_ps(v_zt6,v_zt2),v_zero); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ /* zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; */ v_zt2 = _mm_mul_ps(v_zt4,v_n); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); v_zt1 = _mm_mul_ps(v_zt6,v_n); v_zt1 = _mm_movelh_ps(v_zt1,v_zt1); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* zt7 = exy[4*j+k1] - cdt*(dky*zt1) - afdt*cu[4*j+k1]; */ /* zt8 = exy[1+4*j+k1] - cdt*(dkx*zt1) - afdt*cu[1+4*j+k1]; */ v_at2 = _mm_load_ps((float *)&cu[4*j+k1]); v_zt1 = _mm_mul_ps(v_cdt,_mm_mul_ps(v_dkx,v_zt1)); v_zt1 = _mm_add_ps(_mm_mul_ps(v_afdt,v_at2),v_zt1); v_zt7 = _mm_sub_ps(v_zt7,v_zt1); /* zt9 = exy[2+4*j+k1] + cdt*(dkx*zt2 + dky*zt3) */ /* - afdt*cu[2+4*j+k1]; */ v_at2 = _mm_load_ps((float *)&cu[2+4*j+k1]); v_zt2 = _mm_mul_ps(v_cdt,_mm_mul_ps(v_dkx,v_zt2)); v_at1 = _mm_movelh_ps(v_zt2,v_zt2); v_zt2 = _mm_add_ps(_mm_movehl_ps(v_zt2,v_zt2),v_at1); v_zt2 = _mm_sub_ps(v_zt2,_mm_mul_ps(v_afdt,v_at2)); v_zt9 = _mm_movelh_ps(_mm_add_ps(v_zt9,v_zt2),v_zero); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ /* zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; */ v_zt2 = _mm_mul_ps(v_zt7,v_n); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); v_zt1 = _mm_mul_ps(v_zt9,v_n); v_zt1 = _mm_movelh_ps(v_zt1,v_zt1); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* exy[4*j+k1] = zt7; */ /* exy[1+4*j+k1] = zt8; */ /* exy[2+4*j+k1] = zt9; */ _mm_store_ps((float *)&exy[4*j+k1],v_zt7); _mm_store_ps((float *)&exy[2+4*j+k1],v_zt9); /* at1 = anorm*(zt7*conjf(zt7) + zt8*conjf(zt8) + zt9*conjf(zt9)); */ /* ws += (double) at1; */ v_zt7 = _mm_mul_ps(v_zt7,v_zt7); v_zt7 = _mm_add_ps(v_zt7,_mm_mul_ps(v_zt9,v_zt9)); v_at3 = _mm_mul_ps(v_anorm,_mm_add_ps(v_at3,v_zt7)); /* convert to double precision before accumulating */ v_d = _mm_cvtps_pd(v_at3); v_ws = _mm_add_pd(v_ws,v_d); v_it = _mm_srli_si128((__m128i)v_at3,8); v_d = _mm_cvtps_pd((__m128)v_it); v_ws = _mm_add_pd(v_ws,v_d); /* zt4 += dth*(dky*zt1); */ /* zt5 += dth*(dkx*zt1); */ v_zt1 = _mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt1)); v_zt4 = _mm_add_ps(v_zt4,v_zt1); /* zt6 -= dth*(dkx*zt2 + dky*zt3); */ v_zt2 = _mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt2)); v_at1 = _mm_movelh_ps(v_zt2,v_zt2); v_zt2 = _mm_add_ps(_mm_movehl_ps(v_zt2,v_zt2),v_at1); v_zt6 = _mm_movelh_ps(_mm_sub_ps(v_zt6,v_zt2),v_zero); /* bxy[4*j+k1] = zt4; */ /* bxy[1+4*j+k1] = zt5; */ /* bxy[2+4*j+k1] = zt6; */ _mm_store_ps((float *)&bxy[4*j+k1],v_zt4); _mm_store_ps((float *)&bxy[2+4*j+k1],v_zt6); /* at1 = anorm*(zt4*conjf(zt4) + zt5*conjf(zt5) + zt6*conjf(zt6)); */ /* wp += (double) at1; */ v_zt4 = _mm_mul_ps(v_zt4,v_zt4); v_zt4 = _mm_add_ps(v_zt4,_mm_mul_ps(v_zt6,v_zt6)); v_at4 = _mm_mul_ps(v_anorm,_mm_add_ps(v_at4,v_zt4)); /* convert to double precision before accumulating */ v_d = _mm_cvtps_pd(v_at4); v_wp = _mm_add_pd(v_wp,v_d); v_it = _mm_srli_si128((__m128i)v_at4,8); v_d = _mm_cvtps_pd((__m128)v_it); v_wp = _mm_add_pd(v_wp,v_d); } /* sum1 += ws; */ _mm_store_pd(&dd[0],v_ws); for (j = 1; j < 2; j++) { dd[0] += dd[j]; } sum1 += dd[0]; /* sum2 += wp; */ _mm_store_pd(&dd[0],v_wp); for (j = 1; j < 2; j++) { dd[0] += dd[j]; } sum2 += dd[0]; } ws = 0.0; wp = 0.0; v_wp = _mm_set1_pd(0.0); v_ws = 
_mm_set1_pd(0.0); /* mode numbers kx = 0, nx/2 */ #pragma ivdep for (k = 1; k < nyh; k++) { kk = nxhd*k; kj = 4*nxvh*k; k1 = 4*nxvh*ny - kj; dky = dny*(float) k; afdt = adt*cimagf(ffc[kk]); /* update magnetic field half time step */ zt1 = -cimagf(exy[2+kj]) + crealf(exy[2+kj])*_Complex_I; zt3 = -cimagf(exy[kj]) + crealf(exy[kj])*_Complex_I; zt4 = bxy[kj] - dth*(dky*zt1); zt6 = bxy[2+kj] + dth*(dky*zt3); /* update electric field whole time step */ zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; zt3 = -cimagf(zt4) + crealf(zt4)*_Complex_I; zt7 = exy[kj] + cdt*(dky*zt1) - afdt*cu[kj]; zt9 = exy[2+kj] - cdt*(dky*zt3) - afdt*cu[2+kj]; /* update magnetic field half time step and store electric field */ zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; zt3 = -cimagf(zt7) + crealf(zt7)*_Complex_I; exy[kj] = zt7; exy[1+kj] = zero; exy[2+kj] = zt9; at1 = anorm*(zt7*conjf(zt7) + zt9*conjf(zt9)); ws += (double) at1; zt4 -= dth*(dky*zt1); zt6 += dth*(dky*zt3); bxy[kj] = zt4; bxy[1+kj] = zero; bxy[2+kj] = zt6; at1 = anorm*(zt4*conjf(zt4) + zt6*conjf(zt6)); wp += (double) at1; bxy[k1] = zero; bxy[1+k1] = zero; bxy[2+k1] = zero; exy[k1] = zero; exy[1+k1] = zero; exy[2+k1] = zero; } sum1 += ws; sum2 += wp; /* mode numbers ky = 0, ny/2 */ k1 = 4*nxvh*nyh; for (j = 1; j < nxh; j++) { /* dkx = dnx*(float) j; */ v_dkx = _mm_mul_ps(v_dnx,_mm_cvtepi32_ps(_mm_set1_epi32(j))); v_dkx = _mm_movelh_ps(v_zero,v_dkx); /* afdt = adt*cimagf(ffc[j]); */ v_afdt = _mm_loadl_pi(v_zero,(__m64 *)&ffc[j]); v_afdt = _mm_movelh_ps(v_afdt,v_afdt); v_afdt = _mm_mul_ps(v_adt,_mm_shuffle_ps(v_afdt,v_afdt,245)); /* update magnetic field half time step */ /* zt1 = -cimagf(exy[2+4*j]) + crealf(exy[2+4*j])*_Complex_I; */ /* zt2 = -cimagf(exy[1+4*j]) + crealf(exy[1+4*j])*_Complex_I; */ v_zt7 = _mm_load_ps((float *)&exy[4*j]); v_zt2 = _mm_mul_ps(v_zt7,v_n); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); v_zt9 = _mm_load_ps((float *)&exy[2+4*j]); v_zt1 = _mm_mul_ps(v_zt9,v_n); v_zt1 = _mm_movelh_ps(v_zt1,v_zt1); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* zt5 = bxy[1+4*j] + dth*(dkx*zt1); */ v_zt4 = _mm_load_ps((float *)&bxy[4*j]); v_zt1 = _mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt1)); v_zt4 = _mm_add_ps(v_zt4,v_zt1); /* zt6 = bxy[2+4*j] - dth*(dkx*zt2); */ v_zt6 = _mm_load_ps((float *)&bxy[2+4*j]); v_zt2 = _mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt2)); v_zt2 = _mm_movehl_ps(v_zero,v_zt2); v_zt6 = _mm_movelh_ps(_mm_sub_ps(v_zt6,v_zt2),v_zero); /* update electric field whole time step */ /* zt1 = -cimagf(zt6) + crealf(zt6)*_Complex_I; */ /* zt2 = -cimagf(zt5) + crealf(zt5)*_Complex_I; */ v_zt2 = _mm_mul_ps(v_zt4,v_n); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); v_zt1 = _mm_mul_ps(v_zt6,v_n); v_zt1 = _mm_movelh_ps(v_zt1,v_zt1); v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177); /* zt8 = exy[1+4*j] - cdt*(dkx*zt1) - afdt*cu[1+4*j]; */ v_at2 = _mm_load_ps((float *)&cu[4*j]); v_zt1 = _mm_mul_ps(v_cdt,_mm_mul_ps(v_dkx,v_zt1)); v_zt1 = _mm_add_ps(_mm_mul_ps(v_afdt,v_at2),v_zt1); v_zt7 = _mm_sub_ps(v_zt7,v_zt1); /* zt9 = exy[2+4*j] + cdt*(dkx*zt2) - afdt*cu[2+4*j]; */ v_at2 = _mm_load_ps((float *)&cu[2+4*j]); v_zt2 = _mm_mul_ps(v_cdt,_mm_mul_ps(v_dkx,v_zt2)); v_zt2 = _mm_movehl_ps(v_zero,v_zt2); v_zt2 = _mm_sub_ps(v_zt2,_mm_mul_ps(v_afdt,v_at2)); v_zt9 = _mm_movelh_ps(_mm_add_ps(v_zt9,v_zt2),v_zero); /* update magnetic field half time step and store electric field */ /* zt1 = -cimagf(zt9) + crealf(zt9)*_Complex_I; */ /* zt2 = -cimagf(zt8) + crealf(zt8)*_Complex_I; */ v_zt2 = _mm_mul_ps(v_zt7,v_n); v_zt2 = _mm_shuffle_ps(v_zt2,v_zt2,177); v_zt1 = _mm_mul_ps(v_zt9,v_n); v_zt1 = 
_mm_movelh_ps(v_zt1,v_zt1);
      v_zt1 = _mm_shuffle_ps(v_zt1,v_zt1,177);
/* exy[4*j] = zero; */
/* exy[1+4*j] = zt8; */
/* exy[2+4*j] = zt9; */
      _mm_store_ps((float *)&exy[4*j],v_zt7);
      _mm_store_ps((float *)&exy[2+4*j],v_zt9);
/* at1 = anorm*(zt8*conjf(zt8) + zt9*conjf(zt9)); */
/* ws += (double) at1; */
      v_zt7 = _mm_mul_ps(v_zt7,v_zt7);
      v_at3 = _mm_add_ps(v_zt7,_mm_mul_ps(v_zt9,v_zt9));
      v_at3 = _mm_mul_ps(v_anorm,v_at3);
/* convert to double precision before accumulating */
      v_d = _mm_cvtps_pd(v_at3);
      v_ws = _mm_add_pd(v_ws,v_d);
      v_it = _mm_srli_si128((__m128i)v_at3,8);
      v_d = _mm_cvtps_pd((__m128)v_it);
      v_ws = _mm_add_pd(v_ws,v_d);
/* zt5 += dth*(dkx*zt1); */
      v_zt1 = _mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt1));
      v_zt4 = _mm_add_ps(v_zt4,v_zt1);
/* zt6 -= dth*(dkx*zt2); */
      v_zt2 = _mm_mul_ps(v_dth,_mm_mul_ps(v_dkx,v_zt2));
      v_zt2 = _mm_movehl_ps(v_zero,v_zt2);
      v_zt6 = _mm_movelh_ps(_mm_sub_ps(v_zt6,v_zt2),v_zero);
/* bxy[4*j] = zero; */
/* bxy[1+4*j] = zt5; */
/* bxy[2+4*j] = zt6; */
      _mm_store_ps((float *)&bxy[4*j],v_zt4);
      _mm_store_ps((float *)&bxy[2+4*j],v_zt6);
/* at1 = anorm*(zt5*conjf(zt5) + zt6*conjf(zt6)); */
/* wp += (double) at1; */
      v_zt4 = _mm_mul_ps(v_zt4,v_zt4);
      v_at4 = _mm_add_ps(v_zt4,_mm_mul_ps(v_zt6,v_zt6));
      v_at4 = _mm_mul_ps(v_anorm,v_at4);
/* convert to double precision before accumulating */
      v_d = _mm_cvtps_pd(v_at4);
      v_wp = _mm_add_pd(v_wp,v_d);
      v_it = _mm_srli_si128((__m128i)v_at4,8);
      v_d = _mm_cvtps_pd((__m128)v_it);
      v_wp = _mm_add_pd(v_wp,v_d);
/* bxy[4*j+k1] = zero; */
/* bxy[1+4*j+k1] = zero; */
/* bxy[2+4*j+k1] = zero; */
      _mm_store_ps((float *)&bxy[4*j+k1],v_zero);
      _mm_store_ps((float *)&bxy[2+4*j+k1],v_zero);
/* exy[4*j+k1] = zero; */
/* exy[1+4*j+k1] = zero; */
/* exy[2+4*j+k1] = zero; */
      _mm_store_ps((float *)&exy[4*j+k1],v_zero);
      _mm_store_ps((float *)&exy[2+4*j+k1],v_zero);
   }
/* bxy[0] = zero; */
/* bxy[1] = zero; */
/* bxy[2] = zero; */
   _mm_store_ps((float *)&bxy[0],v_zero);
   _mm_store_ps((float *)&bxy[2],v_zero);
/* exy[0] = zero; */
/* exy[1] = zero; */
/* exy[2] = zero; */
   _mm_store_ps((float *)&exy[0],v_zero);
   _mm_store_ps((float *)&exy[2],v_zero);
/* bxy[k1] = zero; */
/* bxy[1+k1] = zero; */
/* bxy[2+k1] = zero; */
   _mm_store_ps((float *)&bxy[k1],v_zero);
   _mm_store_ps((float *)&bxy[2+k1],v_zero);
/* exy[k1] = zero; */
/* exy[1+k1] = zero; */
/* exy[2+k1] = zero; */
   _mm_store_ps((float *)&exy[k1],v_zero);
   _mm_store_ps((float *)&exy[2+k1],v_zero);
/* *wf = sum1*(float) (nx*ny); */
   _mm_store_pd(&dd[0],v_ws);
   for (j = 1; j < 2; j++) {
      dd[0] += dd[j];
   }
   *wf = (sum1 + dd[0])*(float) (nx*ny);
/* *wm = sum2*c2*(float) (nx*ny); */
   _mm_store_pd(&dd[0],v_wp);
   for (j = 1; j < 2; j++) {
      dd[0] += dd[j];
   }
   *wm = (sum2 + dd[0])*c2*(float) (nx*ny);
   return;
}
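/* Scalar sketch (illustrative, not called by the library) of the leapfrog
   csse2mmaxwel2 vectorizes for one fourier mode (kx,ky): advance B half a
   step, E a whole step, then B the remaining half step.  The function name
   is hypothetical; dth stands for 0.5*dt, cdt for dt/(ci*ci), and afdt for
   affp*dt*cimagf(ffc[j+kk]).  Relies on <complex.h>, included earlier in
   this file. */
static void cmaxwell_mode_sketch(float complex e[3], float complex b[3],
                                 const float complex cu[3], float dkx,
                                 float dky, float dth, float cdt,
                                 float afdt) {
   float complex zt1, zt2, zt3;
/* b <- b - (dt/2)*sqrt(-1)*(k x e) */
   zt1 = _Complex_I*e[2];
   zt2 = _Complex_I*e[1];
   zt3 = _Complex_I*e[0];
   b[0] -= dth*(dky*zt1);
   b[1] += dth*(dkx*zt1);
   b[2] -= dth*(dkx*zt2 - dky*zt3);
/* e <- e + c2*dt*sqrt(-1)*(k x b) - affp*dt*s*cu */
   zt1 = _Complex_I*b[2];
   zt2 = _Complex_I*b[1];
   zt3 = _Complex_I*b[0];
   e[0] += cdt*(dky*zt1) - afdt*cu[0];
   e[1] -= cdt*(dkx*zt1) + afdt*cu[1];
   e[2] += cdt*(dkx*zt2 - dky*zt3) - afdt*cu[2];
/* b <- b - (dt/2)*sqrt(-1)*(k x e), with the new e */
   zt1 = _Complex_I*e[2];
   zt2 = _Complex_I*e[1];
   zt3 = _Complex_I*e[0];
   b[0] -= dth*(dky*zt1);
   b[1] += dth*(dkx*zt1);
   b[2] -= dth*(dkx*zt2 - dky*zt3);
}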
/*--------------------------------------------------------------------*/
void csse2memfield2(float complex fxy[], float complex exy[],
                    float complex ffc[], int isign, int nx, int ny,
                    int nxvh, int nyv, int nxhd, int nyhd) {
/* this subroutine either adds complex vector fields if isign > 0
   or copies complex vector fields if isign < 0
   includes additional smoothing (multiplication by cimagf(ffc[k][j]))
   requires SSE2, fxy, exy, ffc need to be 16 byte aligned
   nxhd, nxvh need to be a multiple of 2
   fxy, exy need to have 4 components
local data                                                            */
   int j, k, nxh, nyh, k1, kk, kj;
   __m128 v_at1, v_zero, v_zt1, v_zt2;
   nxh = nx/2;
   nyh = 1 > ny/2 ? 1 : ny/2;
   v_zero = _mm_set1_ps(0.0f);
/* add the fields */
   if (isign > 0) {
#pragma omp parallel for private(j,k,k1,kk,kj,v_at1,v_zt1,v_zt2)
      for (k = 1; k < nyh; k++) {
         kk = nxhd*k;
         kj = 4*nxvh*k;
         k1 = 4*nxvh*ny - kj;
         for (j = 0; j < nxh; j++) {
/* at1 = cimagf(ffc[j+kk]); */
            v_at1 = _mm_loadl_pi(v_zero,(__m64 *)&ffc[j+kk]);
            v_at1 = _mm_movelh_ps(v_at1,v_at1);
            v_at1 = _mm_shuffle_ps(v_at1,v_at1,245);
/* fxy[4*j+kj] += exy[4*j+kj]*at1; */
/* fxy[1+4*j+kj] += exy[1+4*j+kj]*at1; */
/* fxy[2+4*j+kj] += exy[2+4*j+kj]*at1; */
            v_zt1 = _mm_load_ps((float *)&exy[4*j+kj]);
            v_zt2 = _mm_load_ps((float *)&fxy[4*j+kj]);
            v_zt2 = _mm_add_ps(v_zt2,_mm_mul_ps(v_zt1,v_at1));
            _mm_store_ps((float *)&fxy[4*j+kj],v_zt2);
            v_zt1 = _mm_load_ps((float *)&exy[2+4*j+kj]);
            v_zt2 = _mm_load_ps((float *)&fxy[2+4*j+kj]);
            v_zt2 = _mm_add_ps(v_zt2,_mm_mul_ps(v_zt1,v_at1));
            _mm_store_ps((float *)&fxy[2+4*j+kj],v_zt2);
/* fxy[4*j+k1] += exy[4*j+k1]*at1; */
/* fxy[1+4*j+k1] += exy[1+4*j+k1]*at1; */
/* fxy[2+4*j+k1] += exy[2+4*j+k1]*at1; */
            v_zt1 = _mm_load_ps((float *)&exy[4*j+k1]);
            v_zt2 = _mm_load_ps((float *)&fxy[4*j+k1]);
            v_zt2 = _mm_add_ps(v_zt2,_mm_mul_ps(v_zt1,v_at1));
            _mm_store_ps((float *)&fxy[4*j+k1],v_zt2);
            v_zt1 = _mm_load_ps((float *)&exy[2+4*j+k1]);
            v_zt2 = _mm_load_ps((float *)&fxy[2+4*j+k1]);
            v_zt2 = _mm_add_ps(v_zt2,_mm_mul_ps(v_zt1,v_at1));
            _mm_store_ps((float *)&fxy[2+4*j+k1],v_zt2);
         }
      }
      k1 = 4*nxvh*nyh;
      for (j = 0; j < nxh; j++) {
/* at1 = cimagf(ffc[j]); */
         v_at1 = _mm_loadl_pi(v_zero,(__m64 *)&ffc[j]);
         v_at1 = _mm_movelh_ps(v_at1,v_at1);
         v_at1 = _mm_shuffle_ps(v_at1,v_at1,245);
/* fxy[4*j] += exy[4*j]*at1; */
/* fxy[1+4*j] += exy[1+4*j]*at1; */
/* fxy[2+4*j] += exy[2+4*j]*at1; */
         v_zt1 = _mm_load_ps((float *)&exy[4*j]);
         v_zt2 = _mm_load_ps((float *)&fxy[4*j]);
         v_zt2 = _mm_add_ps(v_zt2,_mm_mul_ps(v_zt1,v_at1));
         _mm_store_ps((float *)&fxy[4*j],v_zt2);
         v_zt1 = _mm_load_ps((float *)&exy[2+4*j]);
         v_zt2 = _mm_load_ps((float *)&fxy[2+4*j]);
         v_zt2 = _mm_add_ps(v_zt2,_mm_mul_ps(v_zt1,v_at1));
         _mm_store_ps((float *)&fxy[2+4*j],v_zt2);
/* fxy[4*j+k1] += exy[4*j+k1]*at1; */
/* fxy[1+4*j+k1] += exy[1+4*j+k1]*at1; */
/* fxy[2+4*j+k1] += exy[2+4*j+k1]*at1; */
         v_zt1 = _mm_load_ps((float *)&exy[4*j+k1]);
         v_zt2 = _mm_load_ps((float *)&fxy[4*j+k1]);
         v_zt2 = _mm_add_ps(v_zt2,_mm_mul_ps(v_zt1,v_at1));
         _mm_store_ps((float *)&fxy[4*j+k1],v_zt2);
         v_zt1 = _mm_load_ps((float *)&exy[2+4*j+k1]);
         v_zt2 = _mm_load_ps((float *)&fxy[2+4*j+k1]);
         v_zt2 = _mm_add_ps(v_zt2,_mm_mul_ps(v_zt1,v_at1));
         _mm_store_ps((float *)&fxy[2+4*j+k1],v_zt2);
      }
   }
/* copy the fields */
   else if (isign < 0) {
#pragma omp parallel for private(j,k,k1,kk,kj,v_at1,v_zt1,v_zt2)
      for (k = 1; k < nyh; k++) {
         kk = nxhd*k;
         kj = 4*nxvh*k;
         k1 = 4*nxvh*ny - kj;
         for (j = 0; j < nxh; j++) {
/* at1 = cimagf(ffc[j+kk]); */
            v_at1 = _mm_loadl_pi(v_zero,(__m64 *)&ffc[j+kk]);
            v_at1 = _mm_movelh_ps(v_at1,v_at1);
            v_at1 = _mm_shuffle_ps(v_at1,v_at1,245);
/* fxy[4*j+kj] = exy[4*j+kj]*at1; */
/* fxy[1+4*j+kj] = exy[1+4*j+kj]*at1; */
/* fxy[2+4*j+kj] = exy[2+4*j+kj]*at1; */
            v_zt1 = _mm_load_ps((float *)&exy[4*j+kj]);
            v_zt2 = _mm_mul_ps(v_zt1,v_at1);
            _mm_store_ps((float *)&fxy[4*j+kj],v_zt2);
            v_zt1 = _mm_load_ps((float *)&exy[2+4*j+kj]);
            v_zt2 = _mm_mul_ps(v_zt1,v_at1);
            _mm_store_ps((float *)&fxy[2+4*j+kj],v_zt2);
/* fxy[4*j+k1] = exy[4*j+k1]*at1; */
/* fxy[1+4*j+k1] = exy[1+4*j+k1]*at1; */
/* fxy[2+4*j+k1] = exy[2+4*j+k1]*at1; */
            v_zt1 = _mm_load_ps((float *)&exy[4*j+k1]);
            v_zt2 = _mm_mul_ps(v_zt1,v_at1);
            _mm_store_ps((float *)&fxy[4*j+k1],v_zt2);
            v_zt1 = _mm_load_ps((float
*)&exy[2+4*j+k1]); v_zt2 = _mm_mul_ps(v_zt1,v_at1); _mm_store_ps((float *)&fxy[2+4*j+k1],v_zt2); } } k1 = 4*nxvh*nyh; for (j = 0; j < nxh; j++) { /* at1 = cimagf(ffc[j]); */ v_at1 = _mm_loadl_pi(v_zero,(__m64 *)&ffc[j]); v_at1 = _mm_movelh_ps(v_at1,v_at1); v_at1 = _mm_shuffle_ps(v_at1,v_at1,245); /* fxy[4*j] = exy[4*j]*at1; */ /* fxy[1+4*j] = exy[1+4*j]*at1; */ /* fxy[2+4*j] = exy[2+4*j]*at1; */ v_zt1 = _mm_load_ps((float *)&exy[4*j]); v_zt2 = _mm_mul_ps(v_zt1,v_at1); _mm_store_ps((float *)&fxy[4*j],v_zt2); v_zt1 = _mm_load_ps((float *)&exy[2+4*j]); v_zt2 = _mm_mul_ps(v_zt1,v_at1); _mm_store_ps((float *)&fxy[2+4*j],v_zt2); /* fxy[4*j+k1] = exy[4*j+k1]*at1; */ /* fxy[1+4*j+k1] = exy[1+4*j+k1]*at1; */ /* fxy[2+4*j+k1] = exy[2+4*j+k1]*at1; */ v_zt1 = _mm_load_ps((float *)&exy[4*j+k1]); v_zt2 = _mm_mul_ps(v_zt1,v_at1); _mm_store_ps((float *)&fxy[4*j+k1],v_zt2); v_zt1 = _mm_load_ps((float *)&exy[2+4*j+k1]); v_zt2 = _mm_mul_ps(v_zt1,v_at1); _mm_store_ps((float *)&fxy[2+4*j+k1],v_zt2); } } return; } /*--------------------------------------------------------------------*/ void csse2fft2rmxx(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nyi, int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) { /* this subroutine performs the x part of a two dimensional real to complex fast fourier transform and its inverse, for a subset of y, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, an inverse fourier transform in x is performed f[m][n] = (1/nx*ny)*sum(f[k][j]*exp(-sqrt(-1)*2pi*n*j/nx)) if isign = 1, a forward fourier transform in x is performed f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*n*j/nx)) mixup = array of bit reversed addresses sct = sine/cosine table nyi = initial y index used nyp = number of y indices used nxhd = first dimension of f >= nx/2 nyd = second dimension of f >= ny nxhyd = maximum of (nx/2,ny) nxyhd = maximum of (nx,ny)/2 fourier coefficients are stored as follows: f[k][j] = real, imaginary part of mode j,k, where 0 <= j < nx/2 and 0 <= k < ny, except for f[k][1] = real, imaginary part of mode nx/2,k, where ny/2+1 <= k < ny, and imag(f[0][0]) = real part of mode nx/2,0 and imag(f[0][ny/2]) = real part of mode nx/2,ny/2 written by viktor k. decyk, ucla requires SSE2, f needs to be 16 byte aligned nxhd need to be a multiple of 2 local data */ int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt; int nrx, i, j, k, l, j1, k1, k2, ns, ns2, km, kmr, nrxb, joff; int nss, nxhhs, it; float ani; float complex t1, t2, t3; __m128 v_m, v_n, v_t1, v_t2, v_t3, v_t4, v_ani; if (isign==0) return; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; nxh = nx/2; nxhh = nx/4; ny = 1L<<indy; nxy = nx > ny ? 
nx : ny; nxhy = 1L<<indx1y; nyt = nyi + nyp - 1; nxhhs = 2*(nxhh/2); v_m = _mm_set_ps(1.0f,-1.0f,1.0f,-1.0f); v_n = _mm_set_ps(-1.0f,1.0f,-1.0f,1.0f); v_t1 = _mm_setzero_ps(); v_t2 = _mm_setzero_ps(); v_t3 = _mm_setzero_ps(); if (isign > 0) goto L70; /* inverse fourier transform */ nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,nss,km,kmr,k1,k2,j1,joff,it,ani,t1,t2,t3,v_t1, \ v_t2,v_t3,v_t4) for (i = nyi-1; i < nyt; i++) { joff = nxhd*i; /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { /* t1 = f[j1+joff]; */ v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&f[j1+joff]); /* f[j1+joff] = f[j+joff]; */ v_t2 = _mm_loadl_pi(v_t2,(__m64 *)&f[j+joff]); _mm_storel_pi((__m64 *)&f[j1+joff],v_t2); /* f[j+joff] = t1; */ _mm_storel_pi((__m64 *)&f[j+joff],v_t1); } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; nss = 2*(ns/2); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nss; j+=2) { /* t1 = sct[kmr*j]; */ v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&sct[kmr*j]); v_t1 = _mm_loadh_pi(v_t1,(__m64 *)&sct[kmr*j+kmr]); /* t2 = t1*f[j+k2+joff]; */ v_t2 = _mm_load_ps((float *)&f[j+k2+joff]); v_t3 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,160)); v_t2 = _mm_shuffle_ps(v_t2,v_t2,177); v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,245)); v_t2 = _mm_add_ps(v_t3,_mm_mul_ps(v_t2,v_m)); /* f[j+k2+joff] = f[j+k1+joff] - t2; */ v_t3 = _mm_load_ps((float *)&f[j+k1+joff]); _mm_store_ps((float *)&f[j+k2+joff],_mm_sub_ps(v_t3,v_t2)); /* f[j+k1+joff] += t2; */ _mm_store_ps((float *)&f[j+k1+joff],_mm_add_ps(v_t3,v_t2)); } /* loop over remaining elements */ for (j = nss; j < ns; j++) { t1 = sct[kmr*j]; t2 = t1*f[j+k2+joff]; f[j+k2+joff] = f[j+k1+joff] - t2; f[j+k1+joff] += t2; } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxy/nx; ani = 0.5/(((float) nx)*((float) ny)); v_ani = _mm_set1_ps(ani); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhhs; j+=2) { /* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */ v_t3 = _mm_loadl_pi(v_t3,(__m64 *)&sct[kmr*j]); v_t3 = _mm_loadh_pi(v_t3,(__m64 *)&sct[kmr*j+kmr]); v_t3 = _mm_mul_ps(v_t3,v_m); v_t3 = _mm_shuffle_ps(v_t3,v_t3,177); /* t2 = conjf(f[nxh-j+joff]); */ if (j==0) { v_t2 = _mm_setzero_ps(); } else { v_t2 = _mm_loadl_pi(v_t2,(__m64 *)&f[nxh-j+joff]); } v_t2 = _mm_loadh_pi(v_t2,(__m64 *)&f[nxh-j-1+joff]); v_t2 = _mm_mul_ps(v_t2,v_n); /* t1 = f[j+joff] + t2; */ v_t4 = _mm_load_ps((float *)&f[j+joff]); v_t1 = _mm_add_ps(v_t4,v_t2); /* t2 = (f[j+joff] - t2)*t3; */ v_t2 = _mm_sub_ps(v_t4,v_t2); v_t4 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t3,v_t3,160)); v_t2 = _mm_shuffle_ps(v_t2,v_t2,177); v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t3,v_t3,245)); v_t2 = _mm_add_ps(v_t4,_mm_mul_ps(v_t2,v_m)); /* f[j+joff] = ani*(t1 + t2); */ /* f[nxh-j+joff] = ani*conjf(t1 - t2); */ v_t3 = _mm_mul_ps(v_ani,_mm_add_ps(v_t1,v_t2)); v_t4 = _mm_mul_ps(v_ani,_mm_mul_ps(_mm_sub_ps(v_t1,v_t2),v_n)); if (j==0) { _mm_storeh_pi((__m64 *)&f[joff+1],v_t3); _mm_storeh_pi((__m64 *)&f[nxh-1+joff],v_t4); } else { _mm_store_ps((float *)&f[j+joff],v_t3); _mm_storel_pi((__m64 *)&f[nxh-j+joff],v_t4); _mm_storeh_pi((__m64 *)&f[nxh-j-1+joff],v_t4); } } /* loop over remaining elements */ it = 1 > nxhhs ? 
1 : nxhhs; for (j = it; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = ani*(t1 + t2); f[nxh-j+joff] = ani*conjf(t1 - t2); } ani = 2.0*ani; f[nxhh+joff] = ani*conjf(f[nxhh+joff]); f[joff] = ani*((crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I); } return; /* forward fourier transform */ L70: nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,nss,km,kmr,k1,k2,j1,joff,it,t1,t2,t3,v_t1,v_t2, \ v_t3,v_t4) for (i = nyi-1; i < nyt; i++) { joff = nxhd*i; /* scramble coefficients */ kmr = nxy/nx; /* vector loop over elements in blocks of 2 */ for (j = 0; j < nxhhs; j+=2) { /* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */ v_t3 = _mm_loadl_pi(v_t3,(__m64 *)&sct[kmr*j]); v_t3 = _mm_loadh_pi(v_t3,(__m64 *)&sct[kmr*j+kmr]); v_t3 = _mm_shuffle_ps(v_t3,v_t3,177); /* t2 = conjf(f[nxh-j+joff]); */ if (j==0) { v_t2 = _mm_setzero_ps(); } else { v_t2 = _mm_loadl_pi(v_t2,(__m64 *)&f[nxh-j+joff]); } v_t2 = _mm_loadh_pi(v_t2,(__m64 *)&f[nxh-j-1+joff]); v_t2 = _mm_mul_ps(v_t2,v_n); /* t1 = f[j+joff] + t2; */ v_t4 = _mm_load_ps((float *)&f[j+joff]); v_t1 = _mm_add_ps(v_t4,v_t2); /* t2 = (f[j+joff] - t2)*t3; */ v_t2 = _mm_sub_ps(v_t4,v_t2); v_t4 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t3,v_t3,160)); v_t2 = _mm_shuffle_ps(v_t2,v_t2,177); v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t3,v_t3,245)); v_t2 = _mm_add_ps(v_t4,_mm_mul_ps(v_t2,v_m)); /* f[j+joff] = t1 + t2; */ /* f[nxh-j+joff] = conjf(t1 - t2); */ v_t3 = _mm_add_ps(v_t1,v_t2); v_t4 = _mm_mul_ps(_mm_sub_ps(v_t1,v_t2),v_n); if (j==0) { _mm_storeh_pi((__m64 *)&f[joff+1],v_t3); _mm_storeh_pi((__m64 *)&f[nxh-1+joff],v_t4); } else { _mm_store_ps((float *)&f[j+joff],v_t3); _mm_storel_pi((__m64 *)&f[nxh-j+joff],v_t4); _mm_storeh_pi((__m64 *)&f[nxh-j-1+joff],v_t4); } } /* loop over remaining elements */ it = 1 > nxhhs ? 
1 : nxhhs; for (j = it; j < nxhh; j++) { t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; t2 = conjf(f[nxh-j+joff]); t1 = f[j+joff] + t2; t2 = (f[j+joff] - t2)*t3; f[j+joff] = t1 + t2; f[nxh-j+joff] = conjf(t1 - t2); } f[nxhh+joff] = 2.0*conjf(f[nxhh+joff]); f[joff] = (crealf(f[joff]) + cimagf(f[joff])) + (crealf(f[joff]) - cimagf(f[joff]))*_Complex_I; /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { /* t1 = f[j1+joff]; */ v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&f[j1+joff]); /* f[j1+joff] = f[j+joff]; */ v_t2 = _mm_loadl_pi(v_t2,(__m64 *)&f[j+joff]); _mm_storel_pi((__m64 *)&f[j1+joff],v_t2); /* f[j+joff] = t1; */ _mm_storel_pi((__m64 *)&f[j+joff],v_t1); } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; nss = 2*(ns/2); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nss; j+=2) { /* t1 = conjf(sct[kmr*j]); */ v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&sct[kmr*j]); v_t1 = _mm_loadh_pi(v_t1,(__m64 *)&sct[kmr*j+kmr]); v_t1 = _mm_mul_ps(v_t1,v_n); /* t2 = t1*f[j+k2+joff]; */ v_t2 = _mm_load_ps((float *)&f[j+k2+joff]); v_t3 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,160)); v_t2 = _mm_shuffle_ps(v_t2,v_t2,177); v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,245)); v_t2 = _mm_add_ps(v_t3,_mm_mul_ps(v_t2,v_m)); /* f[j+k2+joff] = f[j+k1+joff] - t2; */ v_t3 = _mm_load_ps((float *)&f[j+k1+joff]); _mm_store_ps((float *)&f[j+k2+joff],_mm_sub_ps(v_t3,v_t2)); /* f[j+k1+joff] += t2; */ _mm_store_ps((float *)&f[j+k1+joff],_mm_add_ps(v_t3,v_t2)); } /* loop over remaining elements */ for (j = nss; j < ns; j++) { t1 = conjf(sct[kmr*j]); t2 = t1*f[j+k2+joff]; f[j+k2+joff] = f[j+k1+joff] - t2; f[j+k1+joff] += t2; } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void csse2fft2rmxy(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nxi, int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) { /* this subroutine performs the y part of a two dimensional real to complex fast fourier transform and its inverse, for a subset of x, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny indx/indy = exponent which determines length in x/y direction, where nx=2**indx, ny=2**indy if isign = -1, an inverse fourier transform in y is performed f[m][n] = sum(f[k][j]*exp(-sqrt(-1)*2pi*m*k/ny)) if isign = 1, a forward fourier transform in y is performed f[k][j] = sum(f[m][n]*exp(sqrt(-1)*2pi*m*k/ny)) mixup = array of bit reversed addresses sct = sine/cosine table nxi = initial x index used nxp = number of x indices used nxhd = first dimension of f >= nx/2 nyd = second dimension of f >= ny nxhyd = maximum of (nx/2,ny) nxyhd = maximum of (nx,ny)/2 fourier coefficients are stored as follows: f[k][j] = real, imaginary part of mode j,k, where 0 <= j < nx/2 and 0 <= k < ny, except for f[k][1] = real, imaginary part of mode nx/2,k, where ny/2+1 <= k < ny, and imag(f[0][0]) = real part of mode nx/2,0 and imag(f[0][ny/2]) = real part of mode nx/2,ny/2 written by viktor k. 
decyk, ucla requires SSE2, f needs to be 16 byte aligned nxhd needs to be a multiple of 2, and nxi needs to be odd local data */ int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt; int nry, i, j, k, l, j1, j2, k1, k2, ns, ns2, km, kmr, nryb, koff; int nss; float complex t1, t2; __m128 v_m, v_n, v_t1, v_t2, v_t3; if (isign==0) return; indx1 = indx - 1; indx1y = indx1 > indy ? indx1 : indy; nx = 1L<<indx; ny = 1L<<indy; nyh = ny/2; nxy = nx > ny ? nx : ny; nxhy = 1L<<indx1y; nxt = nxi + nxp - 1; v_m = _mm_set_ps(1.0f,-1.0f,1.0f,-1.0f); v_n = _mm_set_ps(-1.0f,1.0f,-1.0f,1.0f); if (isign > 0) goto L70; /* inverse fourier transform */ nryb = nxhy/ny; nry = nxy/ny; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,nss,km,kmr,k1,k2,j1,j2,koff,t1,t2,v_t1,v_t2,v_t3) for (i = nxi-1; i < nxt; i++) { /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { koff = nxhd*k; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd*k1; /* t1 = f[i+k1]; */ v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&f[i+k1]); /* f[i+k1] = f[i+koff]; */ v_t2 = _mm_loadl_pi(v_t2,(__m64 *)&f[i+koff]); _mm_storel_pi((__m64 *)&f[i+k1],v_t2); /* f[i+koff] = t1; */ _mm_storel_pi((__m64 *)&f[i+koff],v_t1); } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; nss = 2*(ns/2); /* vector loop over elements in blocks of 2 */ for (j = 0; j < nss; j+=2) { j1 = nxhd*(j + k1); j2 = nxhd*(j + k2); /* t1 = sct[kmr*j]; */ v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&sct[kmr*j]); v_t1 = _mm_loadh_pi(v_t1,(__m64 *)&sct[kmr*j+kmr]); /* t2 = t1*f[i+j2]; */ v_t2 = _mm_loadl_pi(v_t2,(__m64 *)&f[i+j2]); v_t2 = _mm_loadh_pi(v_t2,(__m64 *)&f[i+j2+nxhd]); v_t3 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,160)); v_t2 = _mm_shuffle_ps(v_t2,v_t2,177); v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,245)); v_t2 = _mm_add_ps(v_t3,_mm_mul_ps(v_t2,v_m)); /* f[i+j2] = f[i+j1] - t2; */ v_t3 = _mm_loadl_pi(v_t3,(__m64 *)&f[i+j1]); v_t3 = _mm_loadh_pi(v_t3,(__m64 *)&f[i+j1+nxhd]); v_t1 = _mm_sub_ps(v_t3,v_t2); _mm_storel_pi((__m64 *)&f[i+j2],v_t1); _mm_storeh_pi((__m64 *)&f[i+j2+nxhd],v_t1); /* f[i+j1] += t2; */ v_t1 = _mm_add_ps(v_t3,v_t2); _mm_storel_pi((__m64 *)&f[i+j1],v_t1); _mm_storeh_pi((__m64 *)&f[i+j1+nxhd],v_t1); } /* loop over remaining elements */ for (j = nss; j < ns; j++) { j1 = nxhd*(j + k1); j2 = nxhd*(j + k2); t1 = sct[kmr*j]; t2 = t1*f[i+j2]; f[i+j2] = f[i+j1] - t2; f[i+j1] += t2; } } ns = ns2; } } /* unscramble modes kx = 0, nx/2 */ if (nxi==1) { for (k = 1; k < nyh; k++) { koff = nxhd*k; k1 = nxhd*ny - koff; t1 = f[k1]; f[k1] = 0.5*(cimagf(f[koff] + t1) + crealf(f[koff] - t1)*_Complex_I); f[koff] = 0.5*(crealf(f[koff] + t1) + cimagf(f[koff] - t1)*_Complex_I); } } return; /* forward fourier transform */ L70: nryb = nxhy/ny; nry = nxy/ny; /* scramble modes kx = 0, nx/2 */ if (nxi==1) { for (k = 1; k < nyh; k++) { koff = nxhd*k; k1 = nxhd*ny - koff; t1 = cimagf(f[k1]) + crealf(f[k1])*_Complex_I; f[k1] = conjf(f[koff] - t1); f[koff] += t1; } } #pragma omp parallel for \ private(i,j,k,l,ns,ns2,nss,km,kmr,k1,k2,j1,j2,koff,t1,t2,v_t1,v_t2,v_t3) for (i = nxi-1; i < nxt; i++) { /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { koff = nxhd*k; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = nxhd*k1; /* t1 = f[i+k1]; */ v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&f[i+k1]); /* f[i+k1] = f[i+koff]; */ v_t2 = _mm_loadl_pi(v_t2,(__m64 *)&f[i+koff]); _mm_storel_pi((__m64 *)&f[i+k1],v_t2); /* f[i+koff] = t1; */ _mm_storel_pi((__m64 *)&f[i+koff],v_t1); } } /* then transform in 
y */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            nss = 2*(ns/2);
/* vector loop over elements in blocks of 2 */
            for (j = 0; j < nss; j+=2) {
               j1 = nxhd*(j + k1);
               j2 = nxhd*(j + k2);
/* t1 = conjf(sct[kmr*j]); */
               v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&sct[kmr*j]);
               v_t1 = _mm_loadh_pi(v_t1,(__m64 *)&sct[kmr*j+kmr]);
               v_t1 = _mm_mul_ps(v_t1,v_n);
/* t2 = t1*f[i+j2]; */
               v_t2 = _mm_loadl_pi(v_t2,(__m64 *)&f[i+j2]);
               v_t2 = _mm_loadh_pi(v_t2,(__m64 *)&f[i+j2+nxhd]);
               v_t3 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,160));
               v_t2 = _mm_shuffle_ps(v_t2,v_t2,177);
               v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,245));
               v_t2 = _mm_add_ps(v_t3,_mm_mul_ps(v_t2,v_m));
/* f[i+j2] = f[i+j1] - t2; */
               v_t3 = _mm_loadl_pi(v_t3,(__m64 *)&f[i+j1]);
               v_t3 = _mm_loadh_pi(v_t3,(__m64 *)&f[i+j1+nxhd]);
               v_t1 = _mm_sub_ps(v_t3,v_t2);
               _mm_storel_pi((__m64 *)&f[i+j2],v_t1);
               _mm_storeh_pi((__m64 *)&f[i+j2+nxhd],v_t1);
/* f[i+j1] += t2; */
               v_t1 = _mm_add_ps(v_t3,v_t2);
               _mm_storel_pi((__m64 *)&f[i+j1],v_t1);
               _mm_storeh_pi((__m64 *)&f[i+j1+nxhd],v_t1);
            }
/* loop over remaining elements */
            for (j = nss; j < ns; j++) {
               j1 = nxhd*(j + k1);
               j2 = nxhd*(j + k2);
               t1 = conjf(sct[kmr*j]);
               t2 = t1*f[i+j2];
               f[i+j2] = f[i+j1] - t2;
               f[i+j1] += t2;
            }
         }
         ns = ns2;
      }
   }
   return;
}

/*--------------------------------------------------------------------*/
void csse2fft2rm3x(float complex f[], int isign, int mixup[],
                   float complex sct[], int indx, int indy, int nyi,
                   int nyp, int nxhd, int nyd, int nxhyd, int nxyhd) {
/* this subroutine performs the x part of 3 two dimensional real to
   complex fast fourier transforms, and their inverses, for a subset of
   y, using complex arithmetic, with OpenMP
   for isign = (-1,1), input: all, output: f
   for isign = -1, approximate flop count: N*(5*log2(N) + 19/2)
   for isign = 1, approximate flop count: N*(5*log2(N) + 15/2)
   where N = (nx/2)*ny
   indx/indy = exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, three inverse fourier transforms are performed
   f[m][n][0:2] = (1/nx*ny)*sum(f[k][j][0:2]*
                  exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, three forward fourier transforms are performed
   f[k][j][0:2] = sum(f[m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
                  exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nyi = initial y index used
   nyp = number of y indices used
   nxhd = second dimension of f >= nx/2
   nyd = third dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j][0:2] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1][0:2] = real, imaginary part of mode nx/2,k,
   where ny/2+1 <= k < ny, and
   imag(f[0][0][0:2]) = real part of mode nx/2,0 and
   imag(f[0][ny/2][0:2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
   requires SSE2, f needs to be 16 byte aligned
   f needs to have 4 components
local data                                                            */
   int indx1, indx1y, nx, nxh, nxhh, ny, nxy, nxhy, nyt;
   int nrx, i, j, k, l, jj, j1, k1, k2, ns, ns2, km, kmr, joff;
   int nrxb;
   float ani;
/* float complex t1, t2, t3, t4; */
   __m128 v_m, v_n, v_t1, v_t2, v_t3, v_t4, v_t5, v_ani;
   if (isign==0) return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   nxh = nx/2;
   nxhh = nx/4;
   ny = 1L<<indy;
   nxy = nx > ny ?
nx : ny; nxhy = 1L<<indx1y; nyt = nyi + nyp - 1; v_m = _mm_set_ps(1.0f,-1.0f,1.0f,-1.0f); v_n = _mm_set_ps(-1.0f,1.0f,-1.0f,1.0f); v_t1 = _mm_setzero_ps(); v_t3 = _mm_setzero_ps(); if (isign > 0) goto L100; /* inverse fourier transform */ nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,joff,ani,v_t1,v_t2,v_t3, \ v_t4,v_t5,v_ani) for (i = nyi-1; i < nyt; i++) { joff = 4*nxhd*i; /* swap complex components */ for (j = 0; j < nxh; j++) { v_t1 = _mm_load_ps((float *)&f[4*j+joff]); v_t2 = _mm_load_ps((float *)&f[2+4*j+joff]); v_t3 = _mm_movelh_ps(v_t1,v_t2); v_t3 = _mm_shuffle_ps(v_t3,v_t3,216); _mm_store_ps((float *)&f[4*j+joff],v_t3); v_t3 = _mm_movehl_ps(v_t2,v_t1); v_t3 = _mm_shuffle_ps(v_t3,v_t3,216); _mm_store_ps((float *)&f[2+4*j+joff],v_t3); } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { /* t1 = f[4*j1+joff]; */ /* t2 = f[1+4*j1+joff]; */ /* t3 = f[2+4*j1+joff]; */ v_t1 = _mm_load_ps((float *)&f[4*j1+joff]); v_t3 = _mm_load_ps((float *)&f[2+4*j1+joff]); /* f[4*j1+joff] = f[4*j+joff]; */ /* f[1+4*j1+joff] = f[1+4*j+joff]; */ /* f[2+4*j1+joff] = f[2+4*j+joff]; */ v_t2 = _mm_load_ps((float *)&f[4*j+joff]); _mm_store_ps((float *)&f[4*j1+joff],v_t2); v_t2 = _mm_load_ps((float *)&f[2+4*j+joff]); _mm_store_ps((float *)&f[2+4*j1+joff],v_t2); /* f[4*j+joff] = t1; */ /* f[1+4*j+joff] = t2; */ /* f[2+4*j+joff] = t3; */ _mm_store_ps((float *)&f[4*j+joff],v_t1); _mm_store_ps((float *)&f[2+4*j+joff],v_t3); } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = 4*ns2*k; k2 = k1 + 4*ns; for (j = 0; j < ns; j++) { /* t1 = sct[kmr*j]; */ v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&sct[kmr*j]); v_t1 = _mm_movelh_ps(v_t1,v_t1); /* t2 = t1*f[4*j+k2+joff]; */ /* t3 = t1*f[1+4*j+k2+joff]; */ v_t2 = _mm_load_ps((float *)&f[4*j+k2+joff]); v_t3 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,160)); v_t2 = _mm_shuffle_ps(v_t2,v_t2,177); v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,245)); v_t2 = _mm_add_ps(v_t3,_mm_mul_ps(v_t2,v_m)); /* t4 = t1*f[2+4*j+k2+joff]; */ v_t4 = _mm_load_ps((float *)&f[2+4*j+k2+joff]); v_t3 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t1,v_t1,160)); v_t4 = _mm_shuffle_ps(v_t4,v_t4,177); v_t4 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t1,v_t1,245)); v_t4 = _mm_add_ps(v_t3,_mm_mul_ps(v_t4,v_m)); /* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */ /* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */ v_t3 = _mm_load_ps((float *)&f[4*j+k1+joff]); v_t5 = _mm_sub_ps(v_t3,v_t2); _mm_store_ps((float *)&f[4*j+k2+joff],v_t5); /* f[4*j+k1+joff] += t2; */ /* f[1+4*j+k1+joff] += t3; */ v_t2 = _mm_add_ps(v_t3,v_t2); _mm_store_ps((float *)&f[4*j+k1+joff],v_t2); /* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */ v_t3 = _mm_load_ps((float *)&f[2+4*j+k1+joff]); v_t5 = _mm_sub_ps(v_t3,v_t4); _mm_store_ps((float *)&f[2+4*j+k2+joff],v_t5); /* f[2+4*j+k1+joff] += t4; */ v_t4 = _mm_add_ps(v_t3,v_t4); _mm_store_ps((float *)&f[2+4*j+k1+joff],v_t4); } } ns = ns2; } /* unscramble coefficients and normalize */ kmr = nxy/nx; ani = 0.5/(((float) nx)*((float) ny)); v_ani = _mm_set1_ps(ani); for (j = 1; j < nxhh; j++) { /* t3 = cimagf(sct[kmr*j]) - crealf(sct[kmr*j])*_Complex_I; */ v_t3 = _mm_loadl_pi(v_t3,(__m64 *)&sct[kmr*j]); v_t3 = _mm_movelh_ps(v_t3,v_t3); v_t3 = _mm_mul_ps(v_t3,v_m); v_t3 = _mm_shuffle_ps(v_t3,v_t3,177); /* t2 = conjf(f[jj+4*(nxh-j)+joff]); */ v_t2 = _mm_load_ps((float *)&f[4*(nxh-j)+joff]); v_t2 = _mm_mul_ps(v_t2,v_n); v_t4 = 
_mm_load_ps((float *)&f[2+4*(nxh-j)+joff]); v_t4 = _mm_mul_ps(v_t4,v_n); /* first block, jj=1:2 */ /* t1 = f[jj+4*j+joff] + t2; */ v_t5 = _mm_load_ps((float *)&f[4*j+joff]); v_t1 = _mm_add_ps(v_t5,v_t2); /* t2 = (f[jj+4*j+joff] - t2)*t3; */ v_t2 = _mm_sub_ps(v_t5,v_t2); v_t5 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t3,v_t3,160)); v_t2 = _mm_shuffle_ps(v_t2,v_t2,177); v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t3,v_t3,245)); v_t2 = _mm_add_ps(v_t5,_mm_mul_ps(v_t2,v_m)); /* f[jj+4*j+joff] = ani*(t1 + t2); */ v_t5 = _mm_mul_ps(v_ani,_mm_add_ps(v_t1,v_t2)); _mm_store_ps((float *)&f[4*j+joff],v_t5); /* f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2); */ v_t5 = _mm_mul_ps(v_ani,_mm_mul_ps(_mm_sub_ps(v_t1,v_t2),v_n)); _mm_store_ps((float *)&f[4*(nxh-j)+joff],v_t5); /* second block, jj=3:4 */ /* t1 = f[jj+4*j+joff] + t2; */ v_t5 = _mm_load_ps((float *)&f[2+4*j+joff]); v_t1 = _mm_add_ps(v_t5,v_t4); /* t2 = (f[jj+4*j+joff] - t2)*t3; */ v_t4 = _mm_sub_ps(v_t5,v_t4); v_t5 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t3,v_t3,160)); v_t4 = _mm_shuffle_ps(v_t4,v_t4,177); v_t4 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t3,v_t3,245)); v_t4 = _mm_add_ps(v_t5,_mm_mul_ps(v_t4,v_m)); /* f[jj+4*j+joff] = ani*(t1 + t2); */ v_t5 = _mm_mul_ps(v_ani,_mm_add_ps(v_t1,v_t4)); _mm_store_ps((float *)&f[2+4*j+joff],v_t5); /* f[jj+4*(nxh-j)+joff] = ani*conjf(t1 - t2); */ v_t5 = _mm_mul_ps(v_ani,_mm_mul_ps(_mm_sub_ps(v_t1,v_t4),v_n)); _mm_store_ps((float *)&f[2+4*(nxh-j)+joff],v_t5); } ani = 2.0*ani; for (jj = 0; jj < 3; jj++) { f[jj+4*nxhh+joff] = ani*conjf(f[jj+4*nxhh+joff]); f[jj+joff] = ani*((crealf(f[jj+joff]) + cimagf(f[jj+joff])) + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I); } } return; /* forward fourier transform */ L100: nrxb = nxhy/nxh; nrx = nxy/nxh; #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,jj,j1,joff,v_t1,v_t2,v_t3,v_t4, \ v_t5,v_ani) for (i = nyi-1; i < nyt; i++) { joff = 4*nxhd*i; /* scramble coefficients */ kmr = nxy/nx; for (j = 1; j < nxhh; j++) { /* t3 = cimagf(sct[kmr*j]) + crealf(sct[kmr*j])*_Complex_I; */ v_t3 = _mm_loadl_pi(v_t3,(__m64 *)&sct[kmr*j]); v_t3 = _mm_movelh_ps(v_t3,v_t3); v_t3 = _mm_shuffle_ps(v_t3,v_t3,177); /* t2 = conjf(f[jj+4*(nxh-j)+joff]); */ v_t2 = _mm_load_ps((float *)&f[4*(nxh-j)+joff]); v_t2 = _mm_mul_ps(v_t2,v_n); v_t4 = _mm_load_ps((float *)&f[2+4*(nxh-j)+joff]); v_t4 = _mm_mul_ps(v_t4,v_n); /* first block, jj=1:2 */ /* t1 = f[jj+4*j+joff] + t2; */ v_t5 = _mm_load_ps((float *)&f[4*j+joff]); v_t1 = _mm_add_ps(v_t5,v_t2); /* t2 = (f[jj+4*j+joff] - t2)*t3; */ v_t2 = _mm_sub_ps(v_t5,v_t2); v_t5 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t3,v_t3,160)); v_t2 = _mm_shuffle_ps(v_t2,v_t2,177); v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t3,v_t3,245)); v_t2 = _mm_add_ps(v_t5,_mm_mul_ps(v_t2,v_m)); /* f[jj+4*j+joff] = t1 + t2; */ v_t5 = _mm_add_ps(v_t1,v_t2); _mm_store_ps((float *)&f[4*j+joff],v_t5); /* f[jj+4*(nxh-j)+joff] = conjf(t1 - t2); */ v_t5 = _mm_mul_ps(_mm_sub_ps(v_t1,v_t2),v_n); _mm_store_ps((float *)&f[4*(nxh-j)+joff],v_t5); /* second block, jj=3:4 */ /* t1 = f[jj+4*j+joff] + t2; */ v_t5 = _mm_load_ps((float *)&f[2+4*j+joff]); v_t1 = _mm_add_ps(v_t5,v_t4); /* t2 = (f[jj+4*j+joff] - t2)*t3; */ v_t4 = _mm_sub_ps(v_t5,v_t4); v_t5 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t3,v_t3,160)); v_t4 = _mm_shuffle_ps(v_t4,v_t4,177); v_t4 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t3,v_t3,245)); v_t4 = _mm_add_ps(v_t5,_mm_mul_ps(v_t4,v_m)); /* f[jj+4*j+joff] = t1 + t2; */ v_t5 = _mm_add_ps(v_t1,v_t4); _mm_store_ps((float *)&f[2+4*j+joff],v_t5); /* f[jj+4*(nxh-j)+joff] = conjf(t1 - t2); */ v_t5 = 
_mm_mul_ps(_mm_sub_ps(v_t1,v_t4),v_n); _mm_store_ps((float *)&f[2+4*(nxh-j)+joff],v_t5); } for (jj = 0; jj < 3; jj++) { f[jj+4*nxhh+joff] = 2.0*conjf(f[jj+4*nxhh+joff]); f[jj+joff] = (crealf(f[jj+joff]) + cimagf(f[jj+joff])) + (crealf(f[jj+joff]) - cimagf(f[jj+joff]))*_Complex_I; } /* bit-reverse array elements in x */ for (j = 0; j < nxh; j++) { j1 = (mixup[j] - 1)/nrxb; if (j < j1) { /* t1 = f[4*j1+joff]; */ /* t2 = f[1+4*j1+joff]; */ /* t3 = f[2+4*j1+joff]; */ v_t1 = _mm_load_ps((float *)&f[4*j1+joff]); v_t3 = _mm_load_ps((float *)&f[2+4*j1+joff]); /* f[4*j1+joff] = f[4*j+joff]; */ /* f[1+4*j1+joff] = f[1+4*j+joff]; */ /* f[2+4*j1+joff] = f[2+4*j+joff]; */ v_t2 = _mm_load_ps((float *)&f[4*j+joff]); _mm_store_ps((float *)&f[4*j1+joff],v_t2); v_t2 = _mm_load_ps((float *)&f[2+4*j+joff]); _mm_store_ps((float *)&f[2+4*j1+joff],v_t2); /* f[4*j+joff] = t1; */ /* f[1+4*j+joff] = t2; */ /* f[2+4*j+joff] = t3; */ _mm_store_ps((float *)&f[4*j+joff],v_t1); _mm_store_ps((float *)&f[2+4*j+joff],v_t3); } } /* then transform in x */ ns = 1; for (l = 0; l < indx1; l++) { ns2 = ns + ns; km = nxhh/ns; kmr = km*nrx; for (k = 0; k < km; k++) { k1 = 4*ns2*k; k2 = k1 + 4*ns; for (j = 0; j < ns; j++) { /* t1 = conjf(sct[kmr*j]); */ v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&sct[kmr*j]); v_t1 = _mm_movelh_ps(v_t1,v_t1); v_t1 = _mm_mul_ps(v_t1,v_n); /* t2 = t1*f[4*j+k2+joff]; */ /* t3 = t1*f[1+4*j+k2+joff]; */ v_t2 = _mm_load_ps((float *)&f[4*j+k2+joff]); v_t3 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,160)); v_t2 = _mm_shuffle_ps(v_t2,v_t2,177); v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,245)); v_t2 = _mm_add_ps(v_t3,_mm_mul_ps(v_t2,v_m)); /* t4 = t1*f[2+4*j+k2+joff]; */ v_t4 = _mm_load_ps((float *)&f[2+4*j+k2+joff]); v_t3 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t1,v_t1,160)); v_t4 = _mm_shuffle_ps(v_t4,v_t4,177); v_t4 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t1,v_t1,245)); v_t4 = _mm_add_ps(v_t3,_mm_mul_ps(v_t4,v_m)); /* f[4*j+k2+joff] = f[4*j+k1+joff] - t2; */ /* f[1+4*j+k2+joff] = f[1+4*j+k1+joff] - t3; */ v_t3 = _mm_load_ps((float *)&f[4*j+k1+joff]); v_t5 = _mm_sub_ps(v_t3,v_t2); _mm_store_ps((float *)&f[4*j+k2+joff],v_t5); /* f[4*j+k1+joff] += t2; */ /* f[1+4*j+k1+joff] += t3; */ v_t2 = _mm_add_ps(v_t3,v_t2); _mm_store_ps((float *)&f[4*j+k1+joff],v_t2); /* f[2+4*j+k2+joff] = f[2+4*j+k1+joff] - t4; */ v_t3 = _mm_load_ps((float *)&f[2+4*j+k1+joff]); v_t5 = _mm_sub_ps(v_t3,v_t4); _mm_store_ps((float *)&f[2+4*j+k2+joff],v_t5); /* f[2+4*j+k1+joff] += t4; */ v_t4 = _mm_add_ps(v_t3,v_t4); _mm_store_ps((float *)&f[2+4*j+k1+joff],v_t4); } } ns = ns2; } /* swap complex components */ for (j = 0; j < nxh; j++) { v_t1 = _mm_load_ps((float *)&f[4*j+joff]); v_t2 = _mm_load_ps((float *)&f[2+4*j+joff]); v_t1 = _mm_shuffle_ps(v_t1,v_t1,216); v_t2 = _mm_shuffle_ps(v_t2,v_t2,216); v_t3 = _mm_movelh_ps(v_t1,v_t2); _mm_store_ps((float *)&f[4*j+joff],v_t3); v_t3 = _mm_movehl_ps(v_t2,v_t1); _mm_store_ps((float *)&f[2+4*j+joff],v_t3); } } return; } /*--------------------------------------------------------------------*/ void csse2fft2rm3y(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nxi, int nxp, int nxhd, int nyd, int nxhyd, int nxyhd) { /* this subroutine performs the y part of 3 two dimensional real to complex fast fourier transforms, and their inverses, for a subset of x, using complex arithmetic, with OpenMP for isign = (-1,1), input: all, output: f for isign = -1, approximate flop count: N*(5*log2(N) + 19/2) for isign = 1, approximate flop count: N*(5*log2(N) + 15/2) where N = (nx/2)*ny indx/indy 
= exponent which determines length in x/y direction,
   where nx=2**indx, ny=2**indy
   if isign = -1, three inverse fourier transforms are performed
   f[m][n][0:2] = (1/nx*ny)*sum(f[k][j][0:2]*
                  exp(-sqrt(-1)*2pi*n*j/nx)*exp(-sqrt(-1)*2pi*m*k/ny))
   if isign = 1, three forward fourier transforms are performed
   f[k][j][0:2] = sum(f[m][n][0:2]*exp(sqrt(-1)*2pi*n*j/nx)*
                  exp(sqrt(-1)*2pi*m*k/ny))
   mixup = array of bit reversed addresses
   sct = sine/cosine table
   nxi = initial x index used
   nxp = number of x indices used
   nxhd = second dimension of f >= nx/2
   nyd = third dimension of f >= ny
   nxhyd = maximum of (nx/2,ny)
   nxyhd = maximum of (nx,ny)/2
   fourier coefficients are stored as follows:
   f[k][j][0:2] = real, imaginary part of mode j,k, where
   0 <= j < nx/2 and 0 <= k < ny, except for
   f[k][1][0:2] = real, imaginary part of mode nx/2,k,
   where ny/2+1 <= k < ny, and
   imag(f[0][0][0:2]) = real part of mode nx/2,0 and
   imag(f[0][ny/2][0:2]) = real part of mode nx/2,ny/2
   written by viktor k. decyk, ucla
   requires SSE2, f needs to be 16 byte aligned
   f needs to have 4 components
local data                                                            */
   int indx1, indx1y, nx, ny, nyh, nxy, nxhy, nxt;
   int nry, i, j, k, l, jj, j1, j2, k1, k2, ns, ns2, km, kmr, koff;
   int nryb;
   float complex t1;
   __m128 v_m, v_n, v_t1, v_t2, v_t3, v_t4, v_t5;
   if (isign==0) return;
   indx1 = indx - 1;
   indx1y = indx1 > indy ? indx1 : indy;
   nx = 1L<<indx;
   ny = 1L<<indy;
   nyh = ny/2;
   nxy = nx > ny ? nx : ny;
   nxhy = 1L<<indx1y;
   nxt = nxi + nxp - 1;
   v_m = _mm_set_ps(1.0f,-1.0f,1.0f,-1.0f);
   v_n = _mm_set_ps(-1.0f,1.0f,-1.0f,1.0f);
   v_t1 = _mm_setzero_ps();
   if (isign > 0) goto L80;
/* inverse fourier transform */
   nryb = nxhy/ny;
   nry = nxy/ny;
#pragma omp parallel for \
private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,v_t1,v_t2,v_t3,v_t4,v_t5)
   for (i = nxi-1; i < nxt; i++) {
/* bit-reverse array elements in y */
      for (k = 0; k < ny; k++) {
         koff = 4*nxhd*k;
         k1 = (mixup[k] - 1)/nryb;
         if (k < k1) {
            k1 = 4*nxhd*k1;
/* t1 = f[4*i+k1]; */
/* t2 = f[1+4*i+k1]; */
/* t3 = f[2+4*i+k1]; */
            v_t1 = _mm_load_ps((float *)&f[4*i+k1]);
            v_t3 = _mm_load_ps((float *)&f[2+4*i+k1]);
/* f[4*i+k1] = f[4*i+koff]; */
/* f[1+4*i+k1] = f[1+4*i+koff]; */
/* f[2+4*i+k1] = f[2+4*i+koff]; */
            v_t2 = _mm_load_ps((float *)&f[4*i+koff]);
            _mm_store_ps((float *)&f[4*i+k1],v_t2);
            v_t2 = _mm_load_ps((float *)&f[2+4*i+koff]);
            _mm_store_ps((float *)&f[2+4*i+k1],v_t2);
/* f[4*i+koff] = t1; */
/* f[1+4*i+koff] = t2; */
/* f[2+4*i+koff] = t3; */
            _mm_store_ps((float *)&f[4*i+koff],v_t1);
            _mm_store_ps((float *)&f[2+4*i+koff],v_t3);
         }
      }
/* then transform in y */
      ns = 1;
      for (l = 0; l < indy; l++) {
         ns2 = ns + ns;
         km = nyh/ns;
         kmr = km*nry;
         for (k = 0; k < km; k++) {
            k1 = ns2*k;
            k2 = k1 + ns;
            for (j = 0; j < ns; j++) {
               j1 = 4*nxhd*(j + k1);
               j2 = 4*nxhd*(j + k2);
/* t1 = sct[kmr*j]; */
               v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&sct[kmr*j]);
               v_t1 = _mm_movelh_ps(v_t1,v_t1);
/* t2 = t1*f[4*i+j2]; */
/* t3 = t1*f[1+4*i+j2]; */
               v_t2 = _mm_load_ps((float *)&f[4*i+j2]);
               v_t3 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,160));
               v_t2 = _mm_shuffle_ps(v_t2,v_t2,177);
               v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,245));
               v_t2 = _mm_add_ps(v_t3,_mm_mul_ps(v_t2,v_m));
/* t4 = t1*f[2+4*i+j2]; */
               v_t4 = _mm_load_ps((float *)&f[2+4*i+j2]);
               v_t3 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t1,v_t1,160));
               v_t4 = _mm_shuffle_ps(v_t4,v_t4,177);
               v_t4 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t1,v_t1,245));
               v_t4 = _mm_add_ps(v_t3,_mm_mul_ps(v_t4,v_m));
/* f[4*i+j2] = f[4*i+j1] - t2; */
/* f[1+4*i+j2] = f[1+4*i+j1] - t3; */
               v_t3 = _mm_load_ps((float *)&f[4*i+j1]);
               v_t5 = _mm_sub_ps(v_t3,v_t2);
               _mm_store_ps((float *)&f[4*i+j2],v_t5);
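/* at this point one radix-2 butterfly is half done: the twiddled term t2
   for components 0:1 has been formed and the difference stored to
   f[4*i+j2]; the sum update of f[4*i+j1] and the same steps for
   component 2 follow.  in scalar form, for each component jj, the
   inverse-transform butterfly reads:
      t = sct[kmr*j]*f[jj+4*i+j2];
      f[jj+4*i+j2] = f[jj+4*i+j1] - t;
      f[jj+4*i+j1] += t;
   (the forward transform below uses conjf(sct[kmr*j]) instead)        */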
*)&f[4*i+j2],v_t5); /* f[4*i+j1] += t2; */ /* f[1+4*i+j1] += t3; */ v_t2 = _mm_add_ps(v_t3,v_t2); _mm_store_ps((float *)&f[4*i+j1],v_t2); /* f[2+4*i+j2] = f[2+4*i+j1] - t4; */ v_t3 = _mm_load_ps((float *)&f[2+4*i+j1]); v_t5 = _mm_sub_ps(v_t3,v_t4); _mm_store_ps((float *)&f[2+4*i+j2],v_t5); /* f[2+4*i+j1] += t4; */ v_t4 = _mm_add_ps(v_t3,v_t4); _mm_store_ps((float *)&f[2+4*i+j1],v_t4); } } ns = ns2; } } /* unscramble modes kx = 0, nx/2 */ if (nxi==1) { for (k = 1; k < nyh; k++) { koff = 4*nxhd*k; k1 = 4*nxhd*ny - koff; for (jj = 0; jj < 3; jj++) { t1 = f[jj+k1]; f[jj+k1] = 0.5*(cimagf(f[jj+koff] + t1) + crealf(f[jj+koff] - t1)*_Complex_I); f[jj+koff] = 0.5*(crealf(f[jj+koff] + t1) + cimagf(f[jj+koff] - t1)*_Complex_I); } } } return; /* forward fourier transform */ L80: nryb = nxhy/ny; nry = nxy/ny; /* scramble modes kx = 0, nx/2 */ if (nxi==1) { for (k = 1; k < nyh; k++) { koff = 4*nxhd*k; k1 = 4*nxhd*ny - koff; for (jj = 0; jj < 3; jj++) { t1 = cimagf(f[jj+k1]) + crealf(f[jj+k1])*_Complex_I; f[jj+k1] = conjf(f[jj+koff] - t1); f[jj+koff] += t1; } } } #pragma omp parallel for \ private(i,j,k,l,ns,ns2,km,kmr,k1,k2,j1,j2,koff,v_t1,v_t2,v_t3,v_t4,v_t5) for (i = nxi-1; i < nxt; i++) { /* bit-reverse array elements in y */ for (k = 0; k < ny; k++) { koff = 4*nxhd*k; k1 = (mixup[k] - 1)/nryb; if (k < k1) { k1 = 4*nxhd*k1; /* t1 = f[4*i+k1]; */ /* t2 = f[1+4*i+k1]; */ /* t3 = f[2+4*i+k1]; */ v_t1 = _mm_load_ps((float *)&f[4*i+k1]); v_t3 = _mm_load_ps((float *)&f[2+4*i+k1]); /* f[4*i+k1] = f[4*i+koff]; */ /* f[1+4*i+k1] = f[1+4*i+koff]; */ /* f[2+4*i+k1] = f[2+4*i+koff]; */ v_t2 = _mm_load_ps((float *)&f[4*i+koff]); _mm_store_ps((float *)&f[4*i+k1],v_t2); v_t2 = _mm_load_ps((float *)&f[2+4*i+koff]); _mm_store_ps((float *)&f[2+4*i+k1],v_t2); /* f[4*i+koff] = t1; */ /* f[1+4*i+koff] = t2; */ /* f[2+4*i+koff] = t3; */ _mm_store_ps((float *)&f[4*i+koff],v_t1); _mm_store_ps((float *)&f[2+4*i+koff],v_t3); } } /* then transform in y */ ns = 1; for (l = 0; l < indy; l++) { ns2 = ns + ns; km = nyh/ns; kmr = km*nry; for (k = 0; k < km; k++) { k1 = ns2*k; k2 = k1 + ns; for (j = 0; j < ns; j++) { j1 = 4*nxhd*(j + k1); j2 = 4*nxhd*(j + k2); /* t1 = conjf(sct[kmr*j]); */ v_t1 = _mm_loadl_pi(v_t1,(__m64 *)&sct[kmr*j]); v_t1 = _mm_movelh_ps(v_t1,v_t1); v_t1 = _mm_mul_ps(v_t1,v_n); /* t2 = t1*f[4*i+j2]; */ /* t3 = t1*f[1+4*i+j2]; */ v_t2 = _mm_load_ps((float *)&f[4*i+j2]); v_t3 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,160)); v_t2 = _mm_shuffle_ps(v_t2,v_t2,177); v_t2 = _mm_mul_ps(v_t2,_mm_shuffle_ps(v_t1,v_t1,245)); v_t2 = _mm_add_ps(v_t3,_mm_mul_ps(v_t2,v_m)); /* t4 = t1*f[2+4*i+j2]; */ v_t4 = _mm_load_ps((float *)&f[2+4*i+j2]); v_t3 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t1,v_t1,160)); v_t4 = _mm_shuffle_ps(v_t4,v_t4,177); v_t4 = _mm_mul_ps(v_t4,_mm_shuffle_ps(v_t1,v_t1,245)); v_t4 = _mm_add_ps(v_t3,_mm_mul_ps(v_t4,v_m)); /* f[4*i+j2] = f[4*i+j1] - t2; */ /* f[1+4*i+j2] = f[1+4*i+j1] - t3; */ v_t3 = _mm_load_ps((float *)&f[4*i+j1]); v_t5 = _mm_sub_ps(v_t3,v_t2); _mm_store_ps((float *)&f[4*i+j2],v_t5); /* f[4*i+j1] += t2; */ /* f[1+4*i+j1] += t3; */ v_t2 = _mm_add_ps(v_t3,v_t2); _mm_store_ps((float *)&f[4*i+j1],v_t2); /* f[2+4*i+j2] = f[2+4*i+j1] - t4; */ v_t3 = _mm_load_ps((float *)&f[2+4*i+j1]); v_t5 = _mm_sub_ps(v_t3,v_t4); _mm_store_ps((float *)&f[2+4*i+j2],v_t5); /* f[2+4*i+j1] += t4; */ v_t4 = _mm_add_ps(v_t3,v_t4); _mm_store_ps((float *)&f[2+4*i+j1],v_t4); } } ns = ns2; } } return; } /*--------------------------------------------------------------------*/ void csse2wfft2rmx(float complex f[], int isign, int 
mixup[], float complex sct[], int indx, int indy, int nxhd, int nyd, int nxhyd, int nxyhd) { /* wrapper function for real to complex fft, with packed data */ /* parallelized with OpenMP */ /* local data */ int nxh, ny; static int nxi = 1, nyi = 1; /* calculate range of indices */ nxh = 1L<<(indx - 1); ny = 1L<<indy; /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ csse2fft2rmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd, nxyhd); /* perform y fft */ csse2fft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd, nxyhd); } /* forward fourier transform */ else if (isign > 0) { /* perform y fft */ csse2fft2rmxy(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd, nxyhd); /* perform x fft */ csse2fft2rmxx(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd, nxyhd); } return; } /*--------------------------------------------------------------------*/ void csse2wfft2rm3(float complex f[], int isign, int mixup[], float complex sct[], int indx, int indy, int nxhd, int nyd, int nxhyd, int nxyhd) { /* wrapper function for 3 2d real to complex ffts, with packed data */ /* parallelized with OpenMP */ /* local data */ int nxh, ny; static int nxi = 1, nyi = 1; /* calculate range of indices */ nxh = 1L<<(indx - 1); ny = 1L<<indy; /* inverse fourier transform */ if (isign < 0) { /* perform x fft */ csse2fft2rm3x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd, nxyhd); /* perform y fft */ csse2fft2rm3y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd, nxyhd); } /* forward fourier transform */ else if (isign > 0) { /* perform y fft */ csse2fft2rm3y(f,isign,mixup,sct,indx,indy,nxi,nxh,nxhd,nyd,nxhyd, nxyhd); /* perform x fft */ csse2fft2rm3x(f,isign,mixup,sct,indx,indy,nyi,ny,nxhd,nyd,nxhyd, nxyhd); } return; } /* Interfaces to Fortran */ /*--------------------------------------------------------------------*/ void csse2gbppush23lt_(float *ppart, float *fxy, float *bxy, int *kpic, float *qbm, float *dt, float *dtc, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1, int *ipbc) { csse2gbppush23lt(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,ek,*idimp,*nppmx, *nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc); return; } /*--------------------------------------------------------------------*/ void csse2gbppushf23lt_(float *ppart, float *fxy, float *bxy, int *kpic, int *ncl, int *ihole, float *qbm, float *dt, float *dtc, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1, int *ntmax, int *irc) { csse2gbppushf23lt(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,ek, *idimp,*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1, *ntmax,irc); return; } /*--------------------------------------------------------------------*/ void csse2grbppush23lt_(float *ppart, float *fxy, float *bxy, int *kpic, float *qbm, float *dt, float *dtc, float *ci, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1, int *ipbc) { csse2grbppush23lt(ppart,fxy,bxy,kpic,*qbm,*dt,*dtc,*ci,ek,*idimp, *nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ipbc); return; } /*--------------------------------------------------------------------*/ void csse2grbppushf23lt_(float *ppart, float *fxy, float *bxy, int *kpic, int *ncl, int *ihole, float *qbm, float *dt, float *dtc, float *ci, float *ek, int *idimp, int *nppmx, int *nx, int *ny, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1, int *ntmax, int *irc) { csse2grbppushf23lt(ppart,fxy,bxy,kpic,ncl,ihole,*qbm,*dt,*dtc,*ci,ek, 
*idimp,*nppmx,*nx,*ny,*mx,*my,*nxv,*nyv,*mx1, *mxy1,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void csse2gppost2lt_(float *ppart, float *q, int *kpic, float *qm, int *nppmx, int *idimp, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1) { csse2gppost2lt(ppart,q,kpic,*qm,*nppmx,*idimp,*mx,*my,*nxv,*nyv,*mx1, *mxy1); return; } /*--------------------------------------------------------------------*/ void csse2gjppost2lt_(float *ppart, float *cu, int *kpic, float *qm, float *dt, int *nppmx, int *idimp, int *nx, int *ny, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1, int *ipbc) { csse2gjppost2lt(ppart,cu,kpic,*qm,*dt,*nppmx,*idimp,*nx,*ny,*mx,*my, *nxv,*nyv,*mx1,*mxy1,*ipbc); return; } /*--------------------------------------------------------------------*/ void csse2gjppostf2lt_(float *ppart, float *cu, int *kpic, int *ncl, int *ihole, float *qm, float *dt, int *nppmx, int *idimp, int *nx, int *ny, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1, int *ntmax, int *irc) { csse2gjppostf2lt(ppart,cu,kpic,ncl,ihole,*qm,*dt,*nppmx,*idimp,*nx, *ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void csse2grjppost2lt_(float *ppart, float *cu, int *kpic, float *qm, float *dt, float *ci, int *nppmx, int *idimp, int *nx, int *ny, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1, int *ipbc) { csse2grjppost2lt(ppart,cu,kpic,*qm,*dt,*ci,*nppmx,*idimp,*nx,*ny,*mx, *my,*nxv,*nyv,*mx1,*mxy1,*ipbc); return; } /*--------------------------------------------------------------------*/ void csse2grjppostf2lt_(float *ppart, float *cu, int *kpic, int *ncl, int *ihole, float *qm, float *dt, float *ci, int *nppmx, int *idimp, int *nx, int *ny, int *mx, int *my, int *nxv, int *nyv, int *mx1, int *mxy1, int *ntmax, int *irc) { csse2grjppostf2lt(ppart,cu,kpic,ncl,ihole,*qm,*dt,*ci,*nppmx,*idimp, *nx,*ny,*mx,*my,*nxv,*nyv,*mx1,*mxy1,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void csse2pporder2lt_(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int *idimp, int *nppmx, int *nx, int *ny, int *mx, int *my, int *mx1, int *my1, int *npbmx, int *ntmax, int *irc) { csse2pporder2lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*nx,*ny, *mx,*my,*mx1,*my1,*npbmx,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void csse2pporderf2lt_(float *ppart, float *ppbuff, int *kpic, int *ncl, int *ihole, int *idimp, int *nppmx, int *mx1, int *my1, int *npbmx, int *ntmax, int *irc) { csse2pporderf2lt(ppart,ppbuff,kpic,ncl,ihole,*idimp,*nppmx,*mx1,*my1, *npbmx,*ntmax,irc); return; } /*--------------------------------------------------------------------*/ void csse2bguard2l_(float *bxy, int *nx, int *ny, int *nxe, int *nye) { csse2bguard2l(bxy,*nx,*ny,*nxe,*nye); return; } /*--------------------------------------------------------------------*/ void csse2acguard2l_(float *cu, int *nx, int *ny, int *nxe, int *nye) { csse2acguard2l(cu,*nx,*ny,*nxe,*nye); return; } /*--------------------------------------------------------------------*/ void csse2aguard2l_(float *q, int *nx, int *ny, int *nxe, int *nye) { csse2aguard2l(q,*nx,*ny,*nxe,*nye); return; } /*--------------------------------------------------------------------*/ void csse2mpois23_(float complex *q, float complex *fxy, int *isign, float complex *ffc, float *ax, float *ay, float *affp, float *we, int *nx, int *ny, int 
*nxvh, int *nyv, int *nxhd, int *nyhd) { csse2mpois23(q,fxy,*isign,ffc,*ax,*ay,*affp,we,*nx,*ny,*nxvh,*nyv, *nxhd,*nyhd); return; } /*--------------------------------------------------------------------*/ void csse2mcuperp2_(float complex *cu, int *nx, int *ny, int *nxvh, int *nyv) { csse2mcuperp2(cu,*nx,*ny,*nxvh,*nyv); return; } /*--------------------------------------------------------------------*/ void csse2mibpois23_(float complex *cu, float complex *bxy, float complex *ffc, float *ci, float *wm, int *nx, int *ny, int *nxvh, int *nyv, int *nxhd, int *nyhd) { csse2mibpois23(cu,bxy,ffc,*ci,wm,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd); return; } /*--------------------------------------------------------------------*/ void csse2mmaxwel2_(float complex *exy, float complex *bxy, float complex *cu, float complex *ffc, float *ci, float *dt, float *wf, float *wm, int *nx, int *ny, int *nxvh, int *nyv, int *nxhd, int *nyhd) { csse2mmaxwel2(exy,bxy,cu,ffc,*ci,*dt,wf,wm,*nx,*ny,*nxvh,*nyv,*nxhd, *nyhd); return; } /*--------------------------------------------------------------------*/ void csse2memfield2_(float complex *fxy, float complex *exy, float complex *ffc, int *isign, int *nx, int *ny, int *nxvh, int *nyv, int *nxhd, int *nyhd) { csse2memfield2(fxy,exy,ffc,*isign,*nx,*ny,*nxvh,*nyv,*nxhd,*nyhd); return; } /*--------------------------------------------------------------------*/ void csse2wfft2rmx_(float complex *f, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) { csse2wfft2rmx(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd, *nxyhd); return; } /*--------------------------------------------------------------------*/ void csse2wfft2rm3_(float complex *f, int *isign, int *mixup, float complex *sct, int *indx, int *indy, int *nxhd, int *nyd, int *nxhyd, int *nxyhd) { csse2wfft2rm3(f,*isign,mixup,sct,*indx,*indy,*nxhd,*nyd,*nxhyd, *nxyhd); return; }
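/*--------------------------------------------------------------------*/
/* Minimal usage sketch for the packed-data wrappers above: runs an
   inverse and then a forward transform with csse2wfft2rm3.  It assumes
   a table-initialization routine cwfft2rinit with the signature declared
   below (the scalar companion of this library provides one; the name and
   signature are assumptions here, not taken from this file), and follows
   the layout rules stated in the per-routine comments: f carries 4
   float complex components per cell and must be 16 byte aligned. */
#include <stdlib.h>
#include <complex.h>
#include <xmmintrin.h>
void cwfft2rinit(int mixup[], float complex sct[], int indx, int indy,
                 int nxhyd, int nxyhd); /* assumed, provided elsewhere */
void example_wfft2rm3_roundtrip(int indx, int indy) {
/* grid sizes and table sizes, per the parameter comments above */
   int nx = 1<<indx, ny = 1<<indy, nxh = nx/2;
   int nxhy = nxh > ny ? nxh : ny;    /* nxhyd = maximum of (nx/2,ny) */
   int nxyh = (nx > ny ? nx : ny)/2;  /* nxyhd = maximum of (nx,ny)/2 */
/* 4 components per cell, 16 byte aligned as the kernels require */
   float complex *f = (float complex *)
      _mm_malloc(4*nxh*ny*sizeof(float complex),16);
   int *mixup = (int *)malloc(nxhy*sizeof(int));
   float complex *sct = (float complex *)malloc(nxyh*sizeof(float complex));
   cwfft2rinit(mixup,sct,indx,indy,nxhy,nxyh);
/* ... fill f with packed real-space data here ... */
   csse2wfft2rm3(f,-1,mixup,sct,indx,indy,nxh,ny,nxhy,nxyh); /* inverse */
   csse2wfft2rm3(f,1,mixup,sct,indx,indy,nxh,ny,nxhy,nxyh);  /* forward */
   _mm_free(f);
   free(mixup);
   free(sct);
}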
c-decl.c
/* Process declarations and variables for C compiler. Copyright (C) 1988-2020 Free Software Foundation, Inc. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. */ /* Process declarations and symbol lookup for C front end. Also constructs types; the standard scalar types at initialization, and structure, union, array and enum types when they are declared. */ /* ??? not all decl nodes are given the most useful possible line numbers. For example, the CONST_DECLs for enum values. */ #include "config.h" #define INCLUDE_UNIQUE_PTR #include "system.h" #include "coretypes.h" #include "target.h" #include "function.h" #include "c-tree.h" #include "timevar.h" #include "stringpool.h" #include "cgraph.h" #include "intl.h" #include "print-tree.h" #include "stor-layout.h" #include "varasm.h" #include "attribs.h" #include "toplev.h" #include "debug.h" #include "c-family/c-objc.h" #include "c-family/c-pragma.h" #include "c-family/c-ubsan.h" #include "c-lang.h" #include "langhooks.h" #include "tree-iterator.h" #include "dumpfile.h" #include "plugin.h" #include "c-family/c-ada-spec.h" #include "builtins.h" #include "spellcheck-tree.h" #include "gcc-rich-location.h" #include "asan.h" #include "c-family/name-hint.h" #include "c-family/known-headers.h" #include "c-family/c-spellcheck.h" /* In grokdeclarator, distinguish syntactic contexts of declarators. */ enum decl_context { NORMAL, /* Ordinary declaration */ FUNCDEF, /* Function definition */ PARM, /* Declaration of parm before function body */ FIELD, /* Declaration inside struct or union */ TYPENAME}; /* Typename (inside cast or sizeof) */ /* States indicating how grokdeclarator() should handle declspecs marked with __attribute__((deprecated)). An object declared as __attribute__((deprecated)) suppresses warnings of uses of other deprecated items. */ enum deprecated_states { DEPRECATED_NORMAL, DEPRECATED_SUPPRESS }; /* Nonzero if we have seen an invalid cross reference to a struct, union, or enum, but not yet printed the message. */ tree pending_invalid_xref; /* File and line to appear in the eventual error message. */ location_t pending_invalid_xref_location; /* The file and line that the prototype came from if this is an old-style definition; used for diagnostics in store_parm_decls_oldstyle. */ static location_t current_function_prototype_locus; /* Whether this prototype was built-in. */ static bool current_function_prototype_built_in; /* The argument type information of this prototype. */ static tree current_function_prototype_arg_types; /* The argument information structure for the function currently being defined. */ static struct c_arg_info *current_function_arg_info; /* The obstack on which parser and related data structures, which are not live beyond their top-level declaration or definition, are allocated. */ struct obstack parser_obstack; /* The current statement tree. */ static GTY(()) struct stmt_tree_s c_stmt_tree; /* State saving variables. 
*/ tree c_break_label; tree c_cont_label; /* A list of decls to be made automatically visible in each file scope. */ static GTY(()) tree visible_builtins; /* Set to 0 at beginning of a function definition, set to 1 if a return statement that specifies a return value is seen. */ int current_function_returns_value; /* Set to 0 at beginning of a function definition, set to 1 if a return statement with no argument is seen. */ int current_function_returns_null; /* Set to 0 at beginning of a function definition, set to 1 if a call to a noreturn function is seen. */ int current_function_returns_abnormally; /* Set to nonzero by `grokdeclarator' for a function whose return type is defaulted, if warnings for this are desired. */ static int warn_about_return_type; /* Nonzero when the current toplevel function contains a declaration of a nested function which is never defined. */ static bool undef_nested_function; /* If non-zero, implicit "omp declare target" attribute is added into the attribute lists. */ int current_omp_declare_target_attribute; /* Each c_binding structure describes one binding of an identifier to a decl. All the decls in a scope - irrespective of namespace - are chained together by the ->prev field, which (as the name implies) runs in reverse order. All the decls in a given namespace bound to a given identifier are chained by the ->shadowed field, which runs from inner to outer scopes. The ->decl field usually points to a DECL node, but there are two exceptions. In the namespace of type tags, the bound entity is a RECORD_TYPE, UNION_TYPE, or ENUMERAL_TYPE node. If an undeclared identifier is encountered, it is bound to error_mark_node to suppress further errors about that identifier in the current function. The ->u.type field stores the type of the declaration in this scope; if NULL, the type is the type of the ->decl field. This is only of relevance for objects with external or internal linkage which may be redeclared in inner scopes, forming composite types that only persist for the duration of those scopes. In the external scope, this stores the composite of all the types declared for this object, visible or not. The ->inner_comp field (used only at file scope) stores whether an incomplete array type at file scope was completed at an inner scope to an array size other than 1. The ->u.label field is used for labels. It points to a structure which stores additional information used for warnings. The depth field is copied from the scope structure that holds this decl. It is used to preserve the proper ordering of the ->shadowed field (see bind()) and also for a handful of special-case checks. Finally, the invisible bit is true for a decl which should be ignored for purposes of normal name lookup, and the nested bit is true for a decl that's been bound a second time in an inner scope; in all such cases, the binding in the outer scope will have its invisible bit true. 
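For concreteness: given a file-scope int i; and an inner block declaring long i;, the inner binding is recorded with a larger depth, its ->shadowed field points at the file-scope binding, and when the inner scope is popped the file-scope binding again becomes the first one found for "i".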
*/ struct GTY((chain_next ("%h.prev"))) c_binding { union GTY(()) { /* first so GTY desc can use decl */ tree GTY((tag ("0"))) type; /* the type in this scope */ struct c_label_vars * GTY((tag ("1"))) label; /* for warnings */ } GTY((desc ("TREE_CODE (%0.decl) == LABEL_DECL"))) u; tree decl; /* the decl bound */ tree id; /* the identifier it's bound to */ struct c_binding *prev; /* the previous decl in this scope */ struct c_binding *shadowed; /* the innermost decl shadowed by this one */ unsigned int depth : 28; /* depth of this scope */ BOOL_BITFIELD invisible : 1; /* normal lookup should ignore this binding */ BOOL_BITFIELD nested : 1; /* do not set DECL_CONTEXT when popping */ BOOL_BITFIELD inner_comp : 1; /* incomplete array completed in inner scope */ BOOL_BITFIELD in_struct : 1; /* currently defined as struct field */ location_t locus; /* location for nested bindings */ }; #define B_IN_SCOPE(b1, b2) ((b1)->depth == (b2)->depth) #define B_IN_CURRENT_SCOPE(b) ((b)->depth == current_scope->depth) #define B_IN_FILE_SCOPE(b) ((b)->depth == 1 /*file_scope->depth*/) #define B_IN_EXTERNAL_SCOPE(b) ((b)->depth == 0 /*external_scope->depth*/) /* Each C symbol points to three linked lists of c_binding structures. These describe the values of the identifier in the three different namespaces defined by the language. */ struct GTY(()) lang_identifier { struct c_common_identifier common_id; struct c_binding *symbol_binding; /* vars, funcs, constants, typedefs */ struct c_binding *tag_binding; /* struct/union/enum tags */ struct c_binding *label_binding; /* labels */ }; /* Validate c-lang.c's assumptions. */ extern char C_SIZEOF_STRUCT_LANG_IDENTIFIER_isnt_accurate [(sizeof(struct lang_identifier) == C_SIZEOF_STRUCT_LANG_IDENTIFIER) ? 1 : -1]; /* The binding oracle; see c-tree.h. */ void (*c_binding_oracle) (enum c_oracle_request, tree identifier); /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's symbol binding. */ #define I_SYMBOL_CHECKED(node) \ (TREE_LANG_FLAG_4 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding* * i_symbol_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->symbol_binding == NULL && c_binding_oracle != NULL && !I_SYMBOL_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_SYMBOL_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_SYMBOL, node); } return &lid->symbol_binding; } #define I_SYMBOL_BINDING(node) (*i_symbol_binding (node)) #define I_SYMBOL_DECL(node) \ (I_SYMBOL_BINDING(node) ? I_SYMBOL_BINDING(node)->decl : 0) /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's tag binding. */ #define I_TAG_CHECKED(node) \ (TREE_LANG_FLAG_5 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding ** i_tag_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->tag_binding == NULL && c_binding_oracle != NULL && !I_TAG_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_TAG_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_TAG, node); } return &lid->tag_binding; } #define I_TAG_BINDING(node) (*i_tag_binding (node)) #define I_TAG_DECL(node) \ (I_TAG_BINDING(node) ? 
I_TAG_BINDING(node)->decl : 0) /* This flag is set on an identifier if we have previously asked the binding oracle for this identifier's label binding. */ #define I_LABEL_CHECKED(node) \ (TREE_LANG_FLAG_6 (IDENTIFIER_NODE_CHECK (node))) static inline struct c_binding ** i_label_binding (tree node) { struct lang_identifier *lid = (struct lang_identifier *) IDENTIFIER_NODE_CHECK (node); if (lid->label_binding == NULL && c_binding_oracle != NULL && !I_LABEL_CHECKED (node)) { /* Set the "checked" flag first, to avoid infinite recursion when the binding oracle calls back into gcc. */ I_LABEL_CHECKED (node) = 1; c_binding_oracle (C_ORACLE_LABEL, node); } return &lid->label_binding; } #define I_LABEL_BINDING(node) (*i_label_binding (node)) #define I_LABEL_DECL(node) \ (I_LABEL_BINDING(node) ? I_LABEL_BINDING(node)->decl : 0) /* The resulting tree type. */ union GTY((desc ("TREE_CODE (&%h.generic) == IDENTIFIER_NODE"), chain_next ("(union lang_tree_node *) c_tree_chain_next (&%h.generic)"))) lang_tree_node { union tree_node GTY ((tag ("0"), desc ("tree_node_structure (&%h)"))) generic; struct lang_identifier GTY ((tag ("1"))) identifier; }; /* Track bindings and other things that matter for goto warnings. For efficiency, we do not gather all the decls at the point of definition. Instead, we point into the bindings structure. As scopes are popped, we update these structures and gather the decls that matter at that time. */ struct GTY(()) c_spot_bindings { /* The currently open scope which holds bindings defined when the label was defined or the goto statement was found. */ struct c_scope *scope; /* The bindings in the scope field which were defined at the point of the label or goto. This lets us look at older or newer bindings in the scope, as appropriate. */ struct c_binding *bindings_in_scope; /* The number of statement expressions that have started since this label or goto statement was defined. This is zero if we are at the same statement expression level. It is positive if we are in a statement expression started since this spot. It is negative if this spot was in a statement expression and we have left it. */ int stmt_exprs; /* Whether we started in a statement expression but are no longer in it. This is set to true if stmt_exprs ever goes negative. */ bool left_stmt_expr; }; /* This structure is used to keep track of bindings seen when a goto statement is defined. This is only used if we see the goto statement before we see the label. */ struct GTY(()) c_goto_bindings { /* The location of the goto statement. */ location_t loc; /* The bindings of the goto statement. */ struct c_spot_bindings goto_bindings; }; typedef struct c_goto_bindings *c_goto_bindings_p; /* The additional information we keep track of for a label binding. These fields are updated as scopes are popped. */ struct GTY(()) c_label_vars { /* The shadowed c_label_vars, when one label shadows another (which can only happen using a __label__ declaration). */ struct c_label_vars *shadowed; /* The bindings when the label was defined. */ struct c_spot_bindings label_bindings; /* A list of decls that we care about: decls about which we should warn if a goto branches to this label from later in the function. Decls are added to this list as scopes are popped. We only add the decls that matter. */ vec<tree, va_gc> *decls_in_scope; /* A list of goto statements to this label. This is only used for goto statements seen before the label was defined, so that we can issue appropriate warnings for them. 
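For example, in void f (int n) { goto skip; int vla[n]; skip: ; } the goto is recorded here because it appears before "skip" is defined; the saved bindings let the jump past the variably modified declaration be diagnosed once the label is finally seen.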
*/ vec<c_goto_bindings_p, va_gc> *gotos; }; /* Each c_scope structure describes the complete contents of one scope. Four scopes are distinguished specially: the innermost or current scope, the innermost function scope, the file scope (always the second to outermost) and the outermost or external scope. Most declarations are recorded in the current scope. All normal label declarations are recorded in the innermost function scope, as are bindings of undeclared identifiers to error_mark_node. (GCC permits nested functions as an extension, hence the 'innermost' qualifier.) Explicitly declared labels (using the __label__ extension) appear in the current scope. Being in the file scope (current_scope == file_scope) causes special behavior in several places below. Also, under some conditions the Objective-C front end records declarations in the file scope even though that isn't the current scope. All declarations with external linkage are recorded in the external scope, even if they aren't visible there; this models the fact that such declarations are visible to the entire program, and (with a bit of cleverness, see pushdecl) allows diagnosis of some violations of C99 6.2.2p7 and 6.2.7p2: If, within the same translation unit, the same identifier appears with both internal and external linkage, the behavior is undefined. All declarations that refer to the same object or function shall have compatible type; otherwise, the behavior is undefined. Initially only the built-in declarations, which describe compiler intrinsic functions plus a subset of the standard library, are in this scope. The order of the blocks list matters, and it is frequently appended to. To avoid having to walk all the way to the end of the list on each insertion, or reverse the list later, we maintain a pointer to the last list entry. (FIXME: It should be feasible to use a reversed list here.) The bindings list is strictly in reverse order of declarations; pop_scope relies on this. */ struct GTY((chain_next ("%h.outer"))) c_scope { /* The scope containing this one. */ struct c_scope *outer; /* The next outermost function scope. */ struct c_scope *outer_function; /* All bindings in this scope. */ struct c_binding *bindings; /* For each scope (except the global one), a chain of BLOCK nodes for all the scopes that were entered and exited one level down. */ tree blocks; tree blocks_last; /* The depth of this scope. Used to keep the ->shadowed chain of bindings sorted innermost to outermost. */ unsigned int depth : 28; /* True if we are currently filling this scope with parameter declarations. */ BOOL_BITFIELD parm_flag : 1; /* True if we saw [*] in this scope. Used to give an error message if these appear in a function definition. */ BOOL_BITFIELD had_vla_unspec : 1; /* True if we already complained about forward parameter decls in this scope. This prevents double warnings on foo (int a; int b; ...) */ BOOL_BITFIELD warned_forward_parm_decls : 1; /* True if this is the outermost block scope of a function body. This scope contains the parameters, the local variables declared in the outermost block, and all the labels (except those in nested functions, or declared at block scope with __label__). */ BOOL_BITFIELD function_body : 1; /* True means make a BLOCK for this scope no matter what. */ BOOL_BITFIELD keep : 1; /* True means that an unsuffixed float constant is _Decimal64. */ BOOL_BITFIELD float_const_decimal64 : 1; /* True if this scope has any label bindings.
This is used to speed up searching for labels when popping scopes, particularly since labels are normally only found at function scope. */ BOOL_BITFIELD has_label_bindings : 1; /* True if we should issue a warning if a goto statement crosses any of the bindings. We still need to check the list of bindings to find the specific ones we need to warn about. This is true if decl_jump_unsafe would return true for any of the bindings. This is used to avoid looping over all the bindings unnecessarily. */ BOOL_BITFIELD has_jump_unsafe_decl : 1; }; /* The scope currently in effect. */ static GTY(()) struct c_scope *current_scope; /* The innermost function scope. Ordinary (not explicitly declared) labels, bindings to error_mark_node, and the lazily-created bindings of __func__ and its friends get this scope. */ static GTY(()) struct c_scope *current_function_scope; /* The C file scope. This is reset for each input translation unit. */ static GTY(()) struct c_scope *file_scope; /* The outermost scope. This is used for all declarations with external linkage, and only these, hence the name. */ static GTY(()) struct c_scope *external_scope; /* A chain of c_scope structures awaiting reuse. */ static GTY((deletable)) struct c_scope *scope_freelist; /* A chain of c_binding structures awaiting reuse. */ static GTY((deletable)) struct c_binding *binding_freelist; /* Append VAR to LIST in scope SCOPE. */ #define SCOPE_LIST_APPEND(scope, list, decl) do { \ struct c_scope *s_ = (scope); \ tree d_ = (decl); \ if (s_->list##_last) \ BLOCK_CHAIN (s_->list##_last) = d_; \ else \ s_->list = d_; \ s_->list##_last = d_; \ } while (0) /* Concatenate FROM in scope FSCOPE onto TO in scope TSCOPE. */ #define SCOPE_LIST_CONCAT(tscope, to, fscope, from) do { \ struct c_scope *t_ = (tscope); \ struct c_scope *f_ = (fscope); \ if (t_->to##_last) \ BLOCK_CHAIN (t_->to##_last) = f_->from; \ else \ t_->to = f_->from; \ t_->to##_last = f_->from##_last; \ } while (0) /* A c_inline_static structure stores details of a static identifier referenced in a definition of a function that may be an inline definition if no subsequent declaration of that function uses "extern" or does not use "inline". */ struct GTY((chain_next ("%h.next"))) c_inline_static { /* The location for a diagnostic. */ location_t location; /* The function that may be an inline definition. */ tree function; /* The object or function referenced. */ tree static_decl; /* What sort of reference this is. */ enum c_inline_static_type type; /* The next such structure or NULL. */ struct c_inline_static *next; }; /* List of static identifiers used or referenced in functions that may be inline definitions. */ static GTY(()) struct c_inline_static *c_inline_statics; /* True means unconditionally make a BLOCK for the next scope pushed. */ static bool keep_next_level_flag; /* True means the next call to push_scope will be the outermost scope of a function body, so do not push a new scope, merely cease expecting parameter decls. */ static bool next_is_function_body; /* A vector of pointers to c_binding structures. */ typedef struct c_binding *c_binding_ptr; /* Information that we keep for a struct or union while it is being parsed. */ class c_struct_parse_info { public: /* If warn_cxx_compat, a list of types defined within this struct. */ auto_vec<tree> struct_types; /* If warn_cxx_compat, a list of field names which have bindings, and which are defined in this struct, but which are not defined in any enclosing struct. 
This is used to clear the in_struct field of the c_bindings structure. */ auto_vec<c_binding_ptr> fields; /* If warn_cxx_compat, a list of typedef names used when defining fields in this struct. */ auto_vec<tree> typedefs_seen; }; /* Information for the struct or union currently being parsed, or NULL if not parsing a struct or union. */ static class c_struct_parse_info *struct_parse_info; /* Forward declarations. */ static tree lookup_name_in_scope (tree, struct c_scope *); static tree c_make_fname_decl (location_t, tree, int); static tree grokdeclarator (const struct c_declarator *, struct c_declspecs *, enum decl_context, bool, tree *, tree *, tree *, bool *, enum deprecated_states); static tree grokparms (struct c_arg_info *, bool); static void layout_array_type (tree); static void warn_defaults_to (location_t, int, const char *, ...) ATTRIBUTE_GCC_DIAG(3,4); static const char *header_for_builtin_fn (tree); /* T is a statement. Add it to the statement-tree. This is the C/ObjC version--C++ has a slightly different version of this function. */ tree add_stmt (tree t) { enum tree_code code = TREE_CODE (t); if (CAN_HAVE_LOCATION_P (t) && code != LABEL_EXPR) { if (!EXPR_HAS_LOCATION (t)) SET_EXPR_LOCATION (t, input_location); } if (code == LABEL_EXPR || code == CASE_LABEL_EXPR) STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1; /* Add T to the statement-tree. Non-side-effect statements need to be recorded during statement expressions. */ if (!building_stmt_list_p ()) push_stmt_list (); append_to_statement_list_force (t, &cur_stmt_list); return t; } /* Build a pointer type using the default pointer mode. */ static tree c_build_pointer_type (tree to_type) { addr_space_t as = to_type == error_mark_node? ADDR_SPACE_GENERIC : TYPE_ADDR_SPACE (to_type); machine_mode pointer_mode; if (as != ADDR_SPACE_GENERIC || c_default_pointer_mode == VOIDmode) pointer_mode = targetm.addr_space.pointer_mode (as); else pointer_mode = c_default_pointer_mode; return build_pointer_type_for_mode (to_type, pointer_mode, false); } /* Return true if we will want to say something if a goto statement crosses DECL. */ static bool decl_jump_unsafe (tree decl) { if (error_operand_p (decl)) return false; /* Don't warn for compound literals. If a goto statement crosses their initialization, it should cross also all the places where the complit is used or where the complit address might be saved into some variable, so code after the label to which goto jumps should not be able to refer to the compound literal. */ if (VAR_P (decl) && C_DECL_COMPOUND_LITERAL_P (decl)) return false; /* Always warn about crossing variably modified types. */ if ((VAR_P (decl) || TREE_CODE (decl) == TYPE_DECL) && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) return true; /* Otherwise, only warn if -Wgoto-misses-init and this is an initialized automatic decl. */ if (warn_jump_misses_init && VAR_P (decl) && !TREE_STATIC (decl) && DECL_INITIAL (decl) != NULL_TREE) return true; return false; } void c_print_identifier (FILE *file, tree node, int indent) { void (*save) (enum c_oracle_request, tree identifier); /* Temporarily hide any binding oracle. Without this, calls to debug_tree from the debugger will end up calling into the oracle, making for a confusing debug session. As the oracle isn't needed here for normal operation, it's simplest to suppress it. 
*/ save = c_binding_oracle; c_binding_oracle = NULL; print_node (file, "symbol", I_SYMBOL_DECL (node), indent + 4); print_node (file, "tag", I_TAG_DECL (node), indent + 4); print_node (file, "label", I_LABEL_DECL (node), indent + 4); if (C_IS_RESERVED_WORD (node) && C_RID_CODE (node) != RID_CXX_COMPAT_WARN) { tree rid = ridpointers[C_RID_CODE (node)]; indent_to (file, indent + 4); fprintf (file, "rid " HOST_PTR_PRINTF " \"%s\"", (void *) rid, IDENTIFIER_POINTER (rid)); } c_binding_oracle = save; } /* Establish a binding between NAME, an IDENTIFIER_NODE, and DECL, which may be any of several kinds of DECL or TYPE or error_mark_node, in the scope SCOPE. */ static void bind (tree name, tree decl, struct c_scope *scope, bool invisible, bool nested, location_t locus) { struct c_binding *b, **here; if (binding_freelist) { b = binding_freelist; binding_freelist = b->prev; } else b = ggc_alloc<c_binding> (); b->shadowed = 0; b->decl = decl; b->id = name; b->depth = scope->depth; b->invisible = invisible; b->nested = nested; b->inner_comp = 0; b->in_struct = 0; b->locus = locus; b->u.type = NULL; b->prev = scope->bindings; scope->bindings = b; if (decl_jump_unsafe (decl)) scope->has_jump_unsafe_decl = 1; if (!name) return; switch (TREE_CODE (decl)) { case LABEL_DECL: here = &I_LABEL_BINDING (name); break; case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: here = &I_TAG_BINDING (name); break; case VAR_DECL: case FUNCTION_DECL: case TYPE_DECL: case CONST_DECL: case PARM_DECL: case ERROR_MARK: here = &I_SYMBOL_BINDING (name); break; default: gcc_unreachable (); } /* Locate the appropriate place in the chain of shadowed decls to insert this binding. Normally, scope == current_scope and this does nothing. */ while (*here && (*here)->depth > scope->depth) here = &(*here)->shadowed; b->shadowed = *here; *here = b; } /* Clear the binding structure B, stick it on the binding_freelist, and return the former value of b->prev. This is used by pop_scope and get_parm_info to iterate destructively over all the bindings from a given scope. */ static struct c_binding * free_binding_and_advance (struct c_binding *b) { struct c_binding *prev = b->prev; memset (b, 0, sizeof (struct c_binding)); b->prev = binding_freelist; binding_freelist = b; return prev; } /* Bind a label. Like bind, but skip fields which aren't used for labels, and add the LABEL_VARS value. */ static void bind_label (tree name, tree label, struct c_scope *scope, struct c_label_vars *label_vars) { struct c_binding *b; bind (name, label, scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); scope->has_label_bindings = true; b = scope->bindings; gcc_assert (b->decl == label); label_vars->shadowed = b->u.label; b->u.label = label_vars; } /* Hook called at end of compilation to assume 1 elt for a file-scope tentative array defn that wasn't complete before. */ void c_finish_incomplete_decl (tree decl) { if (VAR_P (decl)) { tree type = TREE_TYPE (decl); if (type != error_mark_node && TREE_CODE (type) == ARRAY_TYPE && !DECL_EXTERNAL (decl) && TYPE_DOMAIN (type) == NULL_TREE) { warning_at (DECL_SOURCE_LOCATION (decl), 0, "array %q+D assumed to have one element", decl); complete_array_type (&TREE_TYPE (decl), NULL_TREE, true); relayout_decl (decl); } } } /* Record that inline function FUNC contains a reference (location LOC) to static DECL (file-scope or function-local according to TYPE). 
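For example, given static int counter; inline int next (void) { return counter++; } the reference to the internal-linkage counter is only invalid (C99 6.7.4p3) if next turns out to be an inline definition, which cannot be known until the end of the translation unit, so the reference is recorded here and checked later.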
*/ void record_inline_static (location_t loc, tree func, tree decl, enum c_inline_static_type type) { c_inline_static *csi = ggc_alloc<c_inline_static> (); csi->location = loc; csi->function = func; csi->static_decl = decl; csi->type = type; csi->next = c_inline_statics; c_inline_statics = csi; } /* Check for references to static declarations in inline functions at the end of the translation unit and diagnose them if the functions are still inline definitions. */ static void check_inline_statics (void) { struct c_inline_static *csi; for (csi = c_inline_statics; csi; csi = csi->next) { if (DECL_EXTERNAL (csi->function)) switch (csi->type) { case csi_internal: pedwarn (csi->location, 0, "%qD is static but used in inline function %qD " "which is not static", csi->static_decl, csi->function); break; case csi_modifiable: pedwarn (csi->location, 0, "%q+D is static but declared in inline function %qD " "which is not static", csi->static_decl, csi->function); break; default: gcc_unreachable (); } } c_inline_statics = NULL; } /* Fill in a c_spot_bindings structure. If DEFINING is true, set it for the current state, otherwise set it to uninitialized. */ static void set_spot_bindings (struct c_spot_bindings *p, bool defining) { if (defining) { p->scope = current_scope; p->bindings_in_scope = current_scope->bindings; } else { p->scope = NULL; p->bindings_in_scope = NULL; } p->stmt_exprs = 0; p->left_stmt_expr = false; } /* Update spot bindings P as we pop out of SCOPE. Return true if we should push decls for a label. */ static bool update_spot_bindings (struct c_scope *scope, struct c_spot_bindings *p) { if (p->scope != scope) { /* This label or goto is defined in some other scope, or it is a label which is not yet defined. There is nothing to update. */ return false; } /* Adjust the spot bindings to refer to the bindings already defined in the enclosing scope. */ p->scope = scope->outer; p->bindings_in_scope = p->scope->bindings; return true; } /* The Objective-C front-end often needs to determine the current scope. */ void * objc_get_current_scope (void) { return current_scope; } /* The following function is used only by Objective-C. It needs to live here because it accesses the innards of c_scope. */ void objc_mark_locals_volatile (void *enclosing_blk) { struct c_scope *scope; struct c_binding *b; for (scope = current_scope; scope && scope != enclosing_blk; scope = scope->outer) { for (b = scope->bindings; b; b = b->prev) objc_volatilize_decl (b->decl); /* Do not climb up past the current function. */ if (scope->function_body) break; } } /* Return true if we are in the global binding level. */ bool global_bindings_p (void) { return current_scope == file_scope; } /* Return true if we're declaring parameters in an old-style function declaration. */ bool old_style_parameter_scope (void) { /* If processing parameters and there is no function statement list, we have an old-style function declaration. */ return (current_scope->parm_flag && !DECL_SAVED_TREE (current_function_decl)); } void keep_next_level (void) { keep_next_level_flag = true; } /* Set the flag for the FLOAT_CONST_DECIMAL64 pragma being ON. */ void set_float_const_decimal64 (void) { current_scope->float_const_decimal64 = true; } /* Clear the flag for the FLOAT_CONST_DECIMAL64 pragma. */ void clear_float_const_decimal64 (void) { current_scope->float_const_decimal64 = false; } /* Return nonzero if an unsuffixed float constant is _Decimal64.
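That is, whether #pragma STDC FLOAT_CONST_DECIMAL64 ON is in effect for the current scope, in which case an unsuffixed constant such as 1.5 has type _Decimal64 rather than double.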
*/ bool float_const_decimal64_p (void) { return current_scope->float_const_decimal64; } /* Identify this scope as currently being filled with parameters. */ void declare_parm_level (void) { current_scope->parm_flag = true; } void push_scope (void) { if (next_is_function_body) { /* This is the transition from the parameters to the top level of the function body. These are the same scope (C99 6.2.1p4,6) so we do not push another scope structure. next_is_function_body is set only by store_parm_decls, which in turn is called when and only when we are about to encounter the opening curly brace for the function body. The outermost block of a function always gets a BLOCK node, because the debugging output routines expect that each function has at least one BLOCK. */ current_scope->parm_flag = false; current_scope->function_body = true; current_scope->keep = true; current_scope->outer_function = current_function_scope; current_function_scope = current_scope; keep_next_level_flag = false; next_is_function_body = false; /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */ if (current_scope->outer) current_scope->float_const_decimal64 = current_scope->outer->float_const_decimal64; else current_scope->float_const_decimal64 = false; } else { struct c_scope *scope; if (scope_freelist) { scope = scope_freelist; scope_freelist = scope->outer; } else scope = ggc_cleared_alloc<c_scope> (); /* The FLOAT_CONST_DECIMAL64 pragma applies to nested scopes. */ if (current_scope) scope->float_const_decimal64 = current_scope->float_const_decimal64; else scope->float_const_decimal64 = false; scope->keep = keep_next_level_flag; scope->outer = current_scope; scope->depth = current_scope ? (current_scope->depth + 1) : 0; /* Check for scope depth overflow. Unlikely (2^28 == 268,435,456) but possible. */ if (current_scope && scope->depth == 0) { scope->depth--; sorry ("GCC supports only %u nested scopes", scope->depth); } current_scope = scope; keep_next_level_flag = false; } } /* This is called when we are leaving SCOPE. For each label defined in SCOPE, add any appropriate decls to its decls_in_scope fields. These are the decls whose initialization will be skipped by a goto later in the function. */ static void update_label_decls (struct c_scope *scope) { struct c_scope *s; s = scope; while (s != NULL) { if (s->has_label_bindings) { struct c_binding *b; for (b = s->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; struct c_binding *b1; bool hjud; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; b1 = label_vars->label_bindings.bindings_in_scope; if (label_vars->label_bindings.scope == NULL) hjud = false; else hjud = label_vars->label_bindings.scope->has_jump_unsafe_decl; if (update_spot_bindings (scope, &label_vars->label_bindings)) { /* This label is defined in this scope. */ if (hjud) { for (; b1 != NULL; b1 = b1->prev) { /* A goto from later in the function to this label will never see the initialization of B1, if any. Save it to issue a warning if needed. */ if (decl_jump_unsafe (b1->decl)) vec_safe_push(label_vars->decls_in_scope, b1->decl); } } } /* Update the bindings of any goto statements associated with this label. */ FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) update_spot_bindings (scope, &g->goto_bindings); } } /* Don't search beyond the current function. */ if (s == current_function_scope) break; s = s->outer; } } /* Set the TYPE_CONTEXT of all of TYPE's variants to CONTEXT. 
*/ static void set_type_context (tree type, tree context) { for (type = TYPE_MAIN_VARIANT (type); type; type = TYPE_NEXT_VARIANT (type)) TYPE_CONTEXT (type) = context; } /* Exit a scope. Restore the state of the identifier-decl mappings that were in effect when this scope was entered. Return a BLOCK node containing all the DECLs in this scope that are of interest to debug info generation. */ tree pop_scope (void) { struct c_scope *scope = current_scope; tree block, context, p; struct c_binding *b; bool functionbody = scope->function_body; bool keep = functionbody || scope->keep || scope->bindings; update_label_decls (scope); /* If appropriate, create a BLOCK to record the decls for the life of this function. */ block = NULL_TREE; if (keep) { block = make_node (BLOCK); BLOCK_SUBBLOCKS (block) = scope->blocks; TREE_USED (block) = 1; /* In each subblock, record that this is its superior. */ for (p = scope->blocks; p; p = BLOCK_CHAIN (p)) BLOCK_SUPERCONTEXT (p) = block; BLOCK_VARS (block) = NULL_TREE; } /* The TYPE_CONTEXTs for all of the tagged types belonging to this scope must be set so that they point to the appropriate construct, i.e. either to the current FUNCTION_DECL node, or else to the BLOCK node we just constructed. Note that for tagged types whose scope is just the formal parameter list for some function type specification, we can't properly set their TYPE_CONTEXTs here, because we don't have a pointer to the appropriate FUNCTION_TYPE node readily available to us. For those cases, the TYPE_CONTEXTs of the relevant tagged type nodes get set in `grokdeclarator' as soon as we have created the FUNCTION_TYPE node which will represent the "scope" for these "parameter list local" tagged types. */ if (scope->function_body) context = current_function_decl; else if (scope == file_scope) { tree file_decl = build_translation_unit_decl (get_identifier (main_input_filename)); context = file_decl; debug_hooks->register_main_translation_unit (file_decl); } else context = block; /* Clear all bindings in this scope. */ for (b = scope->bindings; b; b = free_binding_and_advance (b)) { p = b->decl; switch (TREE_CODE (p)) { case LABEL_DECL: /* Warnings for unused labels, errors for undefined labels. */ if (TREE_USED (p) && !DECL_INITIAL (p)) { error ("label %q+D used but not defined", p); DECL_INITIAL (p) = error_mark_node; } else warn_for_unused_label (p); /* Labels go in BLOCK_VARS. */ DECL_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; gcc_assert (I_LABEL_BINDING (b->id) == b); I_LABEL_BINDING (b->id) = b->shadowed; /* Also pop back to the shadowed label_vars. */ release_tree_vector (b->u.label->decls_in_scope); b->u.label = b->u.label->shadowed; break; case ENUMERAL_TYPE: case UNION_TYPE: case RECORD_TYPE: set_type_context (p, context); /* Types may not have tag-names, in which case the type appears in the bindings list with b->id NULL. */ if (b->id) { gcc_assert (I_TAG_BINDING (b->id) == b); I_TAG_BINDING (b->id) = b->shadowed; } break; case FUNCTION_DECL: /* Propagate TREE_ADDRESSABLE from nested functions to their containing functions. 
*/ if (!TREE_ASM_WRITTEN (p) && DECL_INITIAL (p) != NULL_TREE && TREE_ADDRESSABLE (p) && DECL_ABSTRACT_ORIGIN (p) != NULL_TREE && DECL_ABSTRACT_ORIGIN (p) != p) TREE_ADDRESSABLE (DECL_ABSTRACT_ORIGIN (p)) = 1; if (!TREE_PUBLIC (p) && !DECL_INITIAL (p) && !b->nested && scope != file_scope && scope != external_scope) { error ("nested function %q+D declared but never defined", p); undef_nested_function = true; } else if (DECL_DECLARED_INLINE_P (p) && TREE_PUBLIC (p) && !DECL_INITIAL (p)) { /* C99 6.7.4p6: "a function with external linkage... declared with an inline function specifier ... shall also be defined in the same translation unit." */ if (!flag_gnu89_inline && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (p)) && scope == external_scope) pedwarn (input_location, 0, "inline function %q+D declared but never defined", p); DECL_EXTERNAL (p) = 1; } goto common_symbol; case VAR_DECL: /* Warnings for unused variables. */ if ((!TREE_USED (p) || !DECL_READ_P (p)) && !TREE_NO_WARNING (p) && !DECL_IN_SYSTEM_HEADER (p) && DECL_NAME (p) && !DECL_ARTIFICIAL (p) && scope != file_scope && scope != external_scope) { if (!TREE_USED (p)) warning (OPT_Wunused_variable, "unused variable %q+D", p); else if (DECL_CONTEXT (p) == current_function_decl) warning_at (DECL_SOURCE_LOCATION (p), OPT_Wunused_but_set_variable, "variable %qD set but not used", p); } if (b->inner_comp) { error ("type of array %q+D completed incompatibly with" " implicit initialization", p); } /* Fall through. */ case TYPE_DECL: case CONST_DECL: common_symbol: /* All of these go in BLOCK_VARS, but only if this is the binding in the home scope. */ if (!b->nested) { DECL_CHAIN (p) = BLOCK_VARS (block); BLOCK_VARS (block) = p; } else if (VAR_OR_FUNCTION_DECL_P (p) && scope != file_scope) { /* For block local externs add a special DECL_EXTERNAL decl for debug info generation. */ tree extp = copy_node (p); DECL_EXTERNAL (extp) = 1; TREE_STATIC (extp) = 0; TREE_PUBLIC (extp) = 1; DECL_INITIAL (extp) = NULL_TREE; DECL_LANG_SPECIFIC (extp) = NULL; DECL_CONTEXT (extp) = current_function_decl; if (TREE_CODE (p) == FUNCTION_DECL) { DECL_RESULT (extp) = NULL_TREE; DECL_SAVED_TREE (extp) = NULL_TREE; DECL_STRUCT_FUNCTION (extp) = NULL; } if (b->locus != UNKNOWN_LOCATION) DECL_SOURCE_LOCATION (extp) = b->locus; DECL_CHAIN (extp) = BLOCK_VARS (block); BLOCK_VARS (block) = extp; } /* If this is the file scope set DECL_CONTEXT of each decl to the TRANSLATION_UNIT_DECL. This makes same_translation_unit_p work. */ if (scope == file_scope) { DECL_CONTEXT (p) = context; if (TREE_CODE (p) == TYPE_DECL && TREE_TYPE (p) != error_mark_node) set_type_context (TREE_TYPE (p), context); } gcc_fallthrough (); /* Parameters go in DECL_ARGUMENTS, not BLOCK_VARS, and have already been put there by store_parm_decls. Unused- parameter warnings are handled by function.c. error_mark_node obviously does not go in BLOCK_VARS and does not get unused-variable warnings. */ case PARM_DECL: case ERROR_MARK: /* It is possible for a decl not to have a name. We get here with b->id NULL in this case. */ if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; if (b->shadowed && b->shadowed->u.type) TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type; } break; default: gcc_unreachable (); } } /* Dispose of the block that we just made inside some higher level. 
*/ if ((scope->function_body || scope == file_scope) && context) { DECL_INITIAL (context) = block; BLOCK_SUPERCONTEXT (block) = context; } else if (scope->outer) { if (block) SCOPE_LIST_APPEND (scope->outer, blocks, block); /* If we did not make a block for the scope just exited, any blocks made for inner scopes must be carried forward so they will later become subblocks of something else. */ else if (scope->blocks) SCOPE_LIST_CONCAT (scope->outer, blocks, scope, blocks); } /* Pop the current scope, and free the structure for reuse. */ current_scope = scope->outer; if (scope->function_body) current_function_scope = scope->outer_function; memset (scope, 0, sizeof (struct c_scope)); scope->outer = scope_freelist; scope_freelist = scope; return block; } void push_file_scope (void) { tree decl; if (file_scope) return; push_scope (); file_scope = current_scope; start_fname_decls (); for (decl = visible_builtins; decl; decl = DECL_CHAIN (decl)) bind (DECL_NAME (decl), decl, file_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); } void pop_file_scope (void) { /* In case there were missing closebraces, get us back to the global binding level. */ while (current_scope != file_scope) pop_scope (); /* __FUNCTION__ is defined at file scope (""). This call may not be necessary as my tests indicate it still works without it. */ finish_fname_decls (); check_inline_statics (); /* This is the point to write out a PCH if we're doing that. In that case we do not want to do anything else. */ if (pch_file) { c_common_write_pch (); /* Ensure even the callers don't try to finalize the CU. */ flag_syntax_only = 1; return; } /* Pop off the file scope and close this translation unit. */ pop_scope (); file_scope = 0; maybe_apply_pending_pragma_weaks (); } /* Adjust the bindings for the start of a statement expression. */ void c_bindings_start_stmt_expr (struct c_spot_bindings* switch_bindings) { struct c_scope *scope; for (scope = current_scope; scope != NULL; scope = scope->outer) { struct c_binding *b; if (!scope->has_label_bindings) continue; for (b = scope->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; ++label_vars->label_bindings.stmt_exprs; FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) ++g->goto_bindings.stmt_exprs; } } if (switch_bindings != NULL) ++switch_bindings->stmt_exprs; } /* Adjust the bindings for the end of a statement expression. */ void c_bindings_end_stmt_expr (struct c_spot_bindings *switch_bindings) { struct c_scope *scope; for (scope = current_scope; scope != NULL; scope = scope->outer) { struct c_binding *b; if (!scope->has_label_bindings) continue; for (b = scope->bindings; b != NULL; b = b->prev) { struct c_label_vars *label_vars; unsigned int ix; struct c_goto_bindings *g; if (TREE_CODE (b->decl) != LABEL_DECL) continue; label_vars = b->u.label; --label_vars->label_bindings.stmt_exprs; if (label_vars->label_bindings.stmt_exprs < 0) { label_vars->label_bindings.left_stmt_expr = true; label_vars->label_bindings.stmt_exprs = 0; } FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) { --g->goto_bindings.stmt_exprs; if (g->goto_bindings.stmt_exprs < 0) { g->goto_bindings.left_stmt_expr = true; g->goto_bindings.stmt_exprs = 0; } } } } if (switch_bindings != NULL) { --switch_bindings->stmt_exprs; gcc_assert (switch_bindings->stmt_exprs >= 0); } } /* Push a definition or a declaration of struct, union or enum tag "name". 
"type" should be the type node. We assume that the tag "name" is not already defined, and has a location of LOC. Note that the definition may really be just a forward reference. In that case, the TYPE_SIZE will be zero. */ static void pushtag (location_t loc, tree name, tree type) { /* Record the identifier as the type's name if it has none. */ if (name && !TYPE_NAME (type)) TYPE_NAME (type) = name; bind (name, type, current_scope, /*invisible=*/false, /*nested=*/false, loc); /* Create a fake NULL-named TYPE_DECL node whose TREE_TYPE will be the tagged type we just added to the current scope. This fake NULL-named TYPE_DECL node helps dwarfout.c to know when it needs to output a representation of a tagged type, and it also gives us a convenient place to record the "scope start" address for the tagged type. */ TYPE_STUB_DECL (type) = pushdecl (build_decl (loc, TYPE_DECL, NULL_TREE, type)); /* An approximation for now, so we can tell this is a function-scope tag. This will be updated in pop_scope. */ TYPE_CONTEXT (type) = DECL_CONTEXT (TYPE_STUB_DECL (type)); if (warn_cxx_compat && name != NULL_TREE) { struct c_binding *b = I_SYMBOL_BINDING (name); if (b != NULL && b->decl != NULL_TREE && TREE_CODE (b->decl) == TYPE_DECL && (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) && (TYPE_MAIN_VARIANT (TREE_TYPE (b->decl)) != TYPE_MAIN_VARIANT (type))) { auto_diagnostic_group d; if (warning_at (loc, OPT_Wc___compat, ("using %qD as both a typedef and a tag is " "invalid in C++"), b->decl) && b->locus != UNKNOWN_LOCATION) inform (b->locus, "originally defined here"); } } } /* An exported interface to pushtag. This is used by the gdb plugin's binding oracle to introduce a new tag binding. */ void c_pushtag (location_t loc, tree name, tree type) { pushtag (loc, name, type); } /* An exported interface to bind a declaration. LOC is the location to use. DECL is the declaration to bind. The decl's name is used to determine how it is bound. If DECL is a VAR_DECL, then IS_GLOBAL determines whether the decl is put into the global (file and external) scope or the current function's scope; if DECL is not a VAR_DECL then it is always put into the file scope. */ void c_bind (location_t loc, tree decl, bool is_global) { struct c_scope *scope; bool nested = false; if (!VAR_P (decl) || current_function_scope == NULL) { /* Types and functions are always considered to be global. */ scope = file_scope; DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; } else if (is_global) { /* Also bind it into the external scope. */ bind (DECL_NAME (decl), decl, external_scope, true, false, loc); nested = true; scope = file_scope; DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; } else { DECL_CONTEXT (decl) = current_function_decl; TREE_PUBLIC (decl) = 0; scope = current_function_scope; } bind (DECL_NAME (decl), decl, scope, false, nested, loc); } /* Stores the first FILE*, const struct tm* etc. argument type (whatever it is) seen in a declaration of a file I/O etc. built-in, corresponding to the builtin_structptr_types array. Subsequent declarations of such built-ins are expected to refer to it rather than to fileptr_type_node, etc. which is just void* (or to any other type). Used only by match_builtin_function_types. 
*/ static const unsigned builtin_structptr_type_count = sizeof builtin_structptr_types / sizeof builtin_structptr_types[0]; static GTY(()) tree last_structptr_types[builtin_structptr_type_count]; /* Returns true if types T1 and T2 representing return types or types of function arguments are close enough to be considered interchangeable in redeclarations of built-in functions. */ static bool types_close_enough_to_match (tree t1, tree t2) { return (TYPE_MODE (t1) == TYPE_MODE (t2) && POINTER_TYPE_P (t1) == POINTER_TYPE_P (t2) && FUNCTION_POINTER_TYPE_P (t1) == FUNCTION_POINTER_TYPE_P (t2)); } /* Subroutine of compare_decls. Allow harmless mismatches in return and argument types provided that the type modes match. Set *STRICT and *ARGNO to the expected argument type and number in case of an argument type mismatch or null and zero otherwise. Return a unified type given a suitable match, and 0 otherwise. */ static tree match_builtin_function_types (tree newtype, tree oldtype, tree *strict, unsigned *argno) { *argno = 0; *strict = NULL_TREE; /* Accept the return type of the new declaration if it has the same mode and if they're both pointers or if neither is. */ tree oldrettype = TREE_TYPE (oldtype); tree newrettype = TREE_TYPE (newtype); if (!types_close_enough_to_match (oldrettype, newrettype)) return NULL_TREE; /* Check that the return types are compatible but don't fail if they are not (e.g., int vs long in ILP32) and just let the caller know. */ if (!comptypes (TYPE_MAIN_VARIANT (oldrettype), TYPE_MAIN_VARIANT (newrettype))) *strict = oldrettype; tree oldargs = TYPE_ARG_TYPES (oldtype); tree newargs = TYPE_ARG_TYPES (newtype); tree tryargs = newargs; const unsigned nlst = sizeof last_structptr_types / sizeof last_structptr_types[0]; const unsigned nbst = sizeof builtin_structptr_types / sizeof builtin_structptr_types[0]; gcc_checking_assert (nlst == nbst); for (unsigned i = 1; oldargs || newargs; ++i) { if (!oldargs || !newargs || !TREE_VALUE (oldargs) || !TREE_VALUE (newargs)) return NULL_TREE; tree oldtype = TYPE_MAIN_VARIANT (TREE_VALUE (oldargs)); tree newtype = TYPE_MAIN_VARIANT (TREE_VALUE (newargs)); if (!types_close_enough_to_match (oldtype, newtype)) return NULL_TREE; unsigned j = nbst; if (POINTER_TYPE_P (oldtype)) /* Iterate over well-known struct types like FILE (whose types aren't known to us) and compare the pointer to each to the pointer argument. */ for (j = 0; j < nbst; ++j) { if (TREE_VALUE (oldargs) != builtin_structptr_types[j].node) continue; /* Store the first FILE* etc. argument type (whatever it is), and expect any subsequent declarations of file I/O etc. built-ins to refer to it rather than to fileptr_type_node etc. which is just void* (or const void*). */ if (last_structptr_types[j]) { if (!comptypes (last_structptr_types[j], newtype)) { *argno = i; *strict = last_structptr_types[j]; } } else last_structptr_types[j] = newtype; break; } if (j == nbst && !comptypes (oldtype, newtype)) { if (POINTER_TYPE_P (oldtype)) { /* For incompatible pointers, only reject differences in the unqualified variants of the referenced types but consider differences in qualifiers as benign (report those to caller via *STRICT below). 
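For example, declaring the first argument of strpbrk as plain char * rather than const char * differs only in the qualifiers of the referenced type and is treated as benign, whereas declaring it as int * is rejected.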
*/ tree oldref = TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)); tree newref = TYPE_MAIN_VARIANT (TREE_TYPE (newtype)); if (!comptypes (oldref, newref)) return NULL_TREE; } if (!*strict) { *argno = i; *strict = oldtype; } } oldargs = TREE_CHAIN (oldargs); newargs = TREE_CHAIN (newargs); } tree trytype = build_function_type (newrettype, tryargs); /* Allow declaration to change transaction_safe attribute. */ tree oldattrs = TYPE_ATTRIBUTES (oldtype); tree oldtsafe = lookup_attribute ("transaction_safe", oldattrs); tree newattrs = TYPE_ATTRIBUTES (newtype); tree newtsafe = lookup_attribute ("transaction_safe", newattrs); if (oldtsafe && !newtsafe) oldattrs = remove_attribute ("transaction_safe", oldattrs); else if (newtsafe && !oldtsafe) oldattrs = tree_cons (get_identifier ("transaction_safe"), NULL_TREE, oldattrs); return build_type_attribute_variant (trytype, oldattrs); } /* Subroutine of diagnose_mismatched_decls. Check for function type mismatch involving an empty arglist vs a nonempty one and give clearer diagnostics. */ static void diagnose_arglist_conflict (tree newdecl, tree olddecl, tree newtype, tree oldtype) { tree t; if (TREE_CODE (olddecl) != FUNCTION_DECL || !comptypes (TREE_TYPE (oldtype), TREE_TYPE (newtype)) || !((!prototype_p (oldtype) && DECL_INITIAL (olddecl) == NULL_TREE) || (!prototype_p (newtype) && DECL_INITIAL (newdecl) == NULL_TREE))) return; t = TYPE_ARG_TYPES (oldtype); if (t == NULL_TREE) t = TYPE_ARG_TYPES (newtype); for (; t; t = TREE_CHAIN (t)) { tree type = TREE_VALUE (t); if (TREE_CHAIN (t) == NULL_TREE && TYPE_MAIN_VARIANT (type) != void_type_node) { inform (input_location, "a parameter list with an ellipsis " "cannot match an empty parameter name list declaration"); break; } if (c_type_promotes_to (type) != type) { inform (input_location, "an argument type that has a default " "promotion cannot match an empty parameter name list " "declaration"); break; } } } /* Another subroutine of diagnose_mismatched_decls. OLDDECL is an old-style function definition, NEWDECL is a prototype declaration. Diagnose inconsistencies in the argument list. Returns TRUE if the prototype is compatible, FALSE if not. */ static bool validate_proto_after_old_defn (tree newdecl, tree newtype, tree oldtype) { tree newargs, oldargs; int i; #define END_OF_ARGLIST(t) ((t) == void_type_node) oldargs = TYPE_ACTUAL_ARG_TYPES (oldtype); newargs = TYPE_ARG_TYPES (newtype); i = 1; for (;;) { tree oldargtype = TREE_VALUE (oldargs); tree newargtype = TREE_VALUE (newargs); if (oldargtype == error_mark_node || newargtype == error_mark_node) return false; oldargtype = (TYPE_ATOMIC (oldargtype) ? c_build_qualified_type (TYPE_MAIN_VARIANT (oldargtype), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (oldargtype)); newargtype = (TYPE_ATOMIC (newargtype) ? c_build_qualified_type (TYPE_MAIN_VARIANT (newargtype), TYPE_QUAL_ATOMIC) : TYPE_MAIN_VARIANT (newargtype)); if (END_OF_ARGLIST (oldargtype) && END_OF_ARGLIST (newargtype)) break; /* Reaching the end of just one list means the two decls don't agree on the number of arguments. */ if (END_OF_ARGLIST (oldargtype)) { error ("prototype for %q+D declares more arguments " "than previous old-style definition", newdecl); return false; } else if (END_OF_ARGLIST (newargtype)) { error ("prototype for %q+D declares fewer arguments " "than previous old-style definition", newdecl); return false; } /* Type for passing arg must be consistent with that declared for the arg. 
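For example, after the old-style definition 'double f (x) float x; { return x; }' the argument is actually passed as double, so a later prototype 'double f (float);' declares argument 1 with an incompatible type.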
*/ else if (!comptypes (oldargtype, newargtype)) { error ("prototype for %q+D declares argument %d" " with incompatible type", newdecl, i); return false; } oldargs = TREE_CHAIN (oldargs); newargs = TREE_CHAIN (newargs); i++; } /* If we get here, no errors were found, but do issue a warning for this poor-style construct. */ warning (0, "prototype for %q+D follows non-prototype definition", newdecl); return true; #undef END_OF_ARGLIST } /* Subroutine of diagnose_mismatched_decls. Report the location of DECL, the first in a pair of mismatched declarations, as a note following the diagnostic already issued for the second declaration. */ static void locate_old_decl (tree decl) { if (TREE_CODE (decl) == FUNCTION_DECL && fndecl_built_in_p (decl) && !C_DECL_DECLARED_BUILTIN (decl)) ; else if (DECL_INITIAL (decl)) inform (input_location, "previous definition of %q+D was here", decl); else if (C_DECL_IMPLICIT (decl)) inform (input_location, "previous implicit declaration of %q+D was here", decl); else inform (input_location, "previous declaration of %q+D was here", decl); } /* Subroutine of duplicate_decls. Compare NEWDECL to OLDDECL. Returns true if the caller should proceed to merge the two, false if OLDDECL should simply be discarded. As a side effect, issues all necessary diagnostics for invalid or poor-style combinations. If it returns true, writes the types of NEWDECL and OLDDECL to *NEWTYPEP and *OLDTYPEP - these may have been adjusted from TREE_TYPE (NEWDECL, OLDDECL) respectively. */ static bool diagnose_mismatched_decls (tree newdecl, tree olddecl, tree *newtypep, tree *oldtypep) { tree newtype, oldtype; bool retval = true; #define DECL_EXTERN_INLINE(DECL) (DECL_DECLARED_INLINE_P (DECL) \ && DECL_EXTERNAL (DECL)) /* If we have error_mark_node for either decl or type, just discard the previous decl - we're in an error cascade already. */ if (olddecl == error_mark_node || newdecl == error_mark_node) return false; *oldtypep = oldtype = TREE_TYPE (olddecl); *newtypep = newtype = TREE_TYPE (newdecl); if (oldtype == error_mark_node || newtype == error_mark_node) return false; /* Two different categories of symbol altogether. This is an error unless OLDDECL is a builtin. OLDDECL will be discarded in any case. */ if (TREE_CODE (olddecl) != TREE_CODE (newdecl)) { if (!(TREE_CODE (olddecl) == FUNCTION_DECL && fndecl_built_in_p (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl))) { auto_diagnostic_group d; error ("%q+D redeclared as different kind of symbol", newdecl); locate_old_decl (olddecl); } else if (TREE_PUBLIC (newdecl)) warning (OPT_Wbuiltin_declaration_mismatch, "built-in function %q+D declared as non-function", newdecl); else warning (OPT_Wshadow, "declaration of %q+D shadows " "a built-in function", newdecl); return false; } /* Enumerators have no linkage, so may only be declared once in a given scope. */ if (TREE_CODE (olddecl) == CONST_DECL) { auto_diagnostic_group d; error ("redeclaration of enumerator %q+D", newdecl); locate_old_decl (olddecl); return false; } bool pedwarned = false; bool warned = false; auto_diagnostic_group d; if (!comptypes (oldtype, newtype)) { if (TREE_CODE (olddecl) == FUNCTION_DECL && fndecl_built_in_p (olddecl, BUILT_IN_NORMAL) && !C_DECL_DECLARED_BUILTIN (olddecl)) { /* Accept "harmless" mismatches in function types such as missing qualifiers or int vs long when they're the same size. However, diagnose return and argument types that are incompatible according to language rules.
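For example, on an ILP32 target 'void *malloc (unsigned int);' is close enough to the built-in's 'void *malloc (long unsigned int)' because the argument types have the same mode, whereas 'void *malloc (double);' is not.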
*/ tree mismatch_expect; unsigned mismatch_argno; tree trytype = match_builtin_function_types (newtype, oldtype, &mismatch_expect, &mismatch_argno); if (trytype && comptypes (newtype, trytype)) *oldtypep = oldtype = trytype; else { /* If types don't match for a built-in, throw away the built-in. No point in calling locate_old_decl here, it won't print anything. */ const char *header = header_for_builtin_fn (olddecl); location_t loc = DECL_SOURCE_LOCATION (newdecl); if (warning_at (loc, OPT_Wbuiltin_declaration_mismatch, "conflicting types for built-in function %q+D; " "expected %qT", newdecl, oldtype) && header) { /* Suggest the right header to include as the preferred solution rather than the spelling of the declaration. */ rich_location richloc (line_table, loc); maybe_add_include_fixit (&richloc, header, true); inform (&richloc, "%qD is declared in header %qs", olddecl, header); } return false; } if (mismatch_expect && extra_warnings) { location_t newloc = DECL_SOURCE_LOCATION (newdecl); bool warned = false; if (mismatch_argno) warned = warning_at (newloc, OPT_Wbuiltin_declaration_mismatch, "mismatch in argument %u type of built-in " "function %qD; expected %qT", mismatch_argno, newdecl, mismatch_expect); else warned = warning_at (newloc, OPT_Wbuiltin_declaration_mismatch, "mismatch in return type of built-in " "function %qD; expected %qT", newdecl, mismatch_expect); const char *header = header_for_builtin_fn (olddecl); if (warned && header) { rich_location richloc (line_table, newloc); maybe_add_include_fixit (&richloc, header, true); inform (&richloc, "%qD is declared in header %qs", olddecl, header); } } } else if (TREE_CODE (olddecl) == FUNCTION_DECL && DECL_IS_BUILTIN (olddecl)) { /* A conflicting function declaration for a predeclared function that isn't actually built in. Objective C uses these. The new declaration silently overrides everything but the volatility (i.e. noreturn) indication. See also below. FIXME: Make Objective C use normal builtins. */ TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); return false; } /* Permit void foo (...) to match int foo (...) if the latter is the definition and implicit int was used. See c-torture/compile/920625-2.c. */ else if (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == void_type_node && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == integer_type_node && C_FUNCTION_IMPLICIT_INT (newdecl) && !DECL_INITIAL (olddecl)) { pedwarned = pedwarn (input_location, 0, "conflicting types for %q+D", newdecl); /* Make sure we keep void as the return type. */ TREE_TYPE (newdecl) = *newtypep = newtype = oldtype; C_FUNCTION_IMPLICIT_INT (newdecl) = 0; } /* Permit void foo (...) to match an earlier call to foo (...) with no declared type (thus, implicitly int). */ else if (TREE_CODE (newdecl) == FUNCTION_DECL && TYPE_MAIN_VARIANT (TREE_TYPE (newtype)) == void_type_node && TYPE_MAIN_VARIANT (TREE_TYPE (oldtype)) == integer_type_node && C_DECL_IMPLICIT (olddecl) && !DECL_INITIAL (olddecl)) { pedwarned = pedwarn (input_location, 0, "conflicting types for %q+D", newdecl); /* Make sure we keep void as the return type. 
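Here OLDDECL is the implicitly declared 'int foo ()' created by an earlier call such as 'foo ();'; rewriting its type below lets the new void return type win.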
*/ TREE_TYPE (olddecl) = *oldtypep = oldtype = newtype; } else { int new_quals = TYPE_QUALS (newtype); int old_quals = TYPE_QUALS (oldtype); if (new_quals != old_quals) { addr_space_t new_addr = DECODE_QUAL_ADDR_SPACE (new_quals); addr_space_t old_addr = DECODE_QUAL_ADDR_SPACE (old_quals); if (new_addr != old_addr) { if (ADDR_SPACE_GENERIC_P (new_addr)) error ("conflicting named address spaces (generic vs %s) " "for %q+D", c_addr_space_name (old_addr), newdecl); else if (ADDR_SPACE_GENERIC_P (old_addr)) error ("conflicting named address spaces (%s vs generic) " "for %q+D", c_addr_space_name (new_addr), newdecl); else error ("conflicting named address spaces (%s vs %s) " "for %q+D", c_addr_space_name (new_addr), c_addr_space_name (old_addr), newdecl); } if (CLEAR_QUAL_ADDR_SPACE (new_quals) != CLEAR_QUAL_ADDR_SPACE (old_quals)) error ("conflicting type qualifiers for %q+D", newdecl); } else error ("conflicting types for %q+D", newdecl); diagnose_arglist_conflict (newdecl, olddecl, newtype, oldtype); locate_old_decl (olddecl); return false; } } /* Redeclaration of a type is a constraint violation (6.7.2.3p1), but silently ignore the redeclaration if either is in a system header. (Conflicting redeclarations were handled above.) This is allowed for C11 if the types are the same, not just compatible. */ if (TREE_CODE (newdecl) == TYPE_DECL) { bool types_different = false; int comptypes_result; comptypes_result = comptypes_check_different_types (oldtype, newtype, &types_different); if (comptypes_result != 1 || types_different) { error ("redefinition of typedef %q+D with different type", newdecl); locate_old_decl (olddecl); return false; } if (DECL_IN_SYSTEM_HEADER (newdecl) || DECL_IN_SYSTEM_HEADER (olddecl) || TREE_NO_WARNING (newdecl) || TREE_NO_WARNING (olddecl)) return true; /* Allow OLDDECL to continue in use. */ if (variably_modified_type_p (newtype, NULL)) { error ("redefinition of typedef %q+D with variably modified type", newdecl); locate_old_decl (olddecl); } else if (pedwarn_c99 (input_location, OPT_Wpedantic, "redefinition of typedef %q+D", newdecl)) locate_old_decl (olddecl); return true; } /* Function declarations can either be 'static' or 'extern' (no qualifier is equivalent to 'extern' - C99 6.2.2p5) and therefore can never conflict with each other on account of linkage (6.2.2p4). Multiple definitions are not allowed (6.9p3,5) but gnu89 mode permits two definitions if one is 'extern inline' and one is not. The non- extern-inline definition supersedes the extern-inline definition. */ else if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If you declare a built-in function name as static, or define the built-in with an old-style definition (so we can't validate the argument list) the built-in definition is overridden, but optionally warn this was a bad choice of name. */ if (fndecl_built_in_p (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl)) { if (!TREE_PUBLIC (newdecl) || (DECL_INITIAL (newdecl) && !prototype_p (TREE_TYPE (newdecl)))) { warning_at (DECL_SOURCE_LOCATION (newdecl), OPT_Wshadow, "declaration of %qD shadows " "a built-in function", newdecl); /* Discard the old built-in function. */ return false; } if (!prototype_p (TREE_TYPE (newdecl))) { /* Set for built-ins that take no arguments. 
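For example, the built-in 'void abort (void)' takes no arguments, so an unprototyped redeclaration of it is not worth warning about.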
*/ bool func_void_args = false; if (tree at = TYPE_ARG_TYPES (oldtype)) func_void_args = VOID_TYPE_P (TREE_VALUE (at)); if (extra_warnings && !func_void_args) warning_at (DECL_SOURCE_LOCATION (newdecl), OPT_Wbuiltin_declaration_mismatch, "declaration of built-in function %qD without " "a prototype; expected %qT", newdecl, TREE_TYPE (olddecl)); } } if (DECL_INITIAL (newdecl)) { if (DECL_INITIAL (olddecl)) { /* If both decls are in the same TU and the new declaration isn't overriding an extern inline reject the new decl. In c99, no overriding is allowed in the same translation unit. */ if ((!DECL_EXTERN_INLINE (olddecl) || DECL_EXTERN_INLINE (newdecl) || (!flag_gnu89_inline && (!DECL_DECLARED_INLINE_P (olddecl) || !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (olddecl))) && (!DECL_DECLARED_INLINE_P (newdecl) || !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)))) ) && same_translation_unit_p (newdecl, olddecl)) { auto_diagnostic_group d; error ("redefinition of %q+D", newdecl); locate_old_decl (olddecl); return false; } } } /* If we have a prototype after an old-style function definition, the argument types must be checked specially. */ else if (DECL_INITIAL (olddecl) && !prototype_p (oldtype) && prototype_p (newtype) && TYPE_ACTUAL_ARG_TYPES (oldtype)) { auto_diagnostic_group d; if (!validate_proto_after_old_defn (newdecl, newtype, oldtype)) { locate_old_decl (olddecl); return false; } } /* A non-static declaration (even an "extern") followed by a static declaration is undefined behavior per C99 6.2.2p3-5,7. The same is true for a static forward declaration at block scope followed by a non-static declaration/definition at file scope. Static followed by non-static at the same scope is not undefined behavior, and is the most convenient way to get some effects (see e.g. what unwind-dw2-fde-glibc.c does to the definition of _Unwind_Find_FDE in unwind-dw2-fde.c), but we do diagnose it if -Wtraditional. */ if (TREE_PUBLIC (olddecl) && !TREE_PUBLIC (newdecl)) { /* Two exceptions to the rule. If olddecl is an extern inline, or a predeclared function that isn't actually built in, newdecl silently overrides olddecl. The latter occur only in Objective C; see also above. (FIXME: Make Objective C use normal builtins.) */ if (!DECL_IS_BUILTIN (olddecl) && !DECL_EXTERN_INLINE (olddecl)) { auto_diagnostic_group d; error ("static declaration of %q+D follows " "non-static declaration", newdecl); locate_old_decl (olddecl); } return false; } else if (TREE_PUBLIC (newdecl) && !TREE_PUBLIC (olddecl)) { if (DECL_CONTEXT (olddecl)) { auto_diagnostic_group d; error ("non-static declaration of %q+D follows " "static declaration", newdecl); locate_old_decl (olddecl); return false; } else if (warn_traditional) { warned |= warning (OPT_Wtraditional, "non-static declaration of %q+D " "follows static declaration", newdecl); } } /* Make sure gnu_inline attribute is either not present, or present on all inline decls. */ if (DECL_DECLARED_INLINE_P (olddecl) && DECL_DECLARED_INLINE_P (newdecl)) { bool newa = lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)) != NULL; bool olda = lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (olddecl)) != NULL; if (newa != olda) { auto_diagnostic_group d; error_at (input_location, "%<gnu_inline%> attribute present on %q+D", newa ? newdecl : olddecl); error_at (DECL_SOURCE_LOCATION (newa ? olddecl : newdecl), "but not here"); } } } else if (VAR_P (newdecl)) { /* Only variables can be thread-local, and all declarations must agree on this property. 
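For example, 'extern int n;' followed by '__thread int n;' (or the reverse order) is rejected below.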
*/ if (C_DECL_THREADPRIVATE_P (olddecl) && !DECL_THREAD_LOCAL_P (newdecl)) { /* Nothing to check. Since OLDDECL is marked threadprivate and NEWDECL does not have a thread-local attribute, we will merge the threadprivate attribute into NEWDECL. */ ; } else if (DECL_THREAD_LOCAL_P (newdecl) != DECL_THREAD_LOCAL_P (olddecl)) { auto_diagnostic_group d; if (DECL_THREAD_LOCAL_P (newdecl)) error ("thread-local declaration of %q+D follows " "non-thread-local declaration", newdecl); else error ("non-thread-local declaration of %q+D follows " "thread-local declaration", newdecl); locate_old_decl (olddecl); return false; } /* Multiple initialized definitions are not allowed (6.9p3,5). */ if (DECL_INITIAL (newdecl) && DECL_INITIAL (olddecl)) { auto_diagnostic_group d; error ("redefinition of %q+D", newdecl); locate_old_decl (olddecl); return false; } /* Objects declared at file scope: if the first declaration had external linkage (even if it was an external reference) the second must have external linkage as well, or the behavior is undefined. If the first declaration had internal linkage, then the second must too, or else be an external reference (in which case the composite declaration still has internal linkage). As for function declarations, we warn about the static-then- extern case only for -Wtraditional. See generally 6.2.2p3-5,7. */ if (DECL_FILE_SCOPE_P (newdecl) && TREE_PUBLIC (newdecl) != TREE_PUBLIC (olddecl)) { if (DECL_EXTERNAL (newdecl)) { if (!DECL_FILE_SCOPE_P (olddecl)) { auto_diagnostic_group d; error ("extern declaration of %q+D follows " "declaration with no linkage", newdecl); locate_old_decl (olddecl); return false; } else if (warn_traditional) { warned |= warning (OPT_Wtraditional, "non-static declaration of %q+D " "follows static declaration", newdecl); } } else { auto_diagnostic_group d; if (TREE_PUBLIC (newdecl)) error ("non-static declaration of %q+D follows " "static declaration", newdecl); else error ("static declaration of %q+D follows " "non-static declaration", newdecl); locate_old_decl (olddecl); return false; } } /* Two objects with the same name declared at the same block scope must both be external references (6.7p3). */ else if (!DECL_FILE_SCOPE_P (newdecl)) { if (DECL_EXTERNAL (newdecl)) { /* Extern with initializer at block scope, which will already have received an error. */ } else if (DECL_EXTERNAL (olddecl)) { auto_diagnostic_group d; error ("declaration of %q+D with no linkage follows " "extern declaration", newdecl); locate_old_decl (olddecl); } else { auto_diagnostic_group d; error ("redeclaration of %q+D with no linkage", newdecl); locate_old_decl (olddecl); } return false; } /* C++ does not permit a decl to appear multiple times at file scope. */ if (warn_cxx_compat && DECL_FILE_SCOPE_P (newdecl) && !DECL_EXTERNAL (newdecl) && !DECL_EXTERNAL (olddecl)) warned |= warning_at (DECL_SOURCE_LOCATION (newdecl), OPT_Wc___compat, ("duplicate declaration of %qD is " "invalid in C++"), newdecl); } /* warnings */ /* All decls must agree on a visibility. 
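For example, 'int v __attribute__ ((visibility ("hidden")));' followed by a redeclaration specifying visibility ("default") draws a warning, and the old visibility is preserved.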
*/ if (CODE_CONTAINS_STRUCT (TREE_CODE (newdecl), TS_DECL_WITH_VIS) && DECL_VISIBILITY_SPECIFIED (newdecl) && DECL_VISIBILITY_SPECIFIED (olddecl) && DECL_VISIBILITY (newdecl) != DECL_VISIBILITY (olddecl)) { warned |= warning (0, "redeclaration of %q+D with different visibility " "(old visibility preserved)", newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) warned |= diagnose_mismatched_attributes (olddecl, newdecl); else /* PARM_DECL, VAR_DECL */ { /* Redeclaration of a parameter is a constraint violation (this is not explicitly stated, but follows from C99 6.7p3 [no more than one declaration of the same identifier with no linkage in the same scope, except type tags] and 6.2.2p6 [parameters have no linkage]). We must check for a forward parameter declaration, indicated by TREE_ASM_WRITTEN on the old declaration - this is an extension, the mandatory diagnostic for which is handled by mark_forward_parm_decls. */ if (TREE_CODE (newdecl) == PARM_DECL && (!TREE_ASM_WRITTEN (olddecl) || TREE_ASM_WRITTEN (newdecl))) { auto_diagnostic_group d; error ("redefinition of parameter %q+D", newdecl); locate_old_decl (olddecl); return false; } } /* Optional warning for completely redundant decls. */ if (!warned && !pedwarned && warn_redundant_decls /* Don't warn about a function declaration followed by a definition. */ && !(TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl)) /* Don't warn about redundant redeclarations of builtins. */ && !(TREE_CODE (newdecl) == FUNCTION_DECL && !fndecl_built_in_p (newdecl) && fndecl_built_in_p (olddecl) && !C_DECL_DECLARED_BUILTIN (olddecl)) /* Don't warn about an extern followed by a definition. */ && !(DECL_EXTERNAL (olddecl) && !DECL_EXTERNAL (newdecl)) /* Don't warn about forward parameter decls. */ && !(TREE_CODE (newdecl) == PARM_DECL && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl)) /* Don't warn about a variable definition following a declaration. */ && !(VAR_P (newdecl) && DECL_INITIAL (newdecl) && !DECL_INITIAL (olddecl))) { warned = warning (OPT_Wredundant_decls, "redundant redeclaration of %q+D", newdecl); } /* Report location of previous decl/defn. */ if (warned || pedwarned) locate_old_decl (olddecl); #undef DECL_EXTERN_INLINE return retval; } /* Subroutine of duplicate_decls. NEWDECL has been found to be consistent with OLDDECL, but carries new information. Merge the new information into OLDDECL. This function issues no diagnostics. */ static void merge_decls (tree newdecl, tree olddecl, tree newtype, tree oldtype) { bool new_is_definition = (TREE_CODE (newdecl) == FUNCTION_DECL && DECL_INITIAL (newdecl) != NULL_TREE); bool new_is_prototype = (TREE_CODE (newdecl) == FUNCTION_DECL && prototype_p (TREE_TYPE (newdecl))); bool old_is_prototype = (TREE_CODE (olddecl) == FUNCTION_DECL && prototype_p (TREE_TYPE (olddecl))); /* For real parm decl following a forward decl, rechain the old decl in its new location and clear TREE_ASM_WRITTEN (it's not a forward decl anymore). 
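This is the GNU forward parameter declaration extension, e.g. 'void f (int n; int a[n], int n) { }': the forward decl of n, marked TREE_ASM_WRITTEN, is superseded by the real parm decl later in the parameter list.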
*/ if (TREE_CODE (newdecl) == PARM_DECL && TREE_ASM_WRITTEN (olddecl) && !TREE_ASM_WRITTEN (newdecl)) { struct c_binding *b, **here; for (here = &current_scope->bindings; *here; here = &(*here)->prev) if ((*here)->decl == olddecl) goto found; gcc_unreachable (); found: b = *here; *here = b->prev; b->prev = current_scope->bindings; current_scope->bindings = b; TREE_ASM_WRITTEN (olddecl) = 0; } DECL_ATTRIBUTES (newdecl) = targetm.merge_decl_attributes (olddecl, newdecl); /* For typedefs use the old type, as the new type's DECL_NAME points at newdecl, which will be ggc_freed. */ if (TREE_CODE (newdecl) == TYPE_DECL) { /* But NEWTYPE might have an attribute, honor that. */ tree tem = newtype; newtype = oldtype; if (TYPE_USER_ALIGN (tem)) { if (TYPE_ALIGN (tem) > TYPE_ALIGN (newtype)) SET_TYPE_ALIGN (newtype, TYPE_ALIGN (tem)); TYPE_USER_ALIGN (newtype) = true; } /* And remove the new type from the variants list. */ if (TYPE_NAME (TREE_TYPE (newdecl)) == newdecl) { tree remove = TREE_TYPE (newdecl); if (TYPE_MAIN_VARIANT (remove) == remove) { gcc_assert (TYPE_NEXT_VARIANT (remove) == NULL_TREE); /* If remove is the main variant, no need to remove that from the list. One of the DECL_ORIGINAL_TYPE variants, e.g. created for aligned attribute, might still refer to the newdecl TYPE_DECL though, so remove that one in that case. */ if (DECL_ORIGINAL_TYPE (newdecl) && DECL_ORIGINAL_TYPE (newdecl) != remove) for (tree t = TYPE_MAIN_VARIANT (DECL_ORIGINAL_TYPE (newdecl)); t; t = TYPE_NEXT_VARIANT (t)) if (TYPE_NAME (TYPE_NEXT_VARIANT (t)) == newdecl) { TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (TYPE_NEXT_VARIANT (t)); break; } } else for (tree t = TYPE_MAIN_VARIANT (remove); ; t = TYPE_NEXT_VARIANT (t)) if (TYPE_NEXT_VARIANT (t) == remove) { TYPE_NEXT_VARIANT (t) = TYPE_NEXT_VARIANT (remove); break; } } } /* Merge the data types specified in the two decls. */ TREE_TYPE (newdecl) = TREE_TYPE (olddecl) = composite_type (newtype, oldtype); /* Lay the type out, unless already done. */ if (!comptypes (oldtype, TREE_TYPE (newdecl))) { if (TREE_TYPE (newdecl) != error_mark_node) layout_type (TREE_TYPE (newdecl)); if (TREE_CODE (newdecl) != FUNCTION_DECL && TREE_CODE (newdecl) != TYPE_DECL && TREE_CODE (newdecl) != CONST_DECL) layout_decl (newdecl, 0); } else { /* Since the type is OLDDECL's, make OLDDECL's size go with. */ DECL_SIZE (newdecl) = DECL_SIZE (olddecl); DECL_SIZE_UNIT (newdecl) = DECL_SIZE_UNIT (olddecl); SET_DECL_MODE (newdecl, DECL_MODE (olddecl)); if (DECL_ALIGN (olddecl) > DECL_ALIGN (newdecl)) { SET_DECL_ALIGN (newdecl, DECL_ALIGN (olddecl)); DECL_USER_ALIGN (newdecl) |= DECL_USER_ALIGN (olddecl); } if (DECL_WARN_IF_NOT_ALIGN (olddecl) > DECL_WARN_IF_NOT_ALIGN (newdecl)) SET_DECL_WARN_IF_NOT_ALIGN (newdecl, DECL_WARN_IF_NOT_ALIGN (olddecl)); } /* Keep the old rtl since we can safely use it. */ if (HAS_RTL_P (olddecl)) COPY_DECL_RTL (olddecl, newdecl); /* Merge the type qualifiers. */ if (TREE_READONLY (newdecl)) TREE_READONLY (olddecl) = 1; if (TREE_THIS_VOLATILE (newdecl)) TREE_THIS_VOLATILE (olddecl) = 1; /* Merge deprecatedness. */ if (TREE_DEPRECATED (newdecl)) TREE_DEPRECATED (olddecl) = 1; /* If a decl is in a system header and the other isn't, keep the one on the system header. Otherwise, keep source location of definition rather than declaration and of prototype rather than non-prototype unless that prototype is built-in.
*/ if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS) && DECL_IN_SYSTEM_HEADER (olddecl) && !DECL_IN_SYSTEM_HEADER (newdecl) ) DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); else if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS) && DECL_IN_SYSTEM_HEADER (newdecl) && !DECL_IN_SYSTEM_HEADER (olddecl)) DECL_SOURCE_LOCATION (olddecl) = DECL_SOURCE_LOCATION (newdecl); else if ((DECL_INITIAL (newdecl) == NULL_TREE && DECL_INITIAL (olddecl) != NULL_TREE) || (old_is_prototype && !new_is_prototype && !C_DECL_BUILTIN_PROTOTYPE (olddecl))) DECL_SOURCE_LOCATION (newdecl) = DECL_SOURCE_LOCATION (olddecl); /* Merge the initialization information. */ if (DECL_INITIAL (newdecl) == NULL_TREE) DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); /* Merge the threadprivate attribute. */ if (VAR_P (olddecl) && C_DECL_THREADPRIVATE_P (olddecl)) C_DECL_THREADPRIVATE_P (newdecl) = 1; if (CODE_CONTAINS_STRUCT (TREE_CODE (olddecl), TS_DECL_WITH_VIS)) { /* Copy the assembler name. Currently, it can only be defined in the prototype. */ COPY_DECL_ASSEMBLER_NAME (olddecl, newdecl); /* Use visibility of whichever declaration had it specified */ if (DECL_VISIBILITY_SPECIFIED (olddecl)) { DECL_VISIBILITY (newdecl) = DECL_VISIBILITY (olddecl); DECL_VISIBILITY_SPECIFIED (newdecl) = 1; } if (TREE_CODE (newdecl) == FUNCTION_DECL) { DECL_STATIC_CONSTRUCTOR(newdecl) |= DECL_STATIC_CONSTRUCTOR(olddecl); DECL_STATIC_DESTRUCTOR (newdecl) |= DECL_STATIC_DESTRUCTOR (olddecl); DECL_NO_LIMIT_STACK (newdecl) |= DECL_NO_LIMIT_STACK (olddecl); DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (newdecl) |= DECL_NO_INSTRUMENT_FUNCTION_ENTRY_EXIT (olddecl); TREE_THIS_VOLATILE (newdecl) |= TREE_THIS_VOLATILE (olddecl); DECL_IS_MALLOC (newdecl) |= DECL_IS_MALLOC (olddecl); if (DECL_IS_OPERATOR_NEW_P (olddecl)) DECL_SET_IS_OPERATOR_NEW (newdecl, true); if (DECL_IS_OPERATOR_DELETE_P (olddecl)) DECL_SET_IS_OPERATOR_DELETE (newdecl, true); TREE_READONLY (newdecl) |= TREE_READONLY (olddecl); DECL_PURE_P (newdecl) |= DECL_PURE_P (olddecl); DECL_IS_NOVOPS (newdecl) |= DECL_IS_NOVOPS (olddecl); } /* Merge the storage class information. */ merge_weak (newdecl, olddecl); /* For functions, static overrides non-static. */ if (TREE_CODE (newdecl) == FUNCTION_DECL) { TREE_PUBLIC (newdecl) &= TREE_PUBLIC (olddecl); /* This is since we don't automatically copy the attributes of NEWDECL into OLDDECL. */ TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); /* If this clears `static', clear it in the identifier too. */ if (!TREE_PUBLIC (olddecl)) TREE_PUBLIC (DECL_NAME (olddecl)) = 0; } } /* In c99, 'extern' declaration before (or after) 'inline' means this function is not DECL_EXTERNAL, unless 'gnu_inline' attribute is present. */ if (TREE_CODE (newdecl) == FUNCTION_DECL && !flag_gnu89_inline && (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) && (!DECL_DECLARED_INLINE_P (newdecl) || !DECL_DECLARED_INLINE_P (olddecl) || !DECL_EXTERNAL (olddecl)) && DECL_EXTERNAL (newdecl) && !lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (newdecl)) && !current_function_decl) DECL_EXTERNAL (newdecl) = 0; /* An inline definition following a static declaration is not DECL_EXTERNAL. */ if (new_is_definition && (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) && !TREE_PUBLIC (olddecl)) DECL_EXTERNAL (newdecl) = 0; if (DECL_EXTERNAL (newdecl)) { TREE_STATIC (newdecl) = TREE_STATIC (olddecl); DECL_EXTERNAL (newdecl) = DECL_EXTERNAL (olddecl); /* An extern decl does not override previous storage class. 
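For example, after 'static int i;' a subsequent 'extern int i;' still refers to the same internal-linkage object (C99 6.2.2p4).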
*/ TREE_PUBLIC (newdecl) = TREE_PUBLIC (olddecl); if (!DECL_EXTERNAL (newdecl)) { DECL_CONTEXT (newdecl) = DECL_CONTEXT (olddecl); DECL_COMMON (newdecl) = DECL_COMMON (olddecl); } } else { TREE_STATIC (olddecl) = TREE_STATIC (newdecl); TREE_PUBLIC (olddecl) = TREE_PUBLIC (newdecl); } if (TREE_CODE (newdecl) == FUNCTION_DECL) { /* If we're redefining a function previously defined as extern inline, make sure we emit debug info for the inline before we throw it away, in case it was inlined into a function that hasn't been written out yet. */ if (new_is_definition && DECL_INITIAL (olddecl)) /* The new defn must not be inline. */ DECL_UNINLINABLE (newdecl) = 1; else { /* If either decl says `inline', this fn is inline, unless its definition was passed already. */ if (DECL_DECLARED_INLINE_P (newdecl) || DECL_DECLARED_INLINE_P (olddecl)) DECL_DECLARED_INLINE_P (newdecl) = 1; DECL_UNINLINABLE (newdecl) = DECL_UNINLINABLE (olddecl) = (DECL_UNINLINABLE (newdecl) || DECL_UNINLINABLE (olddecl)); DECL_DISREGARD_INLINE_LIMITS (newdecl) = DECL_DISREGARD_INLINE_LIMITS (olddecl) = (DECL_DISREGARD_INLINE_LIMITS (newdecl) || DECL_DISREGARD_INLINE_LIMITS (olddecl)); } if (fndecl_built_in_p (olddecl)) { /* If redeclaring a builtin function, it stays built in. But it gets tagged as having been declared. */ copy_decl_built_in_function (newdecl, olddecl); C_DECL_DECLARED_BUILTIN (newdecl) = 1; if (new_is_prototype) { C_DECL_BUILTIN_PROTOTYPE (newdecl) = 0; if (DECL_BUILT_IN_CLASS (newdecl) == BUILT_IN_NORMAL) { enum built_in_function fncode = DECL_FUNCTION_CODE (newdecl); switch (fncode) { /* If a compatible prototype of these builtin functions is seen, assume the runtime implements it with the expected semantics. */ case BUILT_IN_STPCPY: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_implicit_p (fncode, true); break; default: if (builtin_decl_explicit_p (fncode)) set_builtin_decl_declared_p (fncode, true); break; } copy_attributes_to_builtin (newdecl); } } else C_DECL_BUILTIN_PROTOTYPE (newdecl) = C_DECL_BUILTIN_PROTOTYPE (olddecl); } /* Preserve function specific target and optimization options */ if (DECL_FUNCTION_SPECIFIC_TARGET (olddecl) && !DECL_FUNCTION_SPECIFIC_TARGET (newdecl)) DECL_FUNCTION_SPECIFIC_TARGET (newdecl) = DECL_FUNCTION_SPECIFIC_TARGET (olddecl); if (DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl) && !DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl)) DECL_FUNCTION_SPECIFIC_OPTIMIZATION (newdecl) = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (olddecl); /* Also preserve various other info from the definition. */ if (!new_is_definition) { tree t; DECL_RESULT (newdecl) = DECL_RESULT (olddecl); DECL_INITIAL (newdecl) = DECL_INITIAL (olddecl); DECL_STRUCT_FUNCTION (newdecl) = DECL_STRUCT_FUNCTION (olddecl); DECL_SAVED_TREE (newdecl) = DECL_SAVED_TREE (olddecl); DECL_ARGUMENTS (newdecl) = copy_list (DECL_ARGUMENTS (olddecl)); for (t = DECL_ARGUMENTS (newdecl); t ; t = DECL_CHAIN (t)) DECL_CONTEXT (t) = newdecl; /* See if we've got a function to instantiate from. */ if (DECL_SAVED_TREE (olddecl)) DECL_ABSTRACT_ORIGIN (newdecl) = DECL_ABSTRACT_ORIGIN (olddecl); } } /* Merge the USED information. 
*/ if (TREE_USED (olddecl)) TREE_USED (newdecl) = 1; else if (TREE_USED (newdecl)) TREE_USED (olddecl) = 1; if (VAR_P (olddecl) || TREE_CODE (olddecl) == PARM_DECL) DECL_READ_P (newdecl) |= DECL_READ_P (olddecl); if (DECL_PRESERVE_P (olddecl)) DECL_PRESERVE_P (newdecl) = 1; else if (DECL_PRESERVE_P (newdecl)) DECL_PRESERVE_P (olddecl) = 1; /* Merge DECL_COMMON */ if (VAR_P (olddecl) && VAR_P (newdecl) && !lookup_attribute ("common", DECL_ATTRIBUTES (newdecl)) && !lookup_attribute ("nocommon", DECL_ATTRIBUTES (newdecl))) DECL_COMMON (newdecl) = DECL_COMMON (newdecl) && DECL_COMMON (olddecl); /* Copy most of the decl-specific fields of NEWDECL into OLDDECL. But preserve OLDDECL's DECL_UID, DECL_CONTEXT and DECL_ARGUMENTS (if appropriate). */ { unsigned olddecl_uid = DECL_UID (olddecl); tree olddecl_context = DECL_CONTEXT (olddecl); tree olddecl_arguments = NULL; if (TREE_CODE (olddecl) == FUNCTION_DECL) olddecl_arguments = DECL_ARGUMENTS (olddecl); memcpy ((char *) olddecl + sizeof (struct tree_common), (char *) newdecl + sizeof (struct tree_common), sizeof (struct tree_decl_common) - sizeof (struct tree_common)); DECL_USER_ALIGN (olddecl) = DECL_USER_ALIGN (newdecl); switch (TREE_CODE (olddecl)) { case FUNCTION_DECL: case VAR_DECL: { struct symtab_node *snode = olddecl->decl_with_vis.symtab_node; memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common)); olddecl->decl_with_vis.symtab_node = snode; if ((DECL_EXTERNAL (olddecl) || TREE_PUBLIC (olddecl) || TREE_STATIC (olddecl)) && DECL_SECTION_NAME (newdecl) != NULL) set_decl_section_name (olddecl, DECL_SECTION_NAME (newdecl)); /* This isn't quite correct for something like int __thread x attribute ((tls_model ("local-exec"))); extern int __thread x; as we'll lose the "local-exec" model. */ if (VAR_P (olddecl) && DECL_THREAD_LOCAL_P (newdecl)) set_decl_tls_model (olddecl, DECL_TLS_MODEL (newdecl)); break; } case FIELD_DECL: case PARM_DECL: case LABEL_DECL: case RESULT_DECL: case CONST_DECL: case TYPE_DECL: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), tree_code_size (TREE_CODE (olddecl)) - sizeof (struct tree_decl_common)); break; default: memcpy ((char *) olddecl + sizeof (struct tree_decl_common), (char *) newdecl + sizeof (struct tree_decl_common), sizeof (struct tree_decl_non_common) - sizeof (struct tree_decl_common)); } DECL_UID (olddecl) = olddecl_uid; DECL_CONTEXT (olddecl) = olddecl_context; if (TREE_CODE (olddecl) == FUNCTION_DECL) DECL_ARGUMENTS (olddecl) = olddecl_arguments; } /* If OLDDECL had its DECL_RTL instantiated, re-invoke make_decl_rtl so that encode_section_info has a chance to look at the new decl flags and attributes. */ if (DECL_RTL_SET_P (olddecl) && (TREE_CODE (olddecl) == FUNCTION_DECL || (VAR_P (olddecl) && TREE_STATIC (olddecl)))) make_decl_rtl (olddecl); } /* Handle when a new declaration NEWDECL has the same name as an old one OLDDECL in the same binding contour. Prints an error message if appropriate. If safely possible, alter OLDDECL to look like NEWDECL, and return true. Otherwise, return false. */ static bool duplicate_decls (tree newdecl, tree olddecl) { tree newtype = NULL, oldtype = NULL; if (!diagnose_mismatched_decls (newdecl, olddecl, &newtype, &oldtype)) { /* Avoid `unused variable' and other warnings for OLDDECL. 
*/ TREE_NO_WARNING (olddecl) = 1; return false; } merge_decls (newdecl, olddecl, newtype, oldtype); /* NEWDECL will no longer be needed. Before releasing the node, be sure to remove the function from the symbol table, where it might have been inserted to record its comdat group. Be sure, however, not to free DECL_STRUCT_FUNCTION, because that structure is shared between NEWDECL and OLDDECL. */ if (TREE_CODE (newdecl) == FUNCTION_DECL) DECL_STRUCT_FUNCTION (newdecl) = NULL; if (VAR_OR_FUNCTION_DECL_P (newdecl)) { struct symtab_node *snode = symtab_node::get (newdecl); if (snode) snode->remove (); } ggc_free (newdecl); return true; } /* Check whether decl-node NEW_DECL shadows an existing declaration. */ static void warn_if_shadowing (tree new_decl) { struct c_binding *b; /* Shadow warnings wanted? */ if (!(warn_shadow || warn_shadow_local || warn_shadow_compatible_local) /* No shadow warnings for internally generated vars. */ || DECL_IS_BUILTIN (new_decl)) return; /* Is anything being shadowed? Invisible decls do not count. */ for (b = I_SYMBOL_BINDING (DECL_NAME (new_decl)); b; b = b->shadowed) if (b->decl && b->decl != new_decl && !b->invisible && (b->decl == error_mark_node || diagnostic_report_warnings_p (global_dc, DECL_SOURCE_LOCATION (b->decl)))) { tree old_decl = b->decl; if (old_decl == error_mark_node) { warning (OPT_Wshadow, "declaration of %q+D shadows previous " "non-variable", new_decl); break; } bool warned = false; auto_diagnostic_group d; if (TREE_CODE (old_decl) == PARM_DECL) { enum opt_code warning_code; /* If '-Wshadow=compatible-local' is specified without other -Wshadow= flags, we will warn only when the types of the shadowing variable (i.e. new_decl) and the shadowed variable (old_decl) are compatible. */ if (warn_shadow) warning_code = OPT_Wshadow; else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl))) warning_code = OPT_Wshadow_compatible_local; else warning_code = OPT_Wshadow_local; warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code, "declaration of %qD shadows a parameter", new_decl); } else if (DECL_FILE_SCOPE_P (old_decl)) { /* Do not warn if a variable shadows a function, unless the variable is a function or a pointer-to-function. */ if (TREE_CODE (old_decl) == FUNCTION_DECL && TREE_CODE (new_decl) != FUNCTION_DECL && !FUNCTION_POINTER_TYPE_P (TREE_TYPE (new_decl))) continue; warned = warning_at (DECL_SOURCE_LOCATION (new_decl), OPT_Wshadow, "declaration of %qD shadows a global " "declaration", new_decl); } else if (TREE_CODE (old_decl) == FUNCTION_DECL && fndecl_built_in_p (old_decl)) { warning (OPT_Wshadow, "declaration of %q+D shadows " "a built-in function", new_decl); break; } else { enum opt_code warning_code; /* If '-Wshadow=compatible-local' is specified without other -Wshadow= flags, we will warn only when the types of the shadowing variable (i.e. new_decl) and the shadowed variable (old_decl) are compatible. */ if (warn_shadow) warning_code = OPT_Wshadow; else if (comptypes (TREE_TYPE (old_decl), TREE_TYPE (new_decl))) warning_code = OPT_Wshadow_compatible_local; else warning_code = OPT_Wshadow_local; warned = warning_at (DECL_SOURCE_LOCATION (new_decl), warning_code, "declaration of %qD shadows a previous local", new_decl); } if (warned) inform (DECL_SOURCE_LOCATION (old_decl), "shadowed declaration is here"); break; } } /* Record a decl-node X as belonging to the current lexical scope. Check for errors (such as an incompatible declaration for the same name already seen in the same scope).
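For example, 'int i; double i;' within a single block is such an error.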
Returns either X or an old decl for the same name. If an old decl is returned, it may have been smashed to agree with what X says. */ tree pushdecl (tree x) { tree name = DECL_NAME (x); struct c_scope *scope = current_scope; struct c_binding *b; bool nested = false; location_t locus = DECL_SOURCE_LOCATION (x); /* Must set DECL_CONTEXT for everything not at file scope or DECL_FILE_SCOPE_P won't work. Local externs don't count unless they have initializers (which generate code). */ if (current_function_decl && (!VAR_OR_FUNCTION_DECL_P (x) || DECL_INITIAL (x) || !TREE_PUBLIC (x))) DECL_CONTEXT (x) = current_function_decl; /* Anonymous decls are just inserted in the scope. */ if (!name) { bind (name, x, scope, /*invisible=*/false, /*nested=*/false, locus); return x; } /* First, see if there is another declaration with the same name in the current scope. If there is, duplicate_decls may do all the work for us. If duplicate_decls returns false, that indicates two incompatible decls in the same scope; we are to silently replace the old one (duplicate_decls has issued all appropriate diagnostics). In particular, we should not consider possible duplicates in the external scope, or shadowing. */ b = I_SYMBOL_BINDING (name); if (b && B_IN_SCOPE (b, scope)) { struct c_binding *b_ext, *b_use; tree type = TREE_TYPE (x); tree visdecl = b->decl; tree vistype = TREE_TYPE (visdecl); if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && COMPLETE_TYPE_P (TREE_TYPE (x))) b->inner_comp = false; b_use = b; b_ext = b; /* If this is an external linkage declaration, we should check for compatibility with the type in the external scope before setting the type at this scope based on the visible information only. */ if (TREE_PUBLIC (x) && TREE_PUBLIC (visdecl)) { while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext)) b_ext = b_ext->shadowed; if (b_ext) { b_use = b_ext; if (b_use->u.type) TREE_TYPE (b_use->decl) = b_use->u.type; } } if (duplicate_decls (x, b_use->decl)) { if (b_use != b) { /* Save the updated type in the external scope and restore the proper type for this scope. */ tree thistype; if (comptypes (vistype, type)) thistype = composite_type (vistype, type); else thistype = TREE_TYPE (b_use->decl); b_use->u.type = TREE_TYPE (b_use->decl); if (TREE_CODE (b_use->decl) == FUNCTION_DECL && fndecl_built_in_p (b_use->decl)) thistype = build_type_attribute_variant (thistype, TYPE_ATTRIBUTES (b_use->u.type)); TREE_TYPE (b_use->decl) = thistype; } return b_use->decl; } else goto skip_external_and_shadow_checks; } /* All declarations with external linkage, and all external references, go in the external scope, no matter what scope is current. However, the binding in that scope is ignored for purposes of normal name lookup. A separate binding structure is created in the requested scope; this governs the normal visibility of the symbol. The binding in the externals scope is used exclusively for detecting duplicate declarations of the same object, no matter what scope they are in; this is what we do here. (C99 6.2.7p2: All declarations that refer to the same object or function shall have compatible type; otherwise, the behavior is undefined.) However, in Objective-C, we also want to detect declarations conflicting with those of the basic types. 
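For example, 'void f (void) { extern int i; } void g (void) { extern double i; }' is diagnosed here even though neither declaration is visible at the point of the other.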
*/ if ((DECL_EXTERNAL (x) || scope == file_scope) && (VAR_OR_FUNCTION_DECL_P (x) || c_dialect_objc ())) { tree type = TREE_TYPE (x); tree vistype = NULL_TREE; tree visdecl = NULL_TREE; bool type_saved = false; if (b && !B_IN_EXTERNAL_SCOPE (b) && VAR_OR_FUNCTION_DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl)) { visdecl = b->decl; vistype = TREE_TYPE (visdecl); } if (scope != file_scope && !DECL_IN_SYSTEM_HEADER (x)) warning_at (locus, OPT_Wnested_externs, "nested extern declaration of %qD", x); while (b && !B_IN_EXTERNAL_SCOPE (b)) { /* If this decl might be modified, save its type. This is done here rather than when the decl is first bound because the type may change after first binding, through being completed or through attributes being added. If we encounter multiple such decls, only the first should have its type saved; the others will already have had their proper types saved and the types will not have changed as their scopes will not have been re-entered. */ if (DECL_P (b->decl) && DECL_FILE_SCOPE_P (b->decl) && !type_saved) { b->u.type = TREE_TYPE (b->decl); type_saved = true; } if (B_IN_FILE_SCOPE (b) && VAR_P (b->decl) && TREE_STATIC (b->decl) && TREE_CODE (TREE_TYPE (b->decl)) == ARRAY_TYPE && !TYPE_DOMAIN (TREE_TYPE (b->decl)) && TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) && !integer_zerop (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))) { /* Array type completed in inner scope, which should be diagnosed if the completion does not have size 1 and it does not get completed in the file scope. */ b->inner_comp = true; } b = b->shadowed; } /* If a matching external declaration has been found, set its type to the composite of all the types of that declaration. After the consistency checks, it will be reset to the composite of the visible types only. */ if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl)) && b->u.type) TREE_TYPE (b->decl) = b->u.type; /* The point of the same_translation_unit_p check here is, we want to detect a duplicate decl for a construct like foo() { extern bar(); } ... static bar(); but not if they are in different translation units. In any case, the static does not go in the externals scope. */ if (b && (TREE_PUBLIC (x) || same_translation_unit_p (x, b->decl)) && duplicate_decls (x, b->decl)) { tree thistype; if (vistype) { if (comptypes (vistype, type)) thistype = composite_type (vistype, type); else thistype = TREE_TYPE (b->decl); } else thistype = type; b->u.type = TREE_TYPE (b->decl); if (TREE_CODE (b->decl) == FUNCTION_DECL && fndecl_built_in_p (b->decl)) thistype = build_type_attribute_variant (thistype, TYPE_ATTRIBUTES (b->u.type)); TREE_TYPE (b->decl) = thistype; bind (name, b->decl, scope, /*invisible=*/false, /*nested=*/true, locus); return b->decl; } else if (TREE_PUBLIC (x)) { if (visdecl && !b && duplicate_decls (x, visdecl)) { /* An external declaration at block scope referring to a visible entity with internal linkage. The composite type will already be correct for this scope, so we just need to fall through to make the declaration in this scope. */ nested = true; x = visdecl; } else { bind (name, x, external_scope, /*invisible=*/true, /*nested=*/false, locus); nested = true; } } } if (TREE_CODE (x) != PARM_DECL) warn_if_shadowing (x); skip_external_and_shadow_checks: if (TREE_CODE (x) == TYPE_DECL) { /* So this is a typedef, set its underlying type. 
*/ set_underlying_type (x); /* If X is a typedef defined in the current function, record it for the purpose of implementing the -Wunused-local-typedefs warning. */ record_locally_defined_typedef (x); } bind (name, x, scope, /*invisible=*/false, nested, locus); /* If x's type is incomplete because it's based on a structure or union which has not yet been fully declared, attach it to that structure or union type, so we can go back and complete the variable declaration later, if the structure or union gets fully declared. If the input is erroneous, we can have error_mark in the type slot (e.g. "f(void a, ...)") - that doesn't count as an incomplete type. */ if (TREE_TYPE (x) != error_mark_node && !COMPLETE_TYPE_P (TREE_TYPE (x))) { tree element = TREE_TYPE (x); while (TREE_CODE (element) == ARRAY_TYPE) element = TREE_TYPE (element); element = TYPE_MAIN_VARIANT (element); if ((RECORD_OR_UNION_TYPE_P (element) || TREE_CODE (element) == ENUMERAL_TYPE) && (TREE_CODE (x) != TYPE_DECL || TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE) && !COMPLETE_TYPE_P (element)) C_TYPE_INCOMPLETE_VARS (element) = tree_cons (NULL_TREE, x, C_TYPE_INCOMPLETE_VARS (element)); } return x; } /* Issue a warning about implicit function declaration. ID is the function identifier, OLDDECL is a declaration of the function in a different scope, or NULL_TREE. */ static void implicit_decl_warning (location_t loc, tree id, tree olddecl) { if (!warn_implicit_function_declaration) return; bool warned; auto_diagnostic_group d; name_hint hint; if (!olddecl) hint = lookup_name_fuzzy (id, FUZZY_LOOKUP_FUNCTION_NAME, loc); if (flag_isoc99) { if (const char *suggestion = hint.suggestion ()) { gcc_rich_location richloc (loc); richloc.add_fixit_replace (suggestion); warned = pedwarn (&richloc, OPT_Wimplicit_function_declaration, "implicit declaration of function %qE;" " did you mean %qs?", id, suggestion); } else warned = pedwarn (loc, OPT_Wimplicit_function_declaration, "implicit declaration of function %qE", id); } else if (const char *suggestion = hint.suggestion ()) { gcc_rich_location richloc (loc); richloc.add_fixit_replace (suggestion); warned = warning_at (&richloc, OPT_Wimplicit_function_declaration, G_("implicit declaration of function %qE; did you mean %qs?"), id, suggestion); } else warned = warning_at (loc, OPT_Wimplicit_function_declaration, G_("implicit declaration of function %qE"), id); if (olddecl && warned) locate_old_decl (olddecl); if (!warned) hint.suppress (); } /* Return the name of the header file that declares built-in function FNDECL, or null if either we don't know or don't expect to see an explicit declaration. 
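For example, BUILT_IN_STRLEN yields "<string.h>"; built-ins not listed below yield null.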
*/ static const char * header_for_builtin_fn (tree fndecl) { if (DECL_BUILT_IN_CLASS (fndecl) != BUILT_IN_NORMAL) return NULL; switch (DECL_FUNCTION_CODE (fndecl)) { CASE_FLT_FN (BUILT_IN_ACOS): CASE_FLT_FN (BUILT_IN_ACOSH): CASE_FLT_FN (BUILT_IN_ASIN): CASE_FLT_FN (BUILT_IN_ASINH): CASE_FLT_FN (BUILT_IN_ATAN): CASE_FLT_FN (BUILT_IN_ATANH): CASE_FLT_FN (BUILT_IN_ATAN2): CASE_FLT_FN (BUILT_IN_CBRT): CASE_FLT_FN (BUILT_IN_CEIL): CASE_FLT_FN_FLOATN_NX (BUILT_IN_CEIL): CASE_FLT_FN (BUILT_IN_COPYSIGN): CASE_FLT_FN_FLOATN_NX (BUILT_IN_COPYSIGN): CASE_FLT_FN (BUILT_IN_COS): CASE_FLT_FN (BUILT_IN_COSH): CASE_FLT_FN (BUILT_IN_ERF): CASE_FLT_FN (BUILT_IN_ERFC): CASE_FLT_FN (BUILT_IN_EXP): CASE_FLT_FN (BUILT_IN_EXP2): CASE_FLT_FN (BUILT_IN_EXPM1): CASE_FLT_FN (BUILT_IN_FABS): CASE_FLT_FN_FLOATN_NX (BUILT_IN_FABS): CASE_FLT_FN (BUILT_IN_FDIM): CASE_FLT_FN (BUILT_IN_FLOOR): CASE_FLT_FN_FLOATN_NX (BUILT_IN_FLOOR): CASE_FLT_FN (BUILT_IN_FMA): CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMA): CASE_FLT_FN (BUILT_IN_FMAX): CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMAX): CASE_FLT_FN (BUILT_IN_FMIN): CASE_FLT_FN_FLOATN_NX (BUILT_IN_FMIN): CASE_FLT_FN (BUILT_IN_FMOD): CASE_FLT_FN (BUILT_IN_FREXP): CASE_FLT_FN (BUILT_IN_HYPOT): CASE_FLT_FN (BUILT_IN_ILOGB): CASE_FLT_FN (BUILT_IN_LDEXP): CASE_FLT_FN (BUILT_IN_LGAMMA): CASE_FLT_FN (BUILT_IN_LLRINT): CASE_FLT_FN (BUILT_IN_LLROUND): CASE_FLT_FN (BUILT_IN_LOG): CASE_FLT_FN (BUILT_IN_LOG10): CASE_FLT_FN (BUILT_IN_LOG1P): CASE_FLT_FN (BUILT_IN_LOG2): CASE_FLT_FN (BUILT_IN_LOGB): CASE_FLT_FN (BUILT_IN_LRINT): CASE_FLT_FN (BUILT_IN_LROUND): CASE_FLT_FN (BUILT_IN_MODF): CASE_FLT_FN (BUILT_IN_NAN): CASE_FLT_FN (BUILT_IN_NEARBYINT): CASE_FLT_FN_FLOATN_NX (BUILT_IN_NEARBYINT): CASE_FLT_FN (BUILT_IN_NEXTAFTER): CASE_FLT_FN (BUILT_IN_NEXTTOWARD): CASE_FLT_FN (BUILT_IN_POW): CASE_FLT_FN (BUILT_IN_REMAINDER): CASE_FLT_FN (BUILT_IN_REMQUO): CASE_FLT_FN (BUILT_IN_RINT): CASE_FLT_FN_FLOATN_NX (BUILT_IN_RINT): CASE_FLT_FN (BUILT_IN_ROUND): CASE_FLT_FN_FLOATN_NX (BUILT_IN_ROUND): CASE_FLT_FN (BUILT_IN_SCALBLN): CASE_FLT_FN (BUILT_IN_SCALBN): CASE_FLT_FN (BUILT_IN_SIN): CASE_FLT_FN (BUILT_IN_SINH): CASE_FLT_FN (BUILT_IN_SINCOS): CASE_FLT_FN (BUILT_IN_SQRT): CASE_FLT_FN_FLOATN_NX (BUILT_IN_SQRT): CASE_FLT_FN (BUILT_IN_TAN): CASE_FLT_FN (BUILT_IN_TANH): CASE_FLT_FN (BUILT_IN_TGAMMA): CASE_FLT_FN (BUILT_IN_TRUNC): CASE_FLT_FN_FLOATN_NX (BUILT_IN_TRUNC): case BUILT_IN_ISINF: case BUILT_IN_ISNAN: return "<math.h>"; CASE_FLT_FN (BUILT_IN_CABS): CASE_FLT_FN (BUILT_IN_CACOS): CASE_FLT_FN (BUILT_IN_CACOSH): CASE_FLT_FN (BUILT_IN_CARG): CASE_FLT_FN (BUILT_IN_CASIN): CASE_FLT_FN (BUILT_IN_CASINH): CASE_FLT_FN (BUILT_IN_CATAN): CASE_FLT_FN (BUILT_IN_CATANH): CASE_FLT_FN (BUILT_IN_CCOS): CASE_FLT_FN (BUILT_IN_CCOSH): CASE_FLT_FN (BUILT_IN_CEXP): CASE_FLT_FN (BUILT_IN_CIMAG): CASE_FLT_FN (BUILT_IN_CLOG): CASE_FLT_FN (BUILT_IN_CONJ): CASE_FLT_FN (BUILT_IN_CPOW): CASE_FLT_FN (BUILT_IN_CPROJ): CASE_FLT_FN (BUILT_IN_CREAL): CASE_FLT_FN (BUILT_IN_CSIN): CASE_FLT_FN (BUILT_IN_CSINH): CASE_FLT_FN (BUILT_IN_CSQRT): CASE_FLT_FN (BUILT_IN_CTAN): CASE_FLT_FN (BUILT_IN_CTANH): return "<complex.h>"; case BUILT_IN_MEMCHR: case BUILT_IN_MEMCMP: case BUILT_IN_MEMCPY: case BUILT_IN_MEMMOVE: case BUILT_IN_MEMSET: case BUILT_IN_STRCAT: case BUILT_IN_STRCHR: case BUILT_IN_STRCMP: case BUILT_IN_STRCPY: case BUILT_IN_STRCSPN: case BUILT_IN_STRLEN: case BUILT_IN_STRNCAT: case BUILT_IN_STRNCMP: case BUILT_IN_STRNCPY: case BUILT_IN_STRPBRK: case BUILT_IN_STRRCHR: case BUILT_IN_STRSPN: case BUILT_IN_STRSTR: return "<string.h>"; case 
BUILT_IN_FPRINTF: case BUILT_IN_PUTC: case BUILT_IN_FPUTC: case BUILT_IN_FPUTS: case BUILT_IN_FSCANF: case BUILT_IN_FWRITE: case BUILT_IN_PRINTF: case BUILT_IN_PUTCHAR: case BUILT_IN_PUTS: case BUILT_IN_SCANF: case BUILT_IN_SNPRINTF: case BUILT_IN_SPRINTF: case BUILT_IN_SSCANF: case BUILT_IN_VFPRINTF: case BUILT_IN_VFSCANF: case BUILT_IN_VPRINTF: case BUILT_IN_VSCANF: case BUILT_IN_VSNPRINTF: case BUILT_IN_VSPRINTF: case BUILT_IN_VSSCANF: return "<stdio.h>"; case BUILT_IN_ISALNUM: case BUILT_IN_ISALPHA: case BUILT_IN_ISBLANK: case BUILT_IN_ISCNTRL: case BUILT_IN_ISDIGIT: case BUILT_IN_ISGRAPH: case BUILT_IN_ISLOWER: case BUILT_IN_ISPRINT: case BUILT_IN_ISPUNCT: case BUILT_IN_ISSPACE: case BUILT_IN_ISUPPER: case BUILT_IN_ISXDIGIT: case BUILT_IN_TOLOWER: case BUILT_IN_TOUPPER: return "<ctype.h>"; case BUILT_IN_ISWALNUM: case BUILT_IN_ISWALPHA: case BUILT_IN_ISWBLANK: case BUILT_IN_ISWCNTRL: case BUILT_IN_ISWDIGIT: case BUILT_IN_ISWGRAPH: case BUILT_IN_ISWLOWER: case BUILT_IN_ISWPRINT: case BUILT_IN_ISWPUNCT: case BUILT_IN_ISWSPACE: case BUILT_IN_ISWUPPER: case BUILT_IN_ISWXDIGIT: case BUILT_IN_TOWLOWER: case BUILT_IN_TOWUPPER: return "<wctype.h>"; case BUILT_IN_ABORT: case BUILT_IN_ABS: case BUILT_IN_CALLOC: case BUILT_IN_EXIT: case BUILT_IN_FREE: case BUILT_IN_LABS: case BUILT_IN_LLABS: case BUILT_IN_MALLOC: case BUILT_IN_REALLOC: case BUILT_IN__EXIT2: case BUILT_IN_ALIGNED_ALLOC: return "<stdlib.h>"; case BUILT_IN_IMAXABS: return "<inttypes.h>"; case BUILT_IN_STRFTIME: return "<time.h>"; default: return NULL; } } /* Generate an implicit declaration for identifier FUNCTIONID at LOC as a function of type int (). */ tree implicitly_declare (location_t loc, tree functionid) { struct c_binding *b; tree decl = NULL_TREE; tree asmspec_tree; for (b = I_SYMBOL_BINDING (functionid); b; b = b->shadowed) { if (B_IN_SCOPE (b, external_scope)) { decl = b->decl; break; } } if (decl) { if (TREE_CODE (decl) != FUNCTION_DECL) return decl; /* FIXME: Objective-C has weird not-really-builtin functions which are supposed to be visible automatically. They wind up in the external scope because they're pushed before the file scope gets created. Catch this here and rebind them into the file scope. */ if (!fndecl_built_in_p (decl) && DECL_IS_BUILTIN (decl)) { bind (functionid, decl, file_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); return decl; } else { tree newtype = default_function_type; if (b->u.type) TREE_TYPE (decl) = b->u.type; /* Implicit declaration of a function already declared (somehow) in a different scope, or as a built-in. If this is the first time this has happened, warn; then recycle the old declaration but with the new type. */ if (!C_DECL_IMPLICIT (decl)) { implicit_decl_warning (loc, functionid, decl); C_DECL_IMPLICIT (decl) = 1; } if (fndecl_built_in_p (decl)) { newtype = build_type_attribute_variant (newtype, TYPE_ATTRIBUTES (TREE_TYPE (decl))); if (!comptypes (newtype, TREE_TYPE (decl))) { bool warned = warning_at (loc, 0, "incompatible implicit " "declaration of built-in " "function %qD", decl); /* See if we can hint which header to include. 
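For example, calling malloc with no declaration in scope implicitly declares it as 'int malloc ()', which conflicts with the built-in; the fix-it below then suggests including <stdlib.h>.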
*/ const char *header = header_for_builtin_fn (decl); if (header != NULL && warned) { rich_location richloc (line_table, loc); maybe_add_include_fixit (&richloc, header, true); inform (&richloc, "include %qs or provide a declaration of %qD", header, decl); } newtype = TREE_TYPE (decl); } } else { if (!comptypes (newtype, TREE_TYPE (decl))) { auto_diagnostic_group d; error_at (loc, "incompatible implicit declaration of " "function %qD", decl); locate_old_decl (decl); } } b->u.type = TREE_TYPE (decl); TREE_TYPE (decl) = newtype; bind (functionid, decl, current_scope, /*invisible=*/false, /*nested=*/true, DECL_SOURCE_LOCATION (decl)); return decl; } } /* Not seen before. */ decl = build_decl (loc, FUNCTION_DECL, functionid, default_function_type); DECL_EXTERNAL (decl) = 1; TREE_PUBLIC (decl) = 1; C_DECL_IMPLICIT (decl) = 1; implicit_decl_warning (loc, functionid, 0); asmspec_tree = maybe_apply_renaming_pragma (decl, /*asmname=*/NULL); if (asmspec_tree) set_user_assembler_name (decl, TREE_STRING_POINTER (asmspec_tree)); /* C89 says implicit declarations are in the innermost block. So we record the decl in the standard fashion. */ decl = pushdecl (decl); /* No need to call objc_check_decl here - it's a function type. */ rest_of_decl_compilation (decl, 0, 0); /* Write a record describing this implicit function declaration to the prototypes file (if requested). */ gen_aux_info_record (decl, 0, 1, 0); /* Possibly apply some default attributes to this implicit declaration. */ decl_attributes (&decl, NULL_TREE, 0); return decl; } /* Issue an error message for a reference to an undeclared variable ID, including a reference to a builtin outside of function-call context. Establish a binding of the identifier to error_mark_node in an appropriate scope, which will suppress further errors for the same identifier. The error message should be given location LOC. */ void undeclared_variable (location_t loc, tree id) { static bool already = false; struct c_scope *scope; auto_diagnostic_group d; if (current_function_decl == NULL_TREE) { name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc); if (const char *suggestion = guessed_id.suggestion ()) { gcc_rich_location richloc (loc); richloc.add_fixit_replace (suggestion); error_at (&richloc, "%qE undeclared here (not in a function);" " did you mean %qs?", id, suggestion); } else error_at (loc, "%qE undeclared here (not in a function)", id); scope = current_scope; } else { if (!objc_diagnose_private_ivar (id)) { name_hint guessed_id = lookup_name_fuzzy (id, FUZZY_LOOKUP_NAME, loc); if (const char *suggestion = guessed_id.suggestion ()) { gcc_rich_location richloc (loc); richloc.add_fixit_replace (suggestion); error_at (&richloc, "%qE undeclared (first use in this function);" " did you mean %qs?", id, suggestion); } else error_at (loc, "%qE undeclared (first use in this function)", id); } if (!already) { inform (loc, "each undeclared identifier is reported only" " once for each function it appears in"); already = true; } /* If we are parsing old-style parameter decls, current_function_decl will be nonnull but current_function_scope will be null. */ scope = current_function_scope ? current_function_scope : current_scope; } bind (id, error_mark_node, scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } /* Subroutine of lookup_label, declare_label, define_label: construct a LABEL_DECL with all the proper frills. Also create a struct c_label_vars initialized for the current scope. 
*/ static tree make_label (location_t location, tree name, bool defining, struct c_label_vars **p_label_vars) { tree label = build_decl (location, LABEL_DECL, name, void_type_node); DECL_CONTEXT (label) = current_function_decl; SET_DECL_MODE (label, VOIDmode); c_label_vars *label_vars = ggc_alloc<c_label_vars> (); label_vars->shadowed = NULL; set_spot_bindings (&label_vars->label_bindings, defining); label_vars->decls_in_scope = make_tree_vector (); label_vars->gotos = NULL; *p_label_vars = label_vars; return label; } /* Get the LABEL_DECL corresponding to identifier NAME as a label. Create one if none exists so far for the current function. This is called when a label is used in a goto expression or has its address taken. */ tree lookup_label (tree name) { tree label; struct c_label_vars *label_vars; if (current_function_scope == 0) { error ("label %qE referenced outside of any function", name); return NULL_TREE; } /* Use a label already defined or ref'd with this name, but not if it is inherited from a containing function and wasn't declared using __label__. */ label = I_LABEL_DECL (name); if (label && (DECL_CONTEXT (label) == current_function_decl || C_DECLARED_LABEL_FLAG (label))) { /* If the label has only been declared, update its apparent location to point here, for better diagnostics if it turns out not to have been defined. */ if (DECL_INITIAL (label) == NULL_TREE) DECL_SOURCE_LOCATION (label) = input_location; return label; } /* No label binding for that identifier; make one. */ label = make_label (input_location, name, false, &label_vars); /* Ordinary labels go in the current function scope. */ bind_label (name, label, current_function_scope, label_vars); return label; } /* Issue a warning about DECL for a goto statement at GOTO_LOC going to LABEL. */ static void warn_about_goto (location_t goto_loc, tree label, tree decl) { if (variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) error_at (goto_loc, "jump into scope of identifier with variably modified type"); else warning_at (goto_loc, OPT_Wjump_misses_init, "jump skips variable initialization"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); inform (DECL_SOURCE_LOCATION (decl), "%qD declared here", decl); } /* Look up a label because of a goto statement. This is like lookup_label, but also issues any appropriate warnings. */ tree lookup_label_for_goto (location_t loc, tree name) { tree label; struct c_label_vars *label_vars; unsigned int ix; tree decl; label = lookup_label (name); if (label == NULL_TREE) return NULL_TREE; /* If we are jumping to a different function, we can't issue any useful warnings. */ if (DECL_CONTEXT (label) != current_function_decl) { gcc_assert (C_DECLARED_LABEL_FLAG (label)); return label; } label_vars = I_LABEL_BINDING (name)->u.label; /* If the label has not yet been defined, then push this goto on a list for possible later warnings. */ if (label_vars->label_bindings.scope == NULL) { c_goto_bindings *g = ggc_alloc<c_goto_bindings> (); g->loc = loc; set_spot_bindings (&g->goto_bindings, true); vec_safe_push (label_vars->gotos, g); return label; } /* If there are any decls in label_vars->decls_in_scope, then this goto has missed the declaration of the decl. This happens for a case like int i = 1; lab: ... goto lab; Issue a warning or error. 
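   Laid out, that case is:

       int i = 1;
      lab:
       ...
       goto lab;

   Each decl recorded in decls_in_scope is handed to warn_about_goto
   below: an error for a variably modified type, otherwise a
   -Wjump-misses-init warning.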
*/ FOR_EACH_VEC_SAFE_ELT (label_vars->decls_in_scope, ix, decl) warn_about_goto (loc, label, decl); if (label_vars->label_bindings.left_stmt_expr) { error_at (loc, "jump into statement expression"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); } return label; } /* Make a label named NAME in the current function, shadowing silently any that may be inherited from containing functions or containing scopes. This is called for __label__ declarations. */ tree declare_label (tree name) { struct c_binding *b = I_LABEL_BINDING (name); tree label; struct c_label_vars *label_vars; /* Check to make sure that the label hasn't already been declared at this scope */ if (b && B_IN_CURRENT_SCOPE (b)) { auto_diagnostic_group d; error ("duplicate label declaration %qE", name); locate_old_decl (b->decl); /* Just use the previous declaration. */ return b->decl; } label = make_label (input_location, name, false, &label_vars); C_DECLARED_LABEL_FLAG (label) = 1; /* Declared labels go in the current scope. */ bind_label (name, label, current_scope, label_vars); return label; } /* When we define a label, issue any appropriate warnings if there are any gotos earlier in the function which jump to this label. */ static void check_earlier_gotos (tree label, struct c_label_vars* label_vars) { unsigned int ix; struct c_goto_bindings *g; FOR_EACH_VEC_SAFE_ELT (label_vars->gotos, ix, g) { struct c_binding *b; struct c_scope *scope; /* We have a goto to this label. The goto is going forward. In g->scope, the goto is going to skip any binding which was defined after g->bindings_in_scope. */ if (g->goto_bindings.scope->has_jump_unsafe_decl) { for (b = g->goto_bindings.scope->bindings; b != g->goto_bindings.bindings_in_scope; b = b->prev) { if (decl_jump_unsafe (b->decl)) warn_about_goto (g->loc, label, b->decl); } } /* We also need to warn about decls defined in any scopes between the scope of the label and the scope of the goto. */ for (scope = label_vars->label_bindings.scope; scope != g->goto_bindings.scope; scope = scope->outer) { gcc_assert (scope != NULL); if (scope->has_jump_unsafe_decl) { if (scope == label_vars->label_bindings.scope) b = label_vars->label_bindings.bindings_in_scope; else b = scope->bindings; for (; b != NULL; b = b->prev) { if (decl_jump_unsafe (b->decl)) warn_about_goto (g->loc, label, b->decl); } } } if (g->goto_bindings.stmt_exprs > 0) { error_at (g->loc, "jump into statement expression"); inform (DECL_SOURCE_LOCATION (label), "label %qD defined here", label); } } /* Now that the label is defined, we will issue warnings about subsequent gotos to this label when we see them. */ vec_safe_truncate (label_vars->gotos, 0); label_vars->gotos = NULL; } /* Define a label, specifying the location in the source file. Return the LABEL_DECL node for the label, if the definition is valid. Otherwise return NULL_TREE. */ tree define_label (location_t location, tree name) { /* Find any preexisting label with this name. It is an error if that label has already been defined in this function, or if there is a containing function with a declared label with the same name. 
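   For example (hypothetical user code):

       void f (void)
       {
        dup: ;
        dup: ;        error: duplicate label 'dup'
       }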
*/ tree label = I_LABEL_DECL (name); if (label && ((DECL_CONTEXT (label) == current_function_decl && DECL_INITIAL (label) != NULL_TREE) || (DECL_CONTEXT (label) != current_function_decl && C_DECLARED_LABEL_FLAG (label)))) { auto_diagnostic_group d; error_at (location, "duplicate label %qD", label); locate_old_decl (label); return NULL_TREE; } else if (label && DECL_CONTEXT (label) == current_function_decl) { struct c_label_vars *label_vars = I_LABEL_BINDING (name)->u.label; /* The label has been used or declared already in this function, but not defined. Update its location to point to this definition. */ DECL_SOURCE_LOCATION (label) = location; set_spot_bindings (&label_vars->label_bindings, true); /* Issue warnings as required about any goto statements from earlier in the function. */ check_earlier_gotos (label, label_vars); } else { struct c_label_vars *label_vars; /* No label binding for that identifier; make one. */ label = make_label (location, name, true, &label_vars); /* Ordinary labels go in the current function scope. */ bind_label (name, label, current_function_scope, label_vars); } if (!in_system_header_at (input_location) && lookup_name (name)) warning_at (location, OPT_Wtraditional, "traditional C lacks a separate namespace " "for labels, identifier %qE conflicts", name); /* Mark label as having been defined. */ DECL_INITIAL (label) = error_mark_node; return label; } /* Get the bindings for a new switch statement. This is used to issue warnings as appropriate for jumps from the switch to case or default labels. */ struct c_spot_bindings * c_get_switch_bindings (void) { struct c_spot_bindings *switch_bindings; switch_bindings = XNEW (struct c_spot_bindings); set_spot_bindings (switch_bindings, true); return switch_bindings; } void c_release_switch_bindings (struct c_spot_bindings *bindings) { gcc_assert (bindings->stmt_exprs == 0 && !bindings->left_stmt_expr); XDELETE (bindings); } /* This is called at the point of a case or default label to issue warnings about decls as needed. It returns true if it found an error, not just a warning. */ bool c_check_switch_jump_warnings (struct c_spot_bindings *switch_bindings, location_t switch_loc, location_t case_loc) { bool saw_error; struct c_scope *scope; saw_error = false; for (scope = current_scope; scope != switch_bindings->scope; scope = scope->outer) { struct c_binding *b; gcc_assert (scope != NULL); if (!scope->has_jump_unsafe_decl) continue; for (b = scope->bindings; b != NULL; b = b->prev) { if (decl_jump_unsafe (b->decl)) { if (variably_modified_type_p (TREE_TYPE (b->decl), NULL_TREE)) { saw_error = true; error_at (case_loc, ("switch jumps into scope of identifier with " "variably modified type")); } else warning_at (case_loc, OPT_Wjump_misses_init, "switch jumps over variable initialization"); inform (switch_loc, "switch starts here"); inform (DECL_SOURCE_LOCATION (b->decl), "%qD declared here", b->decl); } } } if (switch_bindings->stmt_exprs > 0) { saw_error = true; error_at (case_loc, "switch jumps into statement expression"); inform (switch_loc, "switch starts here"); } return saw_error; } /* Given NAME, an IDENTIFIER_NODE, return the structure (or union or enum) definition for that name. If THISLEVEL_ONLY is nonzero, searches only the current_scope. CODE says which kind of type the caller wants; it is RECORD_TYPE or UNION_TYPE or ENUMERAL_TYPE. If PLOC is not NULL and this returns non-null, it sets *PLOC to the location where the tag was defined. If the wrong kind of type is found, an error is reported. 
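   For example (hypothetical user code):

       struct foo;
       union foo;     error: 'foo' defined as wrong kind of tag

   the same-scope clash for which pending_xref_error is called
   immediately in the body below.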
*/ static tree lookup_tag (enum tree_code code, tree name, bool thislevel_only, location_t *ploc) { struct c_binding *b = I_TAG_BINDING (name); bool thislevel = false; if (!b || !b->decl) return NULL_TREE; /* We only care about whether it's in this level if thislevel_only was set or it might be a type clash. */ if (thislevel_only || TREE_CODE (b->decl) != code) { /* For our purposes, a tag in the external scope is the same as a tag in the file scope. (Primarily relevant to Objective-C and its builtin structure tags, which get pushed before the file scope is created.) */ if (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) thislevel = true; } if (thislevel_only && !thislevel) return NULL_TREE; if (TREE_CODE (b->decl) != code) { /* Definition isn't the kind we were looking for. */ pending_invalid_xref = name; pending_invalid_xref_location = input_location; /* If in the same binding level as a declaration as a tag of a different type, this must not be allowed to shadow that tag, so give the error immediately. (For example, "struct foo; union foo;" is invalid.) */ if (thislevel) pending_xref_error (); } if (ploc != NULL) *ploc = b->locus; return b->decl; } /* Return true if a definition exists for NAME with code CODE. */ bool tag_exists_p (enum tree_code code, tree name) { struct c_binding *b = I_TAG_BINDING (name); if (b == NULL || b->decl == NULL_TREE) return false; return TREE_CODE (b->decl) == code; } /* Print an error message now for a recent invalid struct, union or enum cross reference. We don't print them immediately because they are not invalid when used in the `struct foo;' construct for shadowing. */ void pending_xref_error (void) { if (pending_invalid_xref != NULL_TREE) error_at (pending_invalid_xref_location, "%qE defined as wrong kind of tag", pending_invalid_xref); pending_invalid_xref = NULL_TREE; } /* Look up NAME in the current scope and its superiors in the namespace of variables, functions and typedefs. Return a ..._DECL node of some kind representing its definition, or return NULL_TREE if it is undefined. */ tree lookup_name (tree name) { struct c_binding *b = I_SYMBOL_BINDING (name); if (b && !b->invisible) { maybe_record_typedef_use (b->decl); return b->decl; } return NULL_TREE; } /* Similar to `lookup_name' but look only at the indicated scope. */ static tree lookup_name_in_scope (tree name, struct c_scope *scope) { struct c_binding *b; for (b = I_SYMBOL_BINDING (name); b; b = b->shadowed) if (B_IN_SCOPE (b, scope)) return b->decl; return NULL_TREE; } /* Look for the closest match for NAME within the currently valid scopes. This finds the identifier with the lowest Levenshtein distance to NAME. If there are multiple candidates with equal minimal distance, the first one found is returned. Scopes are searched from innermost outwards, and within a scope in reverse order of declaration, thus benefiting candidates "near" to the current scope. The function also looks for similar macro names to NAME, since a misspelled macro name will not be expanded, and hence looks like an identifier to the C frontend. It also looks for start_typename keywords, to detect "singed" vs "signed" typos. Use LOC for any deferred diagnostics. */ name_hint lookup_name_fuzzy (tree name, enum lookup_name_fuzzy_kind kind, location_t loc) { gcc_assert (TREE_CODE (name) == IDENTIFIER_NODE); /* First, try some well-known names in the C standard library, in case the user forgot a #include. 
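   For example (hypothetical user code; the exact name-to-header
   mapping is provided by get_c_stdlib_header_for_name):

       void *p = NULL;     with no headers included

   is expected to produce a suggest_missing_header hint naming
   <stddef.h>, while a typo like "singed" for "signed" is caught
   by the start_typename keyword check further down.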
*/ const char *header_hint = get_c_stdlib_header_for_name (IDENTIFIER_POINTER (name)); if (header_hint) return name_hint (NULL, new suggest_missing_header (loc, IDENTIFIER_POINTER (name), header_hint)); /* Only suggest names reserved for the implementation if NAME begins with an underscore. */ bool consider_implementation_names = (IDENTIFIER_POINTER (name)[0] == '_'); best_match<tree, tree> bm (name); /* Look within currently valid scopes. */ for (c_scope *scope = current_scope; scope; scope = scope->outer) for (c_binding *binding = scope->bindings; binding; binding = binding->prev) { if (!binding->id || binding->invisible) continue; if (binding->decl == error_mark_node) continue; /* Don't use bindings from implicitly declared functions, as they were likely misspellings themselves. */ if (TREE_CODE (binding->decl) == FUNCTION_DECL) if (C_DECL_IMPLICIT (binding->decl)) continue; /* Don't suggest names that are reserved for use by the implementation, unless NAME began with an underscore. */ if (!consider_implementation_names) { const char *suggestion_str = IDENTIFIER_POINTER (binding->id); if (name_reserved_for_implementation_p (suggestion_str)) continue; } switch (kind) { case FUZZY_LOOKUP_TYPENAME: if (TREE_CODE (binding->decl) != TYPE_DECL) continue; break; case FUZZY_LOOKUP_FUNCTION_NAME: if (TREE_CODE (binding->decl) != FUNCTION_DECL) { /* Allow function pointers. */ if ((VAR_P (binding->decl) || TREE_CODE (binding->decl) == PARM_DECL) && TREE_CODE (TREE_TYPE (binding->decl)) == POINTER_TYPE && (TREE_CODE (TREE_TYPE (TREE_TYPE (binding->decl))) == FUNCTION_TYPE)) break; continue; } break; default: break; } bm.consider (binding->id); } /* Consider macros: if the user misspelled a macro name e.g. "SOME_MACRO" as: x = SOME_OTHER_MACRO (y); then "SOME_OTHER_MACRO" will survive to the frontend and show up as a misspelled identifier. Use the best distance so far so that a candidate is only set if a macro is better than anything so far. This allows early rejection (without calculating the edit distance) of macro names that must have distance >= bm.get_best_distance (), and means that we only get a non-NULL result for best_macro_match if it's better than any of the identifiers already checked, which avoids needless creation of identifiers for macro hashnodes. */ best_macro_match bmm (name, bm.get_best_distance (), parse_in); cpp_hashnode *best_macro = bmm.get_best_meaningful_candidate (); /* If a macro is the closest so far to NAME, use it, creating an identifier tree node for it. */ if (best_macro) { const char *id = (const char *)best_macro->ident.str; tree macro_as_identifier = get_identifier_with_length (id, best_macro->ident.len); bm.set_best_so_far (macro_as_identifier, bmm.get_best_distance (), bmm.get_best_candidate_length ()); } /* Try the "start_typename" keywords to detect "singed" vs "signed" typos. */ if (kind == FUZZY_LOOKUP_TYPENAME) { for (unsigned i = 0; i < num_c_common_reswords; i++) { const c_common_resword *resword = &c_common_reswords[i]; if (!c_keyword_starts_typename (resword->rid)) continue; tree resword_identifier = ridpointers [resword->rid]; if (!resword_identifier) continue; gcc_assert (TREE_CODE (resword_identifier) == IDENTIFIER_NODE); bm.consider (resword_identifier); } } tree best = bm.get_best_meaningful_candidate (); if (best) return name_hint (IDENTIFIER_POINTER (best), NULL); else return name_hint (NULL, NULL); } /* Table of supported standard (C2x) attributes. 
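   These are the attributes spelled with the C2x [[...]] syntax,
   e.g. (hypothetical user code):

       [[deprecated ("use g instead")]] int f (void);
       [[maybe_unused]] static int cache_hits;

       switch (x)
         {
         case 0:
           f ();
           [[fallthrough]];
         case 1:
           break;
         }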
*/ const struct attribute_spec std_attribute_table[] = { /* { name, min_len, max_len, decl_req, type_req, fn_type_req, affects_type_identity, handler, exclude } */ { "deprecated", 0, 1, false, false, false, false, handle_deprecated_attribute, NULL }, { "fallthrough", 0, 0, false, false, false, false, handle_fallthrough_attribute, NULL }, { "maybe_unused", 0, 0, false, false, false, false, handle_unused_attribute, NULL }, { NULL, 0, 0, false, false, false, false, NULL, NULL } }; /* Create the predefined scalar types of C, and some nodes representing standard constants (0, 1, (void *) 0). Initialize the global scope. Make definitions for built-in primitive functions. */ void c_init_decl_processing (void) { location_t save_loc = input_location; /* Initialize reserved words for parser. */ c_parse_init (); register_scoped_attributes (std_attribute_table, NULL); current_function_decl = NULL_TREE; gcc_obstack_init (&parser_obstack); /* Make the externals scope. */ push_scope (); external_scope = current_scope; /* Declarations from c_common_nodes_and_builtins must not be associated with this input file, lest we get differences between using and not using preprocessed headers. */ input_location = BUILTINS_LOCATION; c_common_nodes_and_builtins (); /* In C, comparisons and TRUTH_* expressions have type int. */ truthvalue_type_node = integer_type_node; truthvalue_true_node = integer_one_node; truthvalue_false_node = integer_zero_node; /* Even in C99, which has a real boolean type. */ pushdecl (build_decl (UNKNOWN_LOCATION, TYPE_DECL, get_identifier ("_Bool"), boolean_type_node)); input_location = save_loc; make_fname_decl = c_make_fname_decl; start_fname_decls (); } /* Create the VAR_DECL at LOC for __FUNCTION__ etc. ID is the name to give the decl, NAME is the initialization string and TYPE_DEP indicates whether NAME depended on the type of the function. As we don't yet implement delayed emission of static data, we mark the decl as emitted so it is not placed in the output. Anything using it must therefore pull out the STRING_CST initializer directly. FIXME. */ static tree c_make_fname_decl (location_t loc, tree id, int type_dep) { const char *name = fname_as_string (type_dep); tree decl, type, init; size_t length = strlen (name); type = build_array_type (char_type_node, build_index_type (size_int (length))); type = c_build_qualified_type (type, TYPE_QUAL_CONST); decl = build_decl (loc, VAR_DECL, id, type); TREE_STATIC (decl) = 1; TREE_READONLY (decl) = 1; DECL_ARTIFICIAL (decl) = 1; init = build_string (length + 1, name); free (CONST_CAST (char *, name)); TREE_TYPE (init) = type; DECL_INITIAL (decl) = init; TREE_USED (decl) = 1; if (current_function_decl /* For invalid programs like this: void foo() const char* p = __FUNCTION__; the __FUNCTION__ is believed to appear in K&R style function parameter declarator. In that case we still don't have function_scope. */ && current_function_scope) { DECL_CONTEXT (decl) = current_function_decl; bind (id, decl, current_function_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } finish_decl (decl, loc, init, NULL_TREE, NULL_TREE); return decl; } tree c_builtin_function (tree decl) { tree type = TREE_TYPE (decl); tree id = DECL_NAME (decl); const char *name = IDENTIFIER_POINTER (id); C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type); /* Should never be called on a symbol with a preexisting meaning. 
*/ gcc_assert (!I_SYMBOL_BINDING (id)); bind (id, decl, external_scope, /*invisible=*/true, /*nested=*/false, UNKNOWN_LOCATION); /* Builtins in the implementation namespace are made visible without needing to be explicitly declared. See push_file_scope. */ if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1]))) { DECL_CHAIN (decl) = visible_builtins; visible_builtins = decl; } return decl; } tree c_builtin_function_ext_scope (tree decl) { tree type = TREE_TYPE (decl); tree id = DECL_NAME (decl); const char *name = IDENTIFIER_POINTER (id); C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type); if (external_scope) bind (id, decl, external_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); /* Builtins in the implementation namespace are made visible without needing to be explicitly declared. See push_file_scope. */ if (name[0] == '_' && (name[1] == '_' || ISUPPER (name[1]))) { DECL_CHAIN (decl) = visible_builtins; visible_builtins = decl; } return decl; } /* Implement LANG_HOOKS_SIMULATE_BUILTIN_FUNCTION_DECL. */ tree c_simulate_builtin_function_decl (tree decl) { tree type = TREE_TYPE (decl); C_DECL_BUILTIN_PROTOTYPE (decl) = prototype_p (type); return pushdecl (decl); } /* Warn about attributes in a context where they are unused (attribute-declarations, except for the "fallthrough" case, and attributes on statements). */ void c_warn_unused_attributes (tree attrs) { for (tree t = attrs; t != NULL_TREE; t = TREE_CHAIN (t)) if (get_attribute_namespace (t) == NULL_TREE) /* The specifications of standard attributes mean this is a constraint violation. */ pedwarn (input_location, OPT_Wattributes, "%qE attribute ignored", get_attribute_name (t)); else warning (OPT_Wattributes, "%qE attribute ignored", get_attribute_name (t)); } /* Warn for standard attributes being applied to a type that is not being defined, where that is a constraint violation, and return a list of attributes with them removed. */ tree c_warn_type_attributes (tree attrs) { tree *attr_ptr = &attrs; while (*attr_ptr) if (get_attribute_namespace (*attr_ptr) == NULL_TREE) { pedwarn (input_location, OPT_Wattributes, "%qE attribute ignored", get_attribute_name (*attr_ptr)); *attr_ptr = TREE_CHAIN (*attr_ptr); } else attr_ptr = &TREE_CHAIN (*attr_ptr); return attrs; } /* Called when a declaration is seen that contains no names to declare. If its type is a reference to a structure, union or enum inherited from a containing scope, shadow that tag name for the current scope with a forward reference. If its type defines a new named structure or union or defines an enum, it is valid but we need not do anything here. Otherwise, it is an error. */ void shadow_tag (const struct c_declspecs *declspecs) { shadow_tag_warned (declspecs, 0); } /* WARNED is 1 if we have done a pedwarn, 2 if we have done a warning, but no pedwarn. */ void shadow_tag_warned (const struct c_declspecs *declspecs, int warned) { bool found_tag = false; if (declspecs->type && !declspecs->default_int_p && !declspecs->typedef_p) { tree value = declspecs->type; enum tree_code code = TREE_CODE (value); if (code == RECORD_TYPE || code == UNION_TYPE || code == ENUMERAL_TYPE) /* Used to test also that TYPE_SIZE (value) != 0. That caused warning for `struct foo;' at top level in the file. 
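   Examples of the cases diagnosed below (hypothetical user code):

       struct { int i; };      unnamed struct/union that defines
                               no instances
       static struct foo;      storage class specifier does not
                               redeclare tag
       const struct foo;       type qualifier does not redeclare
                               tag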
*/ { tree name = TYPE_NAME (value); tree t; found_tag = true; if (declspecs->restrict_p) { error ("invalid use of %<restrict%>"); warned = 1; } if (name == NULL_TREE) { if (warned != 1 && code != ENUMERAL_TYPE) /* Empty unnamed enum OK */ { pedwarn (input_location, 0, "unnamed struct/union that defines no instances"); warned = 1; } } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && declspecs->typespec_kind != ctsk_tagfirstref_attrs && declspecs->storage_class != csc_none) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with storage class specifier " "does not redeclare tag"); warned = 1; pending_xref_error (); } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && declspecs->typespec_kind != ctsk_tagfirstref_attrs && (declspecs->const_p || declspecs->volatile_p || declspecs->atomic_p || declspecs->restrict_p || declspecs->address_space)) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with type qualifier " "does not redeclare tag"); warned = 1; pending_xref_error (); } else if (declspecs->typespec_kind != ctsk_tagdef && declspecs->typespec_kind != ctsk_tagfirstref && declspecs->typespec_kind != ctsk_tagfirstref_attrs && declspecs->alignas_p) { if (warned != 1) pedwarn (input_location, 0, "empty declaration with %<_Alignas%> " "does not redeclare tag"); warned = 1; pending_xref_error (); } else { pending_invalid_xref = NULL_TREE; t = lookup_tag (code, name, true, NULL); if (t == NULL_TREE) { t = make_node (code); pushtag (input_location, name, t); } } } else { if (warned != 1 && !in_system_header_at (input_location)) { pedwarn (input_location, 0, "useless type name in empty declaration"); warned = 1; } } } else if (warned != 1 && !in_system_header_at (input_location) && declspecs->typedef_p) { pedwarn (input_location, 0, "useless type name in empty declaration"); warned = 1; } pending_invalid_xref = NULL_TREE; if (declspecs->inline_p) { error ("%<inline%> in empty declaration"); warned = 1; } if (declspecs->noreturn_p) { error ("%<_Noreturn%> in empty declaration"); warned = 1; } if (current_scope == file_scope && declspecs->storage_class == csc_auto) { error ("%<auto%> in file-scope empty declaration"); warned = 1; } if (current_scope == file_scope && declspecs->storage_class == csc_register) { error ("%<register%> in file-scope empty declaration"); warned = 1; } if (!warned && !in_system_header_at (input_location) && declspecs->storage_class != csc_none) { warning (0, "useless storage class specifier in empty declaration"); warned = 2; } if (!warned && !in_system_header_at (input_location) && declspecs->thread_p) { warning (0, "useless %qs in empty declaration", declspecs->thread_gnu_p ? 
"__thread" : "_Thread_local"); warned = 2; } if (!warned && !in_system_header_at (input_location) && (declspecs->const_p || declspecs->volatile_p || declspecs->atomic_p || declspecs->restrict_p || declspecs->address_space)) { warning (0, "useless type qualifier in empty declaration"); warned = 2; } if (!warned && !in_system_header_at (input_location) && declspecs->alignas_p) { warning (0, "useless %<_Alignas%> in empty declaration"); warned = 2; } if (found_tag && warned == 2 && (declspecs->typespec_kind == ctsk_tagref_attrs || declspecs->typespec_kind == ctsk_tagfirstref_attrs)) { /* Standard attributes after the "struct" or "union" keyword are only permitted when the contents of the type are defined, or in the form "struct-or-union attribute-specifier-sequence identifier;". If the ';' was not present, attributes were diagnosed in the parser. Here, ensure that any other useless elements of the declaration result in a pedwarn, not just a warning. Forward declarations of enum types are not part of standard C, but handle them the same. */ pedwarn (input_location, 0, "invalid use of attributes in empty declaration"); warned = 1; } if (warned != 1) { if (declspecs->declspecs_seen_p && !declspecs->non_std_attrs_seen_p) /* An attribute declaration (but not a fallthrough attribute declaration, which was handled separately); warn if there are any attributes being ignored (but not if the attributes were empty). */ c_warn_unused_attributes (declspecs->attrs); else if (!found_tag) pedwarn (input_location, 0, "empty declaration"); } } /* Return the qualifiers from SPECS as a bitwise OR of TYPE_QUAL_* bits. SPECS represents declaration specifiers that the grammar only permits to contain type qualifiers and attributes. */ int quals_from_declspecs (const struct c_declspecs *specs) { int quals = ((specs->const_p ? TYPE_QUAL_CONST : 0) | (specs->volatile_p ? TYPE_QUAL_VOLATILE : 0) | (specs->restrict_p ? TYPE_QUAL_RESTRICT : 0) | (specs->atomic_p ? TYPE_QUAL_ATOMIC : 0) | (ENCODE_QUAL_ADDR_SPACE (specs->address_space))); gcc_assert (!specs->type && !specs->decl_attr && specs->typespec_word == cts_none && specs->storage_class == csc_none && !specs->typedef_p && !specs->explicit_signed_p && !specs->deprecated_p && !specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p && !specs->inline_p && !specs->noreturn_p && !specs->thread_p); return quals; } /* Construct an array declarator. LOC is the location of the beginning of the array (usually the opening brace). EXPR is the expression inside [], or NULL_TREE. QUALS are the type qualifiers inside the [] (to be applied to the pointer to which a parameter array is converted). STATIC_P is true if "static" is inside the [], false otherwise. VLA_UNSPEC_P is true if the array is [*], a VLA of unspecified length which is nevertheless a complete type, false otherwise. The field for the contained declarator is left to be filled in by set_array_declarator_inner. 
*/ struct c_declarator * build_array_declarator (location_t loc, tree expr, struct c_declspecs *quals, bool static_p, bool vla_unspec_p) { struct c_declarator *declarator = XOBNEW (&parser_obstack, struct c_declarator); declarator->id_loc = loc; declarator->kind = cdk_array; declarator->declarator = 0; declarator->u.array.dimen = expr; if (quals) { declarator->u.array.attrs = quals->attrs; declarator->u.array.quals = quals_from_declspecs (quals); } else { declarator->u.array.attrs = NULL_TREE; declarator->u.array.quals = 0; } declarator->u.array.static_p = static_p; declarator->u.array.vla_unspec_p = vla_unspec_p; if (static_p || quals != NULL) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support %<static%> or type " "qualifiers in parameter array declarators"); if (vla_unspec_p) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support %<[*]%> array declarators"); if (vla_unspec_p) { if (!current_scope->parm_flag) { /* C99 6.7.5.2p4 */ error_at (loc, "%<[*]%> not allowed in other than " "function prototype scope"); declarator->u.array.vla_unspec_p = false; return NULL; } current_scope->had_vla_unspec = true; } return declarator; } /* Set the contained declarator of an array declarator. DECL is the declarator, as constructed by build_array_declarator; INNER is what appears on the left of the []. */ struct c_declarator * set_array_declarator_inner (struct c_declarator *decl, struct c_declarator *inner) { decl->declarator = inner; return decl; } /* INIT is a constructor that forms DECL's initializer. If the final element initializes a flexible array field, add the size of that initializer to DECL's size. */ static void add_flexible_array_elts_to_size (tree decl, tree init) { tree elt, type; if (vec_safe_is_empty (CONSTRUCTOR_ELTS (init))) return; elt = CONSTRUCTOR_ELTS (init)->last ().value; type = TREE_TYPE (elt); if (TREE_CODE (type) == ARRAY_TYPE && TYPE_SIZE (type) == NULL_TREE && TYPE_DOMAIN (type) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE) { complete_array_type (&type, elt, false); DECL_SIZE (decl) = size_binop (PLUS_EXPR, DECL_SIZE (decl), TYPE_SIZE (type)); DECL_SIZE_UNIT (decl) = size_binop (PLUS_EXPR, DECL_SIZE_UNIT (decl), TYPE_SIZE_UNIT (type)); } } /* Decode a "typename", such as "int **", returning a ..._TYPE node. Set *EXPR, if EXPR not NULL, to any expression to be evaluated before the type name, and set *EXPR_CONST_OPERANDS, if EXPR_CONST_OPERANDS not NULL, to indicate whether the type name may appear in a constant expression. */ tree groktypename (struct c_type_name *type_name, tree *expr, bool *expr_const_operands) { tree type; tree attrs = type_name->specs->attrs; type_name->specs->attrs = NULL_TREE; type = grokdeclarator (type_name->declarator, type_name->specs, TYPENAME, false, NULL, &attrs, expr, expr_const_operands, DEPRECATED_NORMAL); /* Apply attributes. */ attrs = c_warn_type_attributes (attrs); decl_attributes (&type, attrs, 0); return type; } /* Wrapper for decl_attributes that adds some implicit attributes to VAR_DECLs or FUNCTION_DECLs. */ static tree c_decl_attributes (tree *node, tree attributes, int flags) { /* Add implicit "omp declare target" attribute if requested. 
*/ if (current_omp_declare_target_attribute && ((VAR_P (*node) && is_global_var (*node)) || TREE_CODE (*node) == FUNCTION_DECL)) { if (VAR_P (*node) && !lang_hooks.types.omp_mappable_type (TREE_TYPE (*node))) attributes = tree_cons (get_identifier ("omp declare target implicit"), NULL_TREE, attributes); else { attributes = tree_cons (get_identifier ("omp declare target"), NULL_TREE, attributes); attributes = tree_cons (get_identifier ("omp declare target block"), NULL_TREE, attributes); } } /* Look up the current declaration with all the attributes merged so far so that attributes on the current declaration that's about to be pushed that conflict with the former can be detected, diagnosed, and rejected as appropriate. */ tree last_decl = lookup_name (DECL_NAME (*node)); if (!last_decl) last_decl = lookup_name_in_scope (DECL_NAME (*node), external_scope); return decl_attributes (node, attributes, flags, last_decl); } /* Decode a declarator in an ordinary declaration or data definition. This is called as soon as the type information and variable name have been parsed, before parsing the initializer if any. Here we create the ..._DECL node, fill in its type, and put it on the list of decls for the current context. The ..._DECL node is returned as the value. Exception: for arrays where the length is not specified, the type is left null, to be filled in by `finish_decl'. Function definitions do not come here; they go to start_function instead. However, external and forward declarations of functions do go through here. Structure field declarations are done by grokfield and not through here. */ tree start_decl (struct c_declarator *declarator, struct c_declspecs *declspecs, bool initialized, tree attributes) { tree decl; tree tem; tree expr = NULL_TREE; enum deprecated_states deprecated_state = DEPRECATED_NORMAL; /* An object declared as __attribute__((deprecated)) suppresses warnings of uses of other deprecated items. */ if (lookup_attribute ("deprecated", attributes)) deprecated_state = DEPRECATED_SUPPRESS; decl = grokdeclarator (declarator, declspecs, NORMAL, initialized, NULL, &attributes, &expr, NULL, deprecated_state); if (!decl || decl == error_mark_node) return NULL_TREE; if (expr) add_stmt (fold_convert (void_type_node, expr)); if (TREE_CODE (decl) != FUNCTION_DECL && MAIN_NAME_P (DECL_NAME (decl)) && TREE_PUBLIC (decl)) warning (OPT_Wmain, "%q+D is usually a function", decl); if (initialized) /* Is it valid for this decl to have an initializer at all? If not, set INITIALIZED to zero, which will indirectly tell 'finish_decl' to ignore the initializer once it is parsed. */ switch (TREE_CODE (decl)) { case TYPE_DECL: error ("typedef %qD is initialized (use %<__typeof__%> instead)", decl); initialized = false; break; case FUNCTION_DECL: error ("function %qD is initialized like a variable", decl); initialized = false; break; case PARM_DECL: /* DECL_INITIAL in a PARM_DECL is really DECL_ARG_TYPE. */ error ("parameter %qD is initialized", decl); initialized = false; break; default: /* Don't allow initializations for incomplete types except for arrays which might be completed by the initialization. */ /* This can happen if the array size is an undefined macro. We already gave a warning, so we don't need another one. */ if (TREE_TYPE (decl) == error_mark_node) initialized = false; else if (COMPLETE_TYPE_P (TREE_TYPE (decl))) { /* A complete type is ok if size is fixed. 
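   For example (hypothetical user code):

       int n = 4;
       int a[n] = { 0 };    rejected below: variable-sized object
                            may not be initialized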
*/ if (!poly_int_tree_p (TYPE_SIZE (TREE_TYPE (decl))) || C_DECL_VARIABLE_SIZE (decl)) { error ("variable-sized object may not be initialized"); initialized = false; } } else if (TREE_CODE (TREE_TYPE (decl)) != ARRAY_TYPE) { error ("variable %qD has initializer but incomplete type", decl); initialized = false; } else if (C_DECL_VARIABLE_SIZE (decl)) { /* Although C99 is unclear about whether incomplete arrays of VLAs themselves count as VLAs, it does not make sense to permit them to be initialized given that ordinary VLAs may not be initialized. */ error ("variable-sized object may not be initialized"); initialized = false; } } if (initialized) { if (current_scope == file_scope) TREE_STATIC (decl) = 1; /* Tell 'pushdecl' this is an initialized decl even though we don't yet have the initializer expression. Also tell 'finish_decl' it may store the real initializer. */ DECL_INITIAL (decl) = error_mark_node; } /* If this is a function declaration, write a record describing it to the prototypes file (if requested). */ if (TREE_CODE (decl) == FUNCTION_DECL) gen_aux_info_record (decl, 0, 0, prototype_p (TREE_TYPE (decl))); /* ANSI specifies that a tentative definition which is not merged with a non-tentative definition behaves exactly like a definition with an initializer equal to zero. (Section 3.7.2) -fno-common gives strict ANSI behavior, though this tends to break a large body of code that grew up without this rule. Thread-local variables are never common, since there's no entrenched body of code to break, and it allows more efficient variable references in the presence of dynamic linking. */ if (VAR_P (decl) && !initialized && TREE_PUBLIC (decl) && !DECL_THREAD_LOCAL_P (decl) && !flag_no_common) DECL_COMMON (decl) = 1; /* Set attributes here so if duplicate decl, will have proper attributes. */ c_decl_attributes (&decl, attributes, 0); /* Handle gnu_inline attribute. */ if (declspecs->inline_p && !flag_gnu89_inline && TREE_CODE (decl) == FUNCTION_DECL && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl)) || current_function_decl)) { if (declspecs->storage_class == csc_auto && current_scope != file_scope) ; else if (declspecs->storage_class != csc_static) DECL_EXTERNAL (decl) = !DECL_EXTERNAL (decl); } if (TREE_CODE (decl) == FUNCTION_DECL && targetm.calls.promote_prototypes (TREE_TYPE (decl))) { struct c_declarator *ce = declarator; if (ce->kind == cdk_pointer) ce = declarator->declarator; if (ce->kind == cdk_function) { tree args = ce->u.arg_info->parms; for (; args; args = DECL_CHAIN (args)) { tree type = TREE_TYPE (args); if (type && INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (args) = c_type_promotes_to (type); } } } if (TREE_CODE (decl) == FUNCTION_DECL && DECL_DECLARED_INLINE_P (decl) && DECL_UNINLINABLE (decl) && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl))) warning (OPT_Wattributes, "inline function %q+D given attribute %qs", decl, "noinline"); /* C99 6.7.4p3: An inline definition of a function with external linkage shall not contain a definition of a modifiable object with static storage duration... */ if (VAR_P (decl) && current_scope != file_scope && TREE_STATIC (decl) && !TREE_READONLY (decl) && DECL_DECLARED_INLINE_P (current_function_decl) && DECL_EXTERNAL (current_function_decl)) record_inline_static (input_location, current_function_decl, decl, csi_modifiable); if (c_dialect_objc () && VAR_OR_FUNCTION_DECL_P (decl)) objc_check_global_decl (decl); /* Add this decl to the current scope. 
TEM may equal DECL or it may be a previous decl of the same name. */ tem = pushdecl (decl); if (initialized && DECL_EXTERNAL (tem)) { DECL_EXTERNAL (tem) = 0; TREE_STATIC (tem) = 1; } return tem; } /* Subroutine of finish_decl. TYPE is the type of an uninitialized object DECL or the non-array element type if DECL is an uninitialized array. If that type has a const member, diagnose this. */ static void diagnose_uninitialized_cst_member (tree decl, tree type) { tree field; for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field)) { tree field_type; if (TREE_CODE (field) != FIELD_DECL) continue; field_type = strip_array_types (TREE_TYPE (field)); if (TYPE_QUALS (field_type) & TYPE_QUAL_CONST) { warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, "uninitialized const member in %qT is invalid in C++", strip_array_types (TREE_TYPE (decl))); inform (DECL_SOURCE_LOCATION (field), "%qD should be initialized", field); } if (RECORD_OR_UNION_TYPE_P (field_type)) diagnose_uninitialized_cst_member (decl, field_type); } } /* Finish processing of a declaration; install its initial value. If ORIGTYPE is not NULL_TREE, it is the original type of INIT. If the length of an array type is not known before, it must be determined now, from the initial value, or it is an error. INIT_LOC is the location of the initial value. */ void finish_decl (tree decl, location_t init_loc, tree init, tree origtype, tree asmspec_tree) { tree type; bool was_incomplete = (DECL_SIZE (decl) == NULL_TREE); const char *asmspec = 0; /* If a name was specified, get the string. */ if (VAR_OR_FUNCTION_DECL_P (decl) && DECL_FILE_SCOPE_P (decl)) asmspec_tree = maybe_apply_renaming_pragma (decl, asmspec_tree); if (asmspec_tree) asmspec = TREE_STRING_POINTER (asmspec_tree); if (VAR_P (decl) && TREE_STATIC (decl) && global_bindings_p ()) /* So decl is a global variable. Record the types it uses so that we can decide later to emit debug info for them. */ record_types_used_by_current_var_decl (decl); /* If `start_decl' didn't like having an initialization, ignore it now. */ if (init != NULL_TREE && DECL_INITIAL (decl) == NULL_TREE) init = NULL_TREE; /* Don't crash if parm is initialized. */ if (TREE_CODE (decl) == PARM_DECL) init = NULL_TREE; if (init) store_init_value (init_loc, decl, init, origtype); if (c_dialect_objc () && (VAR_OR_FUNCTION_DECL_P (decl) || TREE_CODE (decl) == FIELD_DECL)) objc_check_decl (decl); type = TREE_TYPE (decl); /* Deduce size of array from initialization, if not already known. This is only needed for an initialization in the current scope; it must not be done for a file-scope initialization of a declaration with external linkage, redeclared in an inner scope with the outer declaration shadowed in an intermediate scope. */ if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE && TREE_CODE (decl) != TYPE_DECL && !(TREE_PUBLIC (decl) && current_scope != file_scope)) { bool do_default = (TREE_STATIC (decl) /* Even if pedantic, an external linkage array may have incomplete type at first. */ ? pedantic && !TREE_PUBLIC (decl) : !DECL_EXTERNAL (decl)); int failure = complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl), do_default); /* Get the completed type made by complete_array_type. 
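   For example (hypothetical user code):

       int a[] = { 1, 2, 3 };    domain completed to [0..2] here

   whereas an initializer-less incomplete array at block scope,
   e.g. "int b[];", reaches the switch below with failure == 2 and
   gets the "array size missing" error.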
*/ type = TREE_TYPE (decl); switch (failure) { case 1: error ("initializer fails to determine size of %q+D", decl); break; case 2: if (do_default) error ("array size missing in %q+D", decl); break; case 3: error ("zero or negative size array %q+D", decl); break; case 0: /* For global variables, update the copy of the type that exists in the binding. */ if (TREE_PUBLIC (decl)) { struct c_binding *b_ext = I_SYMBOL_BINDING (DECL_NAME (decl)); while (b_ext && !B_IN_EXTERNAL_SCOPE (b_ext)) b_ext = b_ext->shadowed; if (b_ext && TREE_CODE (decl) == TREE_CODE (b_ext->decl)) { if (b_ext->u.type && comptypes (b_ext->u.type, type)) b_ext->u.type = composite_type (b_ext->u.type, type); else b_ext->u.type = type; } } break; default: gcc_unreachable (); } if (DECL_INITIAL (decl)) TREE_TYPE (DECL_INITIAL (decl)) = type; relayout_decl (decl); } /* Look for braced array initializers for character arrays and recursively convert them into STRING_CSTs. */ if (tree init = DECL_INITIAL (decl)) DECL_INITIAL (decl) = braced_lists_to_strings (type, init); if (VAR_P (decl)) { if (init && TREE_CODE (init) == CONSTRUCTOR) add_flexible_array_elts_to_size (decl, init); complete_flexible_array_elts (DECL_INITIAL (decl)); if (is_global_var (decl)) { type_context_kind context = (DECL_THREAD_LOCAL_P (decl) ? TCTX_THREAD_STORAGE : TCTX_STATIC_STORAGE); if (!verify_type_context (input_location, context, TREE_TYPE (decl))) TREE_TYPE (decl) = error_mark_node; } if (DECL_SIZE (decl) == NULL_TREE && TREE_TYPE (decl) != error_mark_node && COMPLETE_TYPE_P (TREE_TYPE (decl))) layout_decl (decl, 0); if (DECL_SIZE (decl) == NULL_TREE /* Don't give an error if we already gave one earlier. */ && TREE_TYPE (decl) != error_mark_node && (TREE_STATIC (decl) /* A static variable with an incomplete type is an error if it is initialized. Also if it is not file scope. Otherwise, let it through, but if it is not `extern' then it may cause an error message later. */ ? (DECL_INITIAL (decl) != NULL_TREE || !DECL_FILE_SCOPE_P (decl)) /* An automatic variable with an incomplete type is an error. */ : !DECL_EXTERNAL (decl))) { error ("storage size of %q+D isn%'t known", decl); TREE_TYPE (decl) = error_mark_node; } if ((RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl)) || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE) && DECL_SIZE (decl) == NULL_TREE && TREE_STATIC (decl)) incomplete_record_decls.safe_push (decl); if (is_global_var (decl) && DECL_SIZE (decl) != NULL_TREE && TREE_TYPE (decl) != error_mark_node) { if (TREE_CODE (DECL_SIZE (decl)) == INTEGER_CST) constant_expression_warning (DECL_SIZE (decl)); else { error ("storage size of %q+D isn%'t constant", decl); TREE_TYPE (decl) = error_mark_node; } } if (TREE_USED (type)) { TREE_USED (decl) = 1; DECL_READ_P (decl) = 1; } } /* If this is a function and an assembler name is specified, reset DECL_RTL so we can give it its new name. Also, update builtin_decl if it was a normal built-in. */ if (TREE_CODE (decl) == FUNCTION_DECL && asmspec) { if (DECL_BUILT_IN_CLASS (decl) == BUILT_IN_NORMAL) set_builtin_user_assembler_name (decl, asmspec); set_user_assembler_name (decl, asmspec); } /* If #pragma weak was used, mark the decl weak now. */ maybe_apply_pragma_weak (decl); /* Output the assembler code and/or RTL code for variables and functions, unless the type is an undefined structure or union. If not, it will get done when the type is completed. */ if (VAR_OR_FUNCTION_DECL_P (decl)) { /* Determine the ELF visibility. 
*/ if (TREE_PUBLIC (decl)) c_determine_visibility (decl); /* This is a no-op in c-lang.c or something real in objc-act.c. */ if (c_dialect_objc ()) objc_check_decl (decl); if (asmspec) { /* If this is not a static variable, issue a warning. It doesn't make any sense to give an ASMSPEC for an ordinary, non-register local variable. Historically, GCC has accepted -- but ignored -- the ASMSPEC in this case. */ if (!DECL_FILE_SCOPE_P (decl) && VAR_P (decl) && !C_DECL_REGISTER (decl) && !TREE_STATIC (decl)) warning (0, "ignoring %<asm%> specifier for non-static local " "variable %q+D", decl); else set_user_assembler_name (decl, asmspec); } if (DECL_FILE_SCOPE_P (decl)) { if (DECL_INITIAL (decl) == NULL_TREE || DECL_INITIAL (decl) == error_mark_node) /* Don't output anything when a tentative file-scope definition is seen. But at end of compilation, do output code for them. */ DECL_DEFER_OUTPUT (decl) = 1; if (asmspec && VAR_P (decl) && C_DECL_REGISTER (decl)) DECL_HARD_REGISTER (decl) = 1; rest_of_decl_compilation (decl, true, 0); } else { /* In conjunction with an ASMSPEC, the `register' keyword indicates that we should place the variable in a particular register. */ if (asmspec && C_DECL_REGISTER (decl)) { DECL_HARD_REGISTER (decl) = 1; /* This cannot be done for a structure with volatile fields, on which DECL_REGISTER will have been reset. */ if (!DECL_REGISTER (decl)) error ("cannot put object with volatile field into register"); } if (TREE_CODE (decl) != FUNCTION_DECL) { /* If we're building a variable sized type, and we might be reachable other than via the top of the current binding level, then create a new BIND_EXPR so that we deallocate the object at the right time. */ /* Note that DECL_SIZE can be null due to errors. */ if (DECL_SIZE (decl) && !TREE_CONSTANT (DECL_SIZE (decl)) && STATEMENT_LIST_HAS_LABEL (cur_stmt_list)) { tree bind; bind = build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; add_stmt (bind); BIND_EXPR_BODY (bind) = push_stmt_list (); } add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl)); } } if (!DECL_FILE_SCOPE_P (decl)) { /* Recompute the RTL of a local array now if it used to be an incomplete type. */ if (was_incomplete && !is_global_var (decl)) { /* If we used it already as memory, it must stay in memory. */ TREE_ADDRESSABLE (decl) = TREE_USED (decl); /* If it's still incomplete now, no init will save it. */ if (DECL_SIZE (decl) == NULL_TREE) DECL_INITIAL (decl) = NULL_TREE; } } } if (TREE_CODE (decl) == TYPE_DECL) { if (!DECL_FILE_SCOPE_P (decl) && variably_modified_type_p (TREE_TYPE (decl), NULL_TREE)) add_stmt (build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl)); rest_of_decl_compilation (decl, DECL_FILE_SCOPE_P (decl), 0); } /* Install a cleanup (aka destructor) if one was given. */ if (VAR_P (decl) && !TREE_STATIC (decl)) { tree attr = lookup_attribute ("cleanup", DECL_ATTRIBUTES (decl)); if (attr) { tree cleanup_id = TREE_VALUE (TREE_VALUE (attr)); tree cleanup_decl = lookup_name (cleanup_id); tree cleanup; vec<tree, va_gc> *v; /* Build "cleanup(&decl)" for the destructor. */ cleanup = build_unary_op (input_location, ADDR_EXPR, decl, false); vec_alloc (v, 1); v->quick_push (cleanup); cleanup = c_build_function_call_vec (DECL_SOURCE_LOCATION (decl), vNULL, cleanup_decl, v, NULL); vec_free (v); /* Don't warn about decl unused; the cleanup uses it. 
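   For example (hypothetical user code):

       void closep (FILE **p) { if (*p) fclose (*p); }

       void use (const char *path)
       {
         FILE *f __attribute__ ((cleanup (closep))) = fopen (path, "r");
         ...
       }    closep (&f) runs when f goes out of scope

   that call is exactly the cleanup (&decl) expression built above.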
*/ TREE_USED (decl) = 1; TREE_USED (cleanup_decl) = 1; DECL_READ_P (decl) = 1; push_cleanup (decl, cleanup, false); } } if (warn_cxx_compat && VAR_P (decl) && !DECL_EXTERNAL (decl) && DECL_INITIAL (decl) == NULL_TREE) { type = strip_array_types (type); if (TREE_READONLY (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, "uninitialized %<const %D%> is invalid in C++", decl); else if (RECORD_OR_UNION_TYPE_P (type) && C_TYPE_FIELDS_READONLY (type)) diagnose_uninitialized_cst_member (decl, type); } if (flag_openmp && VAR_P (decl) && lookup_attribute ("omp declare target implicit", DECL_ATTRIBUTES (decl))) { DECL_ATTRIBUTES (decl) = remove_attribute ("omp declare target implicit", DECL_ATTRIBUTES (decl)); if (!lang_hooks.types.omp_mappable_type (TREE_TYPE (decl))) error ("%q+D in declare target directive does not have mappable type", decl); else if (!lookup_attribute ("omp declare target", DECL_ATTRIBUTES (decl)) && !lookup_attribute ("omp declare target link", DECL_ATTRIBUTES (decl))) DECL_ATTRIBUTES (decl) = tree_cons (get_identifier ("omp declare target"), NULL_TREE, DECL_ATTRIBUTES (decl)); } invoke_plugin_callbacks (PLUGIN_FINISH_DECL, decl); } /* Given a parsed parameter declaration, decode it into a PARM_DECL. EXPR is NULL or a pointer to an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ tree grokparm (const struct c_parm *parm, tree *expr) { tree attrs = parm->attrs; tree decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL, &attrs, expr, NULL, DEPRECATED_NORMAL); decl_attributes (&decl, attrs, 0); return decl; } /* Given a parsed parameter declaration, decode it into a PARM_DECL and push that on the current scope. EXPR is a pointer to an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ void push_parm_decl (const struct c_parm *parm, tree *expr) { tree attrs = parm->attrs; tree decl; decl = grokdeclarator (parm->declarator, parm->specs, PARM, false, NULL, &attrs, expr, NULL, DEPRECATED_NORMAL); if (decl && DECL_P (decl)) DECL_SOURCE_LOCATION (decl) = parm->loc; decl_attributes (&decl, attrs, 0); decl = pushdecl (decl); finish_decl (decl, input_location, NULL_TREE, NULL_TREE, NULL_TREE); } /* Mark all the parameter declarations to date as forward decls. Also diagnose use of this extension. */ void mark_forward_parm_decls (void) { struct c_binding *b; if (pedantic && !current_scope->warned_forward_parm_decls) { pedwarn (input_location, OPT_Wpedantic, "ISO C forbids forward parameter declarations"); current_scope->warned_forward_parm_decls = true; } for (b = current_scope->bindings; b; b = b->prev) if (TREE_CODE (b->decl) == PARM_DECL) TREE_ASM_WRITTEN (b->decl) = 1; } /* Build a COMPOUND_LITERAL_EXPR. TYPE is the type given in the compound literal, which may be an incomplete array type completed by the initializer; INIT is a CONSTRUCTOR at LOC that initializes the compound literal. NON_CONST is true if the initializers contain something that cannot occur in a constant expression. If ALIGNAS_ALIGN is nonzero, it is the (valid) alignment for this compound literal, as specified with _Alignas. */ tree build_compound_literal (location_t loc, tree type, tree init, bool non_const, unsigned int alignas_align) { /* We do not use start_decl here because we have a type, not a declarator; and do not use finish_decl because the decl should be stored inside the COMPOUND_LITERAL_EXPR rather than added elsewhere as a DECL_EXPR. 
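   For example (hypothetical user code):

       int *p = (int []) { 1, 2, 3 };

   TYPE arrives here as the incomplete int[] and is completed to
   int[3] from INIT by complete_array_type below.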
*/ tree decl; tree complit; tree stmt; if (type == error_mark_node || init == error_mark_node) return error_mark_node; decl = build_decl (loc, VAR_DECL, NULL_TREE, type); DECL_EXTERNAL (decl) = 0; TREE_PUBLIC (decl) = 0; TREE_STATIC (decl) = (current_scope == file_scope); DECL_CONTEXT (decl) = current_function_decl; TREE_USED (decl) = 1; DECL_READ_P (decl) = 1; DECL_ARTIFICIAL (decl) = 1; DECL_IGNORED_P (decl) = 1; C_DECL_COMPOUND_LITERAL_P (decl) = 1; TREE_TYPE (decl) = type; c_apply_type_quals_to_decl (TYPE_QUALS (strip_array_types (type)), decl); if (alignas_align) { SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT); DECL_USER_ALIGN (decl) = 1; } store_init_value (loc, decl, init, NULL_TREE); if (TREE_CODE (type) == ARRAY_TYPE && !COMPLETE_TYPE_P (type)) { int failure = complete_array_type (&TREE_TYPE (decl), DECL_INITIAL (decl), true); /* If complete_array_type returns 3, it means that the initial value of the compound literal is empty. Allow it. */ gcc_assert (failure == 0 || failure == 3); type = TREE_TYPE (decl); TREE_TYPE (DECL_INITIAL (decl)) = type; } if (type == error_mark_node || !COMPLETE_TYPE_P (type)) { c_incomplete_type_error (loc, NULL_TREE, type); return error_mark_node; } if (TREE_STATIC (decl) && !verify_type_context (loc, TCTX_STATIC_STORAGE, type)) return error_mark_node; stmt = build_stmt (DECL_SOURCE_LOCATION (decl), DECL_EXPR, decl); complit = build1 (COMPOUND_LITERAL_EXPR, type, stmt); TREE_SIDE_EFFECTS (complit) = 1; layout_decl (decl, 0); if (TREE_STATIC (decl)) { /* This decl needs a name for the assembler output. */ set_compound_literal_name (decl); DECL_DEFER_OUTPUT (decl) = 1; DECL_COMDAT (decl) = 1; pushdecl (decl); rest_of_decl_compilation (decl, 1, 0); } else if (current_function_decl && !current_scope->parm_flag) pushdecl (decl); if (non_const) { complit = build2 (C_MAYBE_CONST_EXPR, type, NULL, complit); C_MAYBE_CONST_EXPR_NON_CONST (complit) = 1; } return complit; } /* Check the type of a compound literal. Here we just check that it is valid for C++. */ void check_compound_literal_type (location_t loc, struct c_type_name *type_name) { if (warn_cxx_compat && (type_name->specs->typespec_kind == ctsk_tagdef || type_name->specs->typespec_kind == ctsk_tagfirstref || type_name->specs->typespec_kind == ctsk_tagfirstref_attrs)) warning_at (loc, OPT_Wc___compat, "defining a type in a compound literal is invalid in C++"); } /* Performs sanity checks on the TYPE and WIDTH of the bit-field NAME, replacing with appropriate values if they are invalid. */ static void check_bitfield_type_and_width (location_t loc, tree *type, tree *width, tree orig_name) { tree type_mv; unsigned int max_width; unsigned HOST_WIDE_INT w; const char *name = (orig_name ? identifier_to_locale (IDENTIFIER_POINTER (orig_name)) : _("<anonymous>")); /* Detect and ignore out of range field width and process valid field widths. 
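   For example (hypothetical user code):

       struct s
       {
         unsigned int a : 40;    width exceeds its type; clamped to
                                 TYPE_PRECISION (*type)
         int b : -1;             negative width; replaced with 1
         float c : 2;            invalid bit-field type
       };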
*/ if (!INTEGRAL_TYPE_P (TREE_TYPE (*width))) { error_at (loc, "bit-field %qs width not an integer constant", name); *width = integer_one_node; } else { if (TREE_CODE (*width) != INTEGER_CST) { *width = c_fully_fold (*width, false, NULL); if (TREE_CODE (*width) == INTEGER_CST) pedwarn (loc, OPT_Wpedantic, "bit-field %qs width not an integer constant expression", name); } if (TREE_CODE (*width) != INTEGER_CST) { error_at (loc, "bit-field %qs width not an integer constant", name); *width = integer_one_node; } constant_expression_warning (*width); if (tree_int_cst_sgn (*width) < 0) { error_at (loc, "negative width in bit-field %qs", name); *width = integer_one_node; } else if (integer_zerop (*width) && orig_name) { error_at (loc, "zero width for bit-field %qs", name); *width = integer_one_node; } } /* Detect invalid bit-field type. */ if (TREE_CODE (*type) != INTEGER_TYPE && TREE_CODE (*type) != BOOLEAN_TYPE && TREE_CODE (*type) != ENUMERAL_TYPE) { error_at (loc, "bit-field %qs has invalid type", name); *type = unsigned_type_node; } if (TYPE_WARN_IF_NOT_ALIGN (*type)) { error_at (loc, "cannot declare bit-field %qs with %<warn_if_not_aligned%> type", name); *type = unsigned_type_node; } type_mv = TYPE_MAIN_VARIANT (*type); if (!in_system_header_at (input_location) && type_mv != integer_type_node && type_mv != unsigned_type_node && type_mv != boolean_type_node) pedwarn_c90 (loc, OPT_Wpedantic, "type of bit-field %qs is a GCC extension", name); max_width = TYPE_PRECISION (*type); if (compare_tree_int (*width, max_width) > 0) { error_at (loc, "width of %qs exceeds its type", name); w = max_width; *width = build_int_cst (integer_type_node, w); } else w = tree_to_uhwi (*width); if (TREE_CODE (*type) == ENUMERAL_TYPE) { struct lang_type *lt = TYPE_LANG_SPECIFIC (*type); if (!lt || w < tree_int_cst_min_precision (lt->enum_min, TYPE_SIGN (*type)) || w < tree_int_cst_min_precision (lt->enum_max, TYPE_SIGN (*type))) warning_at (loc, 0, "%qs is narrower than values of its type", name); } } /* Print warning about variable length array if necessary. */ static void warn_variable_length_array (tree name, tree size) { if (TREE_CONSTANT (size)) { if (name) pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array %qE whose size " "cannot be evaluated", name); else pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids array " "whose size cannot be evaluated"); } else { if (name) pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable length array %qE", name); else pedwarn_c90 (input_location, OPT_Wvla, "ISO C90 forbids variable " "length array"); } } /* Print warning about defaulting to int if necessary. */ static void warn_defaults_to (location_t location, int opt, const char *gmsgid, ...) { diagnostic_info diagnostic; va_list ap; rich_location richloc (line_table, location); va_start (ap, gmsgid); diagnostic_set_info (&diagnostic, gmsgid, &ap, &richloc, flag_isoc99 ? DK_PEDWARN : DK_WARNING); diagnostic.option_index = opt; diagnostic_report_diagnostic (global_dc, &diagnostic); va_end (ap); } /* Returns the smallest location != UNKNOWN_LOCATION in LOCATIONS, considering only those c_declspec_words found in LIST, which must be terminated by cdw_number_of_elements. 
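For example, grokdeclarator below uses this helper to point the warning about
   ignored return-type qualifiers at the earliest qualifier the user wrote:

     const enum c_declspec_word ignored_quals_list[] =
       { cdw_const, cdw_volatile, cdw_restrict, cdw_address_space, cdw_atomic,
         cdw_number_of_elements };
     location_t specs_loc
       = smallest_type_quals_location (declspecs->locations, ignored_quals_list);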
*/ static location_t smallest_type_quals_location (const location_t *locations, const c_declspec_word *list) { location_t loc = UNKNOWN_LOCATION; while (*list != cdw_number_of_elements) { location_t newloc = locations[*list]; if (loc == UNKNOWN_LOCATION || (newloc != UNKNOWN_LOCATION && newloc < loc)) loc = newloc; list++; } return loc; } /* Given declspecs and a declarator, determine the name and type of the object declared and construct a ..._DECL node for it. (In one case we can return a ..._TYPE node instead. For invalid input we sometimes return NULL_TREE.) DECLSPECS is a c_declspecs structure for the declaration specifiers. DECL_CONTEXT says which syntactic context this declaration is in: NORMAL for most contexts. Make a VAR_DECL or FUNCTION_DECL or TYPE_DECL. FUNCDEF for a function definition. Like NORMAL but a few different error messages in each case. Return value may be zero meaning this definition is too screwy to try to parse. PARM for a parameter declaration (either within a function prototype or before a function body). Make a PARM_DECL, or return void_type_node. TYPENAME if for a typename (in a cast or sizeof). Don't make a DECL node; just return the ..._TYPE node. FIELD for a struct or union field; make a FIELD_DECL. INITIALIZED is true if the decl has an initializer. WIDTH is non-NULL for bit-fields, and is a pointer to an INTEGER_CST node representing the width of the bit-field. DECL_ATTRS points to the list of attributes that should be added to this decl. Any nested attributes that belong on the decl itself will be added to this list. If EXPR is not NULL, any expressions that need to be evaluated as part of evaluating variably modified types will be stored in *EXPR. If EXPR_CONST_OPERANDS is not NULL, *EXPR_CONST_OPERANDS will be set to indicate whether operands in *EXPR can be used in constant expressions. DEPRECATED_STATE is a deprecated_states value indicating whether deprecation warnings should be suppressed. In the TYPENAME case, DECLARATOR is really an absolute declarator. It may also be so in the PARM case, for a prototype where the argument type is specified but not the name. This function is where the complicated C meanings of `static' and `extern' are interpreted. 
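Some illustrative inputs and the contexts they arrive in (examples only,
   not an exhaustive list):

     int i;                          NORMAL: produces a VAR_DECL
     int f (int p) { return p; }     FUNCDEF for f; PARM for p
     sizeof (int *)                  TYPENAME: the ..._TYPE node is returned
     struct s { int m : 2; };        FIELD for m, with WIDTH for the bit-field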
*/ static tree grokdeclarator (const struct c_declarator *declarator, struct c_declspecs *declspecs, enum decl_context decl_context, bool initialized, tree *width, tree *decl_attrs, tree *expr, bool *expr_const_operands, enum deprecated_states deprecated_state) { tree type = declspecs->type; bool threadp = declspecs->thread_p; enum c_storage_class storage_class = declspecs->storage_class; int constp; int restrictp; int volatilep; int atomicp; int type_quals = TYPE_UNQUALIFIED; tree name = NULL_TREE; bool funcdef_flag = false; bool funcdef_syntax = false; bool size_varies = false; tree decl_attr = declspecs->decl_attr; int array_ptr_quals = TYPE_UNQUALIFIED; tree array_ptr_attrs = NULL_TREE; bool array_parm_static = false; bool array_parm_vla_unspec_p = false; tree returned_attrs = NULL_TREE; tree decl_id_attrs = NULL_TREE; bool bitfield = width != NULL; tree element_type; tree orig_qual_type = NULL; size_t orig_qual_indirect = 0; struct c_arg_info *arg_info = 0; addr_space_t as1, as2, address_space; location_t loc = UNKNOWN_LOCATION; tree expr_dummy; bool expr_const_operands_dummy; enum c_declarator_kind first_non_attr_kind; unsigned int alignas_align = 0; if (TREE_CODE (type) == ERROR_MARK) return error_mark_node; if (expr == NULL) { expr = &expr_dummy; expr_dummy = NULL_TREE; } if (expr_const_operands == NULL) expr_const_operands = &expr_const_operands_dummy; if (declspecs->expr) { if (*expr) *expr = build2 (COMPOUND_EXPR, TREE_TYPE (declspecs->expr), *expr, declspecs->expr); else *expr = declspecs->expr; } *expr_const_operands = declspecs->expr_const_operands; if (decl_context == FUNCDEF) funcdef_flag = true, decl_context = NORMAL; /* Look inside a declarator for the name being declared and get it as an IDENTIFIER_NODE, for an error message. */ { const struct c_declarator *decl = declarator; first_non_attr_kind = cdk_attrs; while (decl) switch (decl->kind) { case cdk_array: loc = decl->id_loc; /* FALL THRU. */ case cdk_function: case cdk_pointer: funcdef_syntax = (decl->kind == cdk_function); if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = decl->declarator; break; case cdk_attrs: decl = decl->declarator; break; case cdk_id: loc = decl->id_loc; if (decl->u.id.id) name = decl->u.id.id; decl_id_attrs = decl->u.id.attrs; if (first_non_attr_kind == cdk_attrs) first_non_attr_kind = decl->kind; decl = 0; break; default: gcc_unreachable (); } if (name == NULL_TREE) { gcc_assert (decl_context == PARM || decl_context == TYPENAME || (decl_context == FIELD && declarator->kind == cdk_id)); gcc_assert (!initialized); } } /* A function definition's declarator must have the form of a function declarator. */ if (funcdef_flag && !funcdef_syntax) return NULL_TREE; /* If this looks like a function definition, make it one, even if it occurs where parms are expected. Then store_parm_decls will reject it and not use it as a parm. */ if (decl_context == NORMAL && !funcdef_flag && current_scope->parm_flag) decl_context = PARM; if (declspecs->deprecated_p && deprecated_state != DEPRECATED_SUPPRESS) warn_deprecated_use (declspecs->type, declspecs->decl_attr); if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope && variably_modified_type_p (type, NULL_TREE)) { if (name) error_at (loc, "variably modified %qE at file scope", name); else error_at (loc, "variably modified field at file scope"); type = integer_type_node; } size_varies = C_TYPE_VARIABLE_SIZE (type) != 0; /* Diagnose defaulting to "int". 
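Illustrative inputs for this diagnostic (examples only):

     extern n;                   warning: type defaults to int in declaration of n
     f (void) { return 0; }      for a function definition, warn_about_return_type
                                  is set instead and the warning is issued later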
*/ if (declspecs->default_int_p && !in_system_header_at (input_location)) { /* Issue a warning if this is an ISO C 99 program or if -Wreturn-type and this is a function, or if -Wimplicit; prefer the former warning since it is more explicit. */ if ((warn_implicit_int || warn_return_type > 0 || flag_isoc99) && funcdef_flag) warn_about_return_type = 1; else { if (name) warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in declaration " "of %qE", name); else warn_defaults_to (loc, OPT_Wimplicit_int, "type defaults to %<int%> in type name"); } } /* Adjust the type if a bit-field is being declared, -funsigned-bitfields applied and the type is not explicitly "signed". */ if (bitfield && !flag_signed_bitfields && !declspecs->explicit_signed_p && TREE_CODE (type) == INTEGER_TYPE) type = unsigned_type_for (type); /* Figure out the type qualifiers for the declaration. There are two ways a declaration can become qualified. One is something like `const int i' where the `const' is explicit. Another is something like `typedef const int CI; CI i' where the type of the declaration contains the `const'. A third possibility is that there is a type qualifier on the element type of a typedefed array type, in which case we should extract that qualifier so that c_apply_type_quals_to_decl receives the full list of qualifiers to work with (C90 is not entirely clear about whether duplicate qualifiers should be diagnosed in this case, but it seems most appropriate to do so). */ element_type = strip_array_types (type); constp = declspecs->const_p + TYPE_READONLY (element_type); restrictp = declspecs->restrict_p + TYPE_RESTRICT (element_type); volatilep = declspecs->volatile_p + TYPE_VOLATILE (element_type); atomicp = declspecs->atomic_p + TYPE_ATOMIC (element_type); as1 = declspecs->address_space; as2 = TYPE_ADDR_SPACE (element_type); address_space = ADDR_SPACE_GENERIC_P (as1)? as2 : as1; if (constp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<const%>"); if (restrictp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<restrict%>"); if (volatilep > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<volatile%>"); if (atomicp > 1) pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %<_Atomic%>"); if (!ADDR_SPACE_GENERIC_P (as1) && !ADDR_SPACE_GENERIC_P (as2) && as1 != as2) error_at (loc, "conflicting named address spaces (%s vs %s)", c_addr_space_name (as1), c_addr_space_name (as2)); if ((TREE_CODE (type) == ARRAY_TYPE || first_non_attr_kind == cdk_array) && TYPE_QUALS (element_type)) { orig_qual_type = type; type = TYPE_MAIN_VARIANT (type); } type_quals = ((constp ? TYPE_QUAL_CONST : 0) | (restrictp ? TYPE_QUAL_RESTRICT : 0) | (volatilep ? TYPE_QUAL_VOLATILE : 0) | (atomicp ? TYPE_QUAL_ATOMIC : 0) | ENCODE_QUAL_ADDR_SPACE (address_space)); if (type_quals != TYPE_QUALS (element_type)) orig_qual_type = NULL_TREE; /* Applying the _Atomic qualifier to an array type (through the use of typedefs or typeof) must be detected here. If the qualifier is introduced later, any appearance of applying it to an array is actually applying it to an element of that array. */ if (declspecs->atomic_p && TREE_CODE (type) == ARRAY_TYPE) error_at (loc, "%<_Atomic%>-qualified array type"); /* Warn about storage classes that are invalid for certain kinds of declarations (parameters, typenames, etc.). */ if (funcdef_flag && (threadp || storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef)) { if (storage_class == csc_auto) pedwarn (loc, (current_scope == file_scope) ? 
0 : OPT_Wpedantic, "function definition declared %<auto%>"); if (storage_class == csc_register) error_at (loc, "function definition declared %<register%>"); if (storage_class == csc_typedef) error_at (loc, "function definition declared %<typedef%>"); if (threadp) error_at (loc, "function definition declared %qs", declspecs->thread_gnu_p ? "__thread" : "_Thread_local"); threadp = false; if (storage_class == csc_auto || storage_class == csc_register || storage_class == csc_typedef) storage_class = csc_none; } else if (decl_context != NORMAL && (storage_class != csc_none || threadp)) { if (decl_context == PARM && storage_class == csc_register) ; else { switch (decl_context) { case FIELD: if (name) error_at (loc, "storage class specified for structure " "field %qE", name); else error_at (loc, "storage class specified for structure field"); break; case PARM: if (name) error_at (loc, "storage class specified for parameter %qE", name); else error_at (loc, "storage class specified for unnamed parameter"); break; default: error_at (loc, "storage class specified for typename"); break; } storage_class = csc_none; threadp = false; } } else if (storage_class == csc_extern && initialized && !funcdef_flag) { /* 'extern' with initialization is invalid if not at file scope. */ if (current_scope == file_scope) { /* It is fine to have 'extern const' when compiling at C and C++ intersection. */ if (!(warn_cxx_compat && constp)) warning_at (loc, 0, "%qE initialized and declared %<extern%>", name); } else error_at (loc, "%qE has both %<extern%> and initializer", name); } else if (current_scope == file_scope) { if (storage_class == csc_auto) error_at (loc, "file-scope declaration of %qE specifies %<auto%>", name); if (pedantic && storage_class == csc_register) pedwarn (input_location, OPT_Wpedantic, "file-scope declaration of %qE specifies %<register%>", name); } else { if (storage_class == csc_extern && funcdef_flag) error_at (loc, "nested function %qE declared %<extern%>", name); else if (threadp && storage_class == csc_none) { error_at (loc, "function-scope %qE implicitly auto and declared " "%qs", name, declspecs->thread_gnu_p ? "__thread" : "_Thread_local"); threadp = false; } } /* Now figure out the structure of the declarator proper. Descend through it, creating more complex types, until we reach the declared identifier (or NULL_TREE, in an absolute declarator). At each stage we maintain an unqualified version of the type together with any qualifiers that should be applied to it with c_build_qualified_type; this way, array types including multidimensional array types are first built up in unqualified form and then the qualified form is created with TYPE_MAIN_VARIANT pointing to the unqualified form. */ while (declarator && declarator->kind != cdk_id) { if (type == error_mark_node) { declarator = declarator->declarator; continue; } /* Each level of DECLARATOR is either a cdk_array (for ...[..]), a cdk_pointer (for *...), a cdk_function (for ...(...)), a cdk_attrs (for nested attributes), or a cdk_id (for the name being declared or the place in an absolute declarator where the name was omitted). For the last case, we have just exited the loop. At this point, TYPE is the type of elements of an array, or for a function to return, or for a pointer to point to. After this sequence of ifs, TYPE is the type of the array or function or pointer, and DECLARATOR has had its outermost layer removed. 
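As a worked example (not from the original comment), for

     char *x[10];

   the loop starts with TYPE == char; the cdk_pointer layer makes TYPE
   pointer-to-char, the cdk_array layer makes TYPE array[10] of
   pointer-to-char, and the walk then stops at the cdk_id for x.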
*/ if (array_ptr_quals != TYPE_UNQUALIFIED || array_ptr_attrs != NULL_TREE || array_parm_static) { /* Only the innermost declarator (making a parameter be of array type which is converted to pointer type) may have static or type qualifiers. */ error_at (loc, "static or type qualifiers in non-parameter array declarator"); array_ptr_quals = TYPE_UNQUALIFIED; array_ptr_attrs = NULL_TREE; array_parm_static = false; } switch (declarator->kind) { case cdk_attrs: { /* A declarator with embedded attributes. */ tree attrs = declarator->u.attrs; const struct c_declarator *inner_decl; int attr_flags = 0; declarator = declarator->declarator; /* Standard attribute syntax precisely defines what entity an attribute in each position appertains to, so only apply laxity about positioning to GNU attribute syntax. Standard attributes applied to a function or array declarator apply exactly to that type; standard attributes applied to the identifier apply to the declaration rather than to the type, and are specified using a cdk_id declarator rather than using cdk_attrs. */ inner_decl = declarator; while (inner_decl->kind == cdk_attrs) inner_decl = inner_decl->declarator; if (!cxx11_attribute_p (attrs)) { if (inner_decl->kind == cdk_id) attr_flags |= (int) ATTR_FLAG_DECL_NEXT; else if (inner_decl->kind == cdk_function) attr_flags |= (int) ATTR_FLAG_FUNCTION_NEXT; else if (inner_decl->kind == cdk_array) attr_flags |= (int) ATTR_FLAG_ARRAY_NEXT; } attrs = c_warn_type_attributes (attrs); returned_attrs = decl_attributes (&type, chainon (returned_attrs, attrs), attr_flags); break; } case cdk_array: { tree itype = NULL_TREE; tree size = declarator->u.array.dimen; /* The index is a signed object `sizetype' bits wide. */ tree index_type = c_common_signed_type (sizetype); array_ptr_quals = declarator->u.array.quals; array_ptr_attrs = declarator->u.array.attrs; array_parm_static = declarator->u.array.static_p; array_parm_vla_unspec_p = declarator->u.array.vla_unspec_p; declarator = declarator->declarator; /* Check for some types that there cannot be arrays of. */ if (VOID_TYPE_P (type)) { if (name) error_at (loc, "declaration of %qE as array of voids", name); else error_at (loc, "declaration of type name as array of voids"); type = error_mark_node; } if (TREE_CODE (type) == FUNCTION_TYPE) { if (name) error_at (loc, "declaration of %qE as array of functions", name); else error_at (loc, "declaration of type name as array of " "functions"); type = error_mark_node; } if (pedantic && !in_system_header_at (input_location) && flexible_array_type_p (type)) pedwarn (loc, OPT_Wpedantic, "invalid use of structure with flexible array member"); if (size == error_mark_node) type = error_mark_node; if (type == error_mark_node) continue; if (!verify_type_context (loc, TCTX_ARRAY_ELEMENT, type)) { type = error_mark_node; continue; } /* If size was specified, set ITYPE to a range-type for that size. Otherwise, ITYPE remains null. finish_decl may figure it out from an initial value. */ if (size) { bool size_maybe_const = true; bool size_int_const = (TREE_CODE (size) == INTEGER_CST && !TREE_OVERFLOW (size)); bool this_size_varies = false; /* Strip NON_LVALUE_EXPRs since we aren't using as an lvalue. */ STRIP_TYPE_NOPS (size); if (!INTEGRAL_TYPE_P (TREE_TYPE (size))) { if (name) error_at (loc, "size of array %qE has non-integer type", name); else error_at (loc, "size of unnamed array has non-integer type"); size = integer_one_node; size_int_const = true; } /* This can happen with enum forward declaration. 
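An illustrative way to reach the incomplete-type case below (relies on the
   GNU forward-enum extension; hypothetical example):

     enum e;
     extern enum e v;
     char a[v];              error: size of array a has incomplete type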
*/ else if (!COMPLETE_TYPE_P (TREE_TYPE (size))) { if (name) error_at (loc, "size of array %qE has incomplete type", name); else error_at (loc, "size of unnamed array has incomplete " "type"); size = integer_one_node; size_int_const = true; } size = c_fully_fold (size, false, &size_maybe_const); if (pedantic && size_maybe_const && integer_zerop (size)) { if (name) pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array %qE", name); else pedwarn (loc, OPT_Wpedantic, "ISO C forbids zero-size array"); } if (TREE_CODE (size) == INTEGER_CST && size_maybe_const) { constant_expression_warning (size); if (tree_int_cst_sgn (size) < 0) { if (name) error_at (loc, "size of array %qE is negative", name); else error_at (loc, "size of unnamed array is negative"); size = integer_one_node; size_int_const = true; } /* Handle a size folded to an integer constant but not an integer constant expression. */ if (!size_int_const) { /* If this is a file scope declaration of an ordinary identifier, this is invalid code; diagnosing it here and not subsequently treating the type as variable-length avoids more confusing diagnostics later. */ if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) pedwarn (input_location, 0, "variably modified %qE at file scope", name); else this_size_varies = size_varies = true; warn_variable_length_array (name, size); } } else if ((decl_context == NORMAL || decl_context == FIELD) && current_scope == file_scope) { error_at (loc, "variably modified %qE at file scope", name); size = integer_one_node; } else { /* Make sure the array size remains visibly nonconstant even if it is (eg) a const variable with known value. */ this_size_varies = size_varies = true; warn_variable_length_array (name, size); if (sanitize_flags_p (SANITIZE_VLA) && current_function_decl != NULL_TREE && decl_context == NORMAL) { /* Evaluate the array size only once. */ size = save_expr (size); size = c_fully_fold (size, false, NULL); size = fold_build2 (COMPOUND_EXPR, TREE_TYPE (size), ubsan_instrument_vla (loc, size), size); } } if (integer_zerop (size) && !this_size_varies) { /* A zero-length array cannot be represented with an unsigned index type, which is what we'll get with build_index_type. Create an open-ended range instead. */ itype = build_range_type (sizetype, size, NULL_TREE); } else { /* Arrange for the SAVE_EXPR on the inside of the MINUS_EXPR, which allows the -1 to get folded with the +1 that happens when building TYPE_SIZE. */ if (size_varies) size = save_expr (size); if (this_size_varies && TREE_CODE (size) == INTEGER_CST) size = build2 (COMPOUND_EXPR, TREE_TYPE (size), integer_zero_node, size); /* Compute the maximum valid index, that is, size - 1. Do the calculation in index_type, so that if it is a variable the computations will be done in the proper mode. */ itype = fold_build2_loc (loc, MINUS_EXPR, index_type, convert (index_type, size), convert (index_type, size_one_node)); /* The above overflows when size does not fit in index_type. ??? While a size of INT_MAX+1 technically shouldn't cause an overflow (because we subtract 1), handling this case seems like an unnecessary complication. 
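Illustrative trigger for the check below (assuming 64-bit size_t, with
   SIZE_MAX from stdint.h):

     char big[SIZE_MAX];     the size does not fit the signed index_type, so
                              this is rejected: size of array big is too large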
*/ if (TREE_CODE (size) == INTEGER_CST && !int_fits_type_p (size, index_type)) { if (name) error_at (loc, "size of array %qE is too large", name); else error_at (loc, "size of unnamed array is too large"); type = error_mark_node; continue; } itype = build_index_type (itype); } if (this_size_varies) { if (TREE_SIDE_EFFECTS (size)) { if (*expr) *expr = build2 (COMPOUND_EXPR, TREE_TYPE (size), *expr, size); else *expr = size; } *expr_const_operands &= size_maybe_const; } } else if (decl_context == FIELD) { bool flexible_array_member = false; if (array_parm_vla_unspec_p) /* Field names can in fact have function prototype scope so [*] is disallowed here through making the field variably modified, not through being something other than a declaration with function prototype scope. */ size_varies = true; else { const struct c_declarator *t = declarator; while (t->kind == cdk_attrs) t = t->declarator; flexible_array_member = (t->kind == cdk_id); } if (flexible_array_member && !in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); /* ISO C99 Flexible array members are effectively identical to GCC's zero-length array extension. */ if (flexible_array_member || array_parm_vla_unspec_p) itype = build_range_type (sizetype, size_zero_node, NULL_TREE); } else if (decl_context == PARM) { if (array_parm_vla_unspec_p) { itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } else if (decl_context == TYPENAME) { if (array_parm_vla_unspec_p) { /* C99 6.7.5.2p4 */ warning (0, "%<[*]%> not in a declaration"); /* We use this to avoid messing up with incomplete array types of the same type, that would otherwise be modified below. */ itype = build_range_type (sizetype, size_zero_node, NULL_TREE); size_varies = true; } } /* Complain about arrays of incomplete types. */ if (!COMPLETE_TYPE_P (type)) { error_at (loc, "array type has incomplete element type %qT", type); /* See if we can be more helpful. */ if (TREE_CODE (type) == ARRAY_TYPE) { if (name) inform (loc, "declaration of %qE as multidimensional " "array must have bounds for all dimensions " "except the first", name); else inform (loc, "declaration of multidimensional array " "must have bounds for all dimensions except " "the first"); } type = error_mark_node; } else /* When itype is NULL, a shared incomplete array type is returned for all array of a given type. Elsewhere we make sure we don't complete that type before copying it, but here we want to make sure we don't ever modify the shared type, so we gcc_assert (itype) below. */ { addr_space_t as = DECODE_QUAL_ADDR_SPACE (type_quals); if (!ADDR_SPACE_GENERIC_P (as) && as != TYPE_ADDR_SPACE (type)) type = build_qualified_type (type, ENCODE_QUAL_ADDR_SPACE (as)); type = build_array_type (type, itype); } if (type != error_mark_node) { if (size_varies) { /* It is ok to modify type here even if itype is NULL: if size_varies, we're in a multi-dimensional array and the inner type has variable size, so the enclosing shared array type must too. */ if (size && TREE_CODE (size) == INTEGER_CST) type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); C_TYPE_VARIABLE_SIZE (type) = 1; } /* The GCC extension for zero-length arrays differs from ISO flexible array members in that sizeof yields zero. 
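A side-by-side sketch (illustrative):

     struct z { int n; char d[0]; };   GNU zero-length array:
                                        sizeof (((struct z *) 0)->d) == 0
     struct f { int n; char d[]; };    ISO C99 flexible array member: d has
                                        incomplete type, so sizeof on it is an error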
*/ if (size && integer_zerop (size)) { gcc_assert (itype); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_SIZE (type) = bitsize_zero_node; TYPE_SIZE_UNIT (type) = size_zero_node; SET_TYPE_STRUCTURAL_EQUALITY (type); } if (array_parm_vla_unspec_p) { gcc_assert (itype); /* The type is complete. C99 6.7.5.2p4 */ type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_SIZE (type) = bitsize_zero_node; TYPE_SIZE_UNIT (type) = size_zero_node; SET_TYPE_STRUCTURAL_EQUALITY (type); } if (!valid_array_size_p (loc, type, name)) type = error_mark_node; } if (decl_context != PARM && (array_ptr_quals != TYPE_UNQUALIFIED || array_ptr_attrs != NULL_TREE || array_parm_static)) { error_at (loc, "static or type qualifiers in non-parameter " "array declarator"); array_ptr_quals = TYPE_UNQUALIFIED; array_ptr_attrs = NULL_TREE; array_parm_static = false; } orig_qual_indirect++; break; } case cdk_function: { /* Say it's a definition only for the declarator closest to the identifier, apart possibly from some attributes. */ bool really_funcdef = false; tree arg_types; orig_qual_type = NULL_TREE; if (funcdef_flag) { const struct c_declarator *t = declarator->declarator; while (t->kind == cdk_attrs) t = t->declarator; really_funcdef = (t->kind == cdk_id); } /* Declaring a function type. Make sure we have a valid type for the function to return. */ if (type == error_mark_node) continue; size_varies = false; /* Warn about some types functions can't return. */ if (TREE_CODE (type) == FUNCTION_TYPE) { if (name) error_at (loc, "%qE declared as function returning a " "function", name); else error_at (loc, "type name declared as function " "returning a function"); type = integer_type_node; } if (TREE_CODE (type) == ARRAY_TYPE) { if (name) error_at (loc, "%qE declared as function returning an array", name); else error_at (loc, "type name declared as function returning " "an array"); type = integer_type_node; } /* Construct the function type and go to the next inner layer of declarator. */ arg_info = declarator->u.arg_info; arg_types = grokparms (arg_info, really_funcdef); /* Type qualifiers before the return type of the function qualify the return type, not the function type. */ if (type_quals) { const enum c_declspec_word ignored_quals_list[] = { cdw_const, cdw_volatile, cdw_restrict, cdw_address_space, cdw_atomic, cdw_number_of_elements }; location_t specs_loc = smallest_type_quals_location (declspecs->locations, ignored_quals_list); if (specs_loc == UNKNOWN_LOCATION) specs_loc = declspecs->locations[cdw_typedef]; if (specs_loc == UNKNOWN_LOCATION) specs_loc = loc; /* Type qualifiers on a function return type are normally permitted by the standard but have no effect, so give a warning at -Wignored-qualifiers. Qualifiers on a void return type are banned on function definitions in ISO C; GCC used to use them for noreturn functions. The resolution of C11 DR#423 means qualifiers (other than _Atomic) are actually removed from the return type when determining the function type. */ int quals_used = type_quals; if (flag_isoc11) quals_used &= TYPE_QUAL_ATOMIC; if (quals_used && VOID_TYPE_P (type) && really_funcdef) pedwarn (specs_loc, 0, "function definition has qualified void " "return type"); else warning_at (specs_loc, OPT_Wignored_qualifiers, "type qualifiers ignored on function " "return type"); /* Ensure an error for restrict on invalid types; the DR#423 resolution is not entirely clear about this.
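An illustrative input for the check below (hypothetical typedef name):

     typedef void (*handler) (void);
     restrict handler get_h (void);   the return type is a pointer to a
                                       function, not to an object, so C11 mode
                                       rejects it: invalid use of restrict

   By contrast, const int f (void); only draws the -Wignored-qualifiers
   warning above.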
*/ if (flag_isoc11 && (type_quals & TYPE_QUAL_RESTRICT) && (!POINTER_TYPE_P (type) || !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type)))) error_at (loc, "invalid use of %<restrict%>"); type = c_build_qualified_type (type, quals_used); } type_quals = TYPE_UNQUALIFIED; type = build_function_type (type, arg_types); declarator = declarator->declarator; /* Set the TYPE_CONTEXTs for each tagged type which is local to the formal parameter list of this FUNCTION_TYPE to point to the FUNCTION_TYPE node itself. */ { c_arg_tag *tag; unsigned ix; FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag) TYPE_CONTEXT (tag->type) = type; } break; } case cdk_pointer: { /* Merge any constancy or volatility into the target type for the pointer. */ if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); orig_qual_type = NULL_TREE; size_varies = false; /* When the pointed-to type involves components of variable size, care must be taken to ensure that the size evaluation code is emitted early enough to dominate all the possible later uses and late enough for the variables on which it depends to have been assigned. This is expected to happen automatically when the pointed-to type has a name/declaration of its own, but special attention is required if the type is anonymous. We attach an artificial TYPE_DECL to such pointed-to type and arrange for it to be included in a DECL_EXPR. This forces the sizes evaluation at a safe point and ensures it is not deferred until e.g. within a deeper conditional context. PARM contexts have no enclosing statement list that can hold the DECL_EXPR, so we need to use a BIND_EXPR instead, and add it to the list of expressions that need to be evaluated. TYPENAME contexts do have an enclosing statement list, but it would be incorrect to use it, as the size should only be evaluated if the containing expression is evaluated. We might also be in the middle of an expression with side effects on the pointed-to type size "arguments" prior to the pointer declaration point and the fake TYPE_DECL in the enclosing context would force the size evaluation prior to the side effects. We therefore use BIND_EXPRs in TYPENAME contexts too. */ if (!TYPE_NAME (type) && variably_modified_type_p (type, NULL_TREE)) { tree bind = NULL_TREE; if (decl_context == TYPENAME || decl_context == PARM) { bind = build3 (BIND_EXPR, void_type_node, NULL_TREE, NULL_TREE, NULL_TREE); TREE_SIDE_EFFECTS (bind) = 1; BIND_EXPR_BODY (bind) = push_stmt_list (); push_scope (); } tree decl = build_decl (loc, TYPE_DECL, NULL_TREE, type); DECL_ARTIFICIAL (decl) = 1; pushdecl (decl); finish_decl (decl, loc, NULL_TREE, NULL_TREE, NULL_TREE); TYPE_NAME (type) = decl; if (bind) { pop_scope (); BIND_EXPR_BODY (bind) = pop_stmt_list (BIND_EXPR_BODY (bind)); if (*expr) *expr = build2 (COMPOUND_EXPR, void_type_node, *expr, bind); else *expr = bind; } } type = c_build_pointer_type (type); /* Process type qualifiers (such as const or volatile) that were given inside the `*'.
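For instance (illustrative):

     int * const restrict p;    const and restrict written inside the * become
                                 u.pointer_quals below and qualify the pointer
                                 p itself, not the int it points to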
*/ type_quals = declarator->u.pointer_quals; declarator = declarator->declarator; break; } default: gcc_unreachable (); } } *decl_attrs = chainon (returned_attrs, *decl_attrs); *decl_attrs = chainon (decl_id_attrs, *decl_attrs); /* Now TYPE has the actual type, apart from any qualifiers in TYPE_QUALS. */ /* Warn about address space used for things other than static memory or pointers. */ address_space = DECODE_QUAL_ADDR_SPACE (type_quals); if (!ADDR_SPACE_GENERIC_P (address_space)) { if (decl_context == NORMAL) { switch (storage_class) { case csc_auto: error ("%qs combined with %<auto%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_register: error ("%qs combined with %<register%> qualifier for %qE", c_addr_space_name (address_space), name); break; case csc_none: if (current_function_scope) { error ("%qs specified for auto variable %qE", c_addr_space_name (address_space), name); break; } break; case csc_static: case csc_extern: case csc_typedef: break; default: gcc_unreachable (); } } else if (decl_context == PARM && TREE_CODE (type) != ARRAY_TYPE) { if (name) error ("%qs specified for parameter %qE", c_addr_space_name (address_space), name); else error ("%qs specified for unnamed parameter", c_addr_space_name (address_space)); } else if (decl_context == FIELD) { if (name) error ("%qs specified for structure field %qE", c_addr_space_name (address_space), name); else error ("%qs specified for structure field", c_addr_space_name (address_space)); } } /* Check the type and width of a bit-field. */ if (bitfield) { check_bitfield_type_and_width (loc, &type, width, name); /* C11 makes it implementation-defined (6.7.2.1#5) whether atomic types are permitted for bit-fields; we have no code to make bit-field accesses atomic, so disallow them. */ if (type_quals & TYPE_QUAL_ATOMIC) { if (name) error_at (loc, "bit-field %qE has atomic type", name); else error_at (loc, "bit-field has atomic type"); type_quals &= ~TYPE_QUAL_ATOMIC; } } /* Reject invalid uses of _Alignas. */ if (declspecs->alignas_p) { if (storage_class == csc_typedef) error_at (loc, "alignment specified for typedef %qE", name); else if (storage_class == csc_register) error_at (loc, "alignment specified for %<register%> object %qE", name); else if (decl_context == PARM) { if (name) error_at (loc, "alignment specified for parameter %qE", name); else error_at (loc, "alignment specified for unnamed parameter"); } else if (bitfield) { if (name) error_at (loc, "alignment specified for bit-field %qE", name); else error_at (loc, "alignment specified for unnamed bit-field"); } else if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "alignment specified for function %qE", name); else if (declspecs->align_log != -1 && TYPE_P (type)) { alignas_align = 1U << declspecs->align_log; if (alignas_align < min_align_of_type (type)) { if (name) error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of %qE", name); else error_at (loc, "%<_Alignas%> specifiers cannot reduce " "alignment of unnamed field"); alignas_align = 0; } } } /* If this is declaring a typedef name, return a TYPE_DECL. 
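Illustrative declarations taking this branch (hypothetical names):

     typedef unsigned long ulong;   returns a TYPE_DECL for ulong
     typedef int ft (void);
     typedef const ft cft;          pedwarn: ISO C forbids qualified function types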
*/ if (storage_class == csc_typedef) { tree decl; if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, TYPE_DECL, declarator->u.id.id, type); if (declspecs->explicit_signed_p) C_TYPEDEF_EXPLICITLY_SIGNED (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0, "typedef %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "typedef %q+D declared %<_Noreturn%>", decl); if (warn_cxx_compat && declarator->u.id.id != NULL_TREE) { struct c_binding *b = I_TAG_BINDING (declarator->u.id.id); if (b != NULL && b->decl != NULL_TREE && (B_IN_CURRENT_SCOPE (b) || (current_scope == file_scope && B_IN_EXTERNAL_SCOPE (b))) && TYPE_MAIN_VARIANT (b->decl) != TYPE_MAIN_VARIANT (type)) { auto_diagnostic_group d; if (warning_at (declarator->id_loc, OPT_Wc___compat, ("using %qD as both a typedef and a tag is " "invalid in C++"), decl) && b->locus != UNKNOWN_LOCATION) inform (b->locus, "originally defined here"); } } return decl; } /* If this is a type name (such as in a cast or sizeof), compute the type and return it now. */ if (decl_context == TYPENAME) { /* Note that the grammar rejects storage classes in typenames and fields. */ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); if ((type_quals & TYPE_QUAL_ATOMIC) && TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && TREE_CODE (type) == FUNCTION_TYPE && type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids const or volatile function types"); if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); return type; } if (pedantic && decl_context == FIELD && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.2.1p8 */ pedwarn (loc, OPT_Wpedantic, "a member of a structure or union cannot " "have a variably modified type"); } /* Aside from typedefs and type names (handled above), `void' at top level (not within pointer) is allowed only in public variables. We don't complain about parms either, but that is because a better error message can be made later. */ if (VOID_TYPE_P (type) && decl_context != PARM && !((decl_context != FIELD && TREE_CODE (type) != FUNCTION_TYPE) && (storage_class == csc_extern || (current_scope == file_scope && !(storage_class == csc_static || storage_class == csc_register))))) { error_at (loc, "variable or field %qE declared void", name); type = integer_type_node; } /* Now create the decl, which may be a VAR_DECL, a PARM_DECL or a FUNCTION_DECL, depending on DECL_CONTEXT and TYPE. */ { tree decl; if (decl_context == PARM) { tree promoted_type; bool array_parameter_p = false; /* A parameter declared as an array of T is really a pointer to T. One declared as a function is really a pointer to a function. */ if (TREE_CODE (type) == ARRAY_TYPE) { /* Transfer const-ness of array into that of type pointed to.
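Illustrative adjustments performed here (hypothetical parameters):

     void f (const int a[10]);   a becomes const int *: the element qualifier
                                  moves to the pointed-to type
     void g (int a[const]);      the const inside [] is array_ptr_quals and
                                  qualifies the pointer itself: int * const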
*/ type = TREE_TYPE (type); if (orig_qual_type != NULL_TREE) { if (orig_qual_indirect == 0) orig_qual_type = TREE_TYPE (orig_qual_type); else orig_qual_indirect--; } if (type_quals) type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); type = c_build_pointer_type (type); type_quals = array_ptr_quals; if (type_quals) type = c_build_qualified_type (type, type_quals); /* We don't yet implement attributes in this context. */ if (array_ptr_attrs != NULL_TREE) warning_at (loc, OPT_Wattributes, "attributes in parameter array declarator ignored"); size_varies = false; array_parameter_p = true; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (type_quals) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); if (type_quals) type = c_build_qualified_type (type, type_quals); type = c_build_pointer_type (type); type_quals = TYPE_UNQUALIFIED; } else if (type_quals) type = c_build_qualified_type (type, type_quals); decl = build_decl (declarator->id_loc, PARM_DECL, declarator->u.id.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; C_ARRAY_PARAMETER (decl) = array_parameter_p; /* Compute the type actually passed in the parmlist, for the case where there is no prototype. (For example, shorts and chars are passed as ints.) When there is a prototype, this is overridden later. */ if (type == error_mark_node) promoted_type = type; else promoted_type = c_type_promotes_to (type); DECL_ARG_TYPE (decl) = promoted_type; if (declspecs->inline_p) pedwarn (loc, 0, "parameter %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "parameter %q+D declared %<_Noreturn%>", decl); } else if (decl_context == FIELD) { /* Note that the grammar rejects storage classes in typenames and fields. */ gcc_assert (storage_class == csc_none && !threadp && !declspecs->inline_p && !declspecs->noreturn_p); /* Structure field. It may not be a function. */ if (TREE_CODE (type) == FUNCTION_TYPE) { error_at (loc, "field %qE declared as a function", name); type = build_pointer_type (type); } else if (TREE_CODE (type) != ERROR_MARK && !COMPLETE_OR_UNBOUND_ARRAY_TYPE_P (type)) { if (name) error_at (loc, "field %qE has incomplete type", name); else error_at (loc, "unnamed field has incomplete type"); type = error_mark_node; } else if (TREE_CODE (type) == ARRAY_TYPE && TYPE_DOMAIN (type) == NULL_TREE) { /* We have a flexible array member through a typedef. Set suitable range. Whether this is a correct position for a flexible array member will be determined elsewhere. 
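Illustrative only:

     typedef char tail[];
     struct s { int n; tail d; };   d is a flexible array member reached
                                     through a typedef; TYPE_DOMAIN is filled
                                     in just below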
*/ if (!in_system_header_at (input_location)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not " "support flexible array members"); type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type)); TYPE_DOMAIN (type) = build_range_type (sizetype, size_zero_node, NULL_TREE); if (orig_qual_indirect == 0) orig_qual_type = NULL_TREE; } if (type != error_mark_node && !verify_type_context (loc, TCTX_FIELD, type)) type = error_mark_node; type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); decl = build_decl (declarator->id_loc, FIELD_DECL, declarator->u.id.id, type); DECL_NONADDRESSABLE_P (decl) = bitfield; if (bitfield && !declarator->u.id.id) { TREE_NO_WARNING (decl) = 1; DECL_PADDING_P (decl) = 1; } if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; } else if (TREE_CODE (type) == FUNCTION_TYPE) { if (storage_class == csc_register || threadp) { error_at (loc, "invalid storage class for function %qE", name); } else if (current_scope != file_scope) { /* Function declaration not at file scope. Storage classes other than `extern' are not allowed, C99 6.7.1p5, and `extern' makes no difference. However, GCC allows 'auto', perhaps with 'inline', to support nested functions. */ if (storage_class == csc_auto) pedwarn (loc, OPT_Wpedantic, "invalid storage class for function %qE", name); else if (storage_class == csc_static) { error_at (loc, "invalid storage class for function %qE", name); if (funcdef_flag) storage_class = declspecs->storage_class = csc_none; else return NULL_TREE; } } decl = build_decl (declarator->id_loc, FUNCTION_DECL, declarator->u.id.id, type); decl = build_decl_attribute_variant (decl, decl_attr); if (type_quals & TYPE_QUAL_ATOMIC) { error_at (loc, "%<_Atomic%>-qualified function type"); type_quals &= ~TYPE_QUAL_ATOMIC; } else if (pedantic && type_quals && !DECL_IN_SYSTEM_HEADER (decl)) pedwarn (loc, OPT_Wpedantic, "ISO C forbids qualified function types"); /* Every function declaration is an external reference (DECL_EXTERNAL) except for those which are not at file scope and are explicitly declared "auto". This is forbidden by standard C (C99 6.7.1p5) and is interpreted by GCC to signify a forward declaration of a nested function. */ if (storage_class == csc_auto && current_scope != file_scope) DECL_EXTERNAL (decl) = 0; /* In C99, a function which is declared 'inline' with 'extern' is not an external reference (which is confusing). It means that the later definition of the function must be output in this file, C99 6.7.4p6. In GNU C89, a function declared 'extern inline' is an external reference. */ else if (declspecs->inline_p && storage_class != csc_static) DECL_EXTERNAL (decl) = ((storage_class == csc_extern) == flag_gnu89_inline); else DECL_EXTERNAL (decl) = !initialized; /* Record absence of global scope for `static' or `auto'. */ TREE_PUBLIC (decl) = !(storage_class == csc_static || storage_class == csc_auto); /* For a function definition, record the argument information block where store_parm_decls will look for it. */ if (funcdef_flag) current_function_arg_info = arg_info; if (declspecs->default_int_p) C_FUNCTION_IMPLICIT_INT (decl) = 1; /* Record presence of `inline' and `_Noreturn', if it is reasonable. */ if (flag_hosted && MAIN_NAME_P (declarator->u.id.id)) { if (declspecs->inline_p) pedwarn (loc, 0, "cannot inline function %<main%>"); if (declspecs->noreturn_p) pedwarn (loc, 0, "%<main%> declared %<_Noreturn%>"); } else { if (declspecs->inline_p) /* Record that the function is declared `inline'. 
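Illustrative declarations (hypothetical names):

     inline int sq (int x);      DECL_DECLARED_INLINE_P is set here
     inline int main (void);     taken by the branch above instead: in a
                                  hosted environment, cannot inline function main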
*/ DECL_DECLARED_INLINE_P (decl) = 1; if (declspecs->noreturn_p) { if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %<_Noreturn%>"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %<_Noreturn%>"); TREE_THIS_VOLATILE (decl) = 1; } } } else { /* It's a variable. */ /* An uninitialized decl with `extern' is a reference. */ int extern_ref = !initialized && storage_class == csc_extern; type = c_build_qualified_type (type, type_quals, orig_qual_type, orig_qual_indirect); /* C99 6.2.2p7: It is invalid (compile-time undefined behavior) to create an 'extern' declaration for a variable if there is a global declaration that is 'static' and the global declaration is not visible. (If the static declaration _is_ currently visible, the 'extern' declaration is taken to refer to that decl.) */ if (extern_ref && current_scope != file_scope) { tree global_decl = identifier_global_value (declarator->u.id.id); tree visible_decl = lookup_name (declarator->u.id.id); if (global_decl && global_decl != visible_decl && VAR_P (global_decl) && !TREE_PUBLIC (global_decl)) error_at (loc, "variable previously declared %<static%> " "redeclared %<extern%>"); } decl = build_decl (declarator->id_loc, VAR_DECL, declarator->u.id.id, type); if (size_varies) C_DECL_VARIABLE_SIZE (decl) = 1; if (declspecs->inline_p) pedwarn (loc, 0, "variable %q+D declared %<inline%>", decl); if (declspecs->noreturn_p) pedwarn (loc, 0, "variable %q+D declared %<_Noreturn%>", decl); /* At file scope, an initialized extern declaration may follow a static declaration. In that case, DECL_EXTERNAL will be reset later in start_decl. */ DECL_EXTERNAL (decl) = (storage_class == csc_extern); /* At file scope, the presence of a `static' or `register' storage class specifier, or the absence of all storage class specifiers makes this declaration a definition (perhaps tentative). Also, the absence of `static' makes it public. */ if (current_scope == file_scope) { TREE_PUBLIC (decl) = storage_class != csc_static; TREE_STATIC (decl) = !extern_ref; } /* Not at file scope, only `static' makes a static definition. */ else { TREE_STATIC (decl) = (storage_class == csc_static); TREE_PUBLIC (decl) = extern_ref; } if (threadp) set_decl_tls_model (decl, decl_default_tls_model (decl)); } if ((storage_class == csc_extern || (storage_class == csc_none && TREE_CODE (type) == FUNCTION_TYPE && !funcdef_flag)) && variably_modified_type_p (type, NULL_TREE)) { /* C99 6.7.5.2p2 */ if (TREE_CODE (type) == FUNCTION_TYPE) error_at (loc, "non-nested function with variably modified type"); else error_at (loc, "object with variably modified type must have " "no linkage"); } /* For nested functions disqualify ones taking VLAs by value from inlining since the middle-end cannot deal with this. ??? We should arrange for those to be passed by reference with emitting the copy on the caller side in the frontend. */ if (storage_class == csc_none && TREE_CODE (type) == FUNCTION_TYPE) for (tree al = TYPE_ARG_TYPES (type); al; al = TREE_CHAIN (al)) { tree arg = TREE_VALUE (al); if (arg != error_mark_node && C_TYPE_VARIABLE_SIZE (arg)) { DECL_UNINLINABLE (decl) = 1; break; } } /* Record `register' declaration for warnings on & and in case doing stupid register allocation. */ if (storage_class == csc_register) { C_DECL_REGISTER (decl) = 1; DECL_REGISTER (decl) = 1; } /* Record constancy and volatility. */ c_apply_type_quals_to_decl (type_quals, decl); /* Apply _Alignas specifiers. 
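For example (alignments assume a typical ABI):

     _Alignas (16) static char buf[32];   DECL_ALIGN becomes 16 * BITS_PER_UNIT
     _Alignas (1) long double x;          rejected earlier in this function:
                                           _Alignas cannot reduce the alignment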
*/ if (alignas_align) { SET_DECL_ALIGN (decl, alignas_align * BITS_PER_UNIT); DECL_USER_ALIGN (decl) = 1; } /* If a type has volatile components, it should be stored in memory. Otherwise, the fact that those components are volatile will be ignored, and would even crash the compiler. Of course, this only makes sense on VAR,PARM, and RESULT decl's. */ if (C_TYPE_FIELDS_VOLATILE (TREE_TYPE (decl)) && (VAR_P (decl) || TREE_CODE (decl) == PARM_DECL || TREE_CODE (decl) == RESULT_DECL)) { /* It is not an error for a structure with volatile fields to be declared register, but reset DECL_REGISTER since it cannot actually go in a register. */ int was_reg = C_DECL_REGISTER (decl); C_DECL_REGISTER (decl) = 0; DECL_REGISTER (decl) = 0; c_mark_addressable (decl); C_DECL_REGISTER (decl) = was_reg; } /* This is the earliest point at which we might know the assembler name of a variable. Thus, if it's known before this, die horribly. */ gcc_assert (!HAS_DECL_ASSEMBLER_NAME_P (decl) || !DECL_ASSEMBLER_NAME_SET_P (decl)); if (warn_cxx_compat && VAR_P (decl) && TREE_PUBLIC (decl) && TREE_STATIC (decl) && (RECORD_OR_UNION_TYPE_P (TREE_TYPE (decl)) || TREE_CODE (TREE_TYPE (decl)) == ENUMERAL_TYPE) && TYPE_NAME (TREE_TYPE (decl)) == NULL_TREE) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wc___compat, ("non-local variable %qD with anonymous type is " "questionable in C++"), decl); return decl; } } /* Decode the parameter-list info for a function type or function definition. The argument is the value returned by `get_parm_info' (or made in c-parse.c if there is an identifier list instead of a parameter decl list). These two functions are separate because when a function returns or receives functions then each is called multiple times but the order of calls is different. The last call to `grokparms' is always the one that contains the formal parameter names of a function definition. Return a list of arg types to use in the FUNCTION_TYPE for this function. FUNCDEF_FLAG is true for a function definition, false for a mere declaration. A nonempty identifier-list gets an error message when FUNCDEF_FLAG is false. */ static tree grokparms (struct c_arg_info *arg_info, bool funcdef_flag) { tree arg_types = arg_info->types; if (funcdef_flag && arg_info->had_vla_unspec) { /* A function definition isn't function prototype scope C99 6.2.1p4. */ /* C99 6.7.5.2p4 */ error ("%<[*]%> not allowed in other than function prototype scope"); } if (arg_types == NULL_TREE && !funcdef_flag && !in_system_header_at (input_location)) warning (OPT_Wstrict_prototypes, "function declaration isn%'t a prototype"); if (arg_types == error_mark_node) /* Don't set TYPE_ARG_TYPES in this case. */ return NULL_TREE; else if (arg_types && TREE_CODE (TREE_VALUE (arg_types)) == IDENTIFIER_NODE) { if (!funcdef_flag) { pedwarn (input_location, 0, "parameter names (without types) in " "function declaration"); arg_info->parms = NULL_TREE; } else arg_info->parms = arg_info->types; arg_info->types = NULL_TREE; return NULL_TREE; } else { tree parm, type, typelt; unsigned int parmno; /* In C2X, convert () in a function definition to (void). */ if (flag_isoc2x && funcdef_flag && !arg_types && !arg_info->parms) arg_types = arg_info->types = void_list_node; /* If there is a parameter of incomplete type in a definition, this is an error. In a declaration this is valid, and a struct or union type may be completed later, before any calls or definition of the function. 
In the case where the tag was first declared within the parameter list, a warning has already been given. If a parameter has void type, then however the function cannot be defined or called, so warn. */ for (parm = arg_info->parms, typelt = arg_types, parmno = 1; parm; parm = DECL_CHAIN (parm), typelt = TREE_CHAIN (typelt), parmno++) { type = TREE_VALUE (typelt); if (type == error_mark_node) continue; if (!COMPLETE_TYPE_P (type)) { if (funcdef_flag) { if (DECL_NAME (parm)) error_at (input_location, "parameter %u (%q+D) has incomplete type", parmno, parm); else error_at (DECL_SOURCE_LOCATION (parm), "parameter %u has incomplete type", parmno); TREE_VALUE (typelt) = error_mark_node; TREE_TYPE (parm) = error_mark_node; arg_types = NULL_TREE; } else if (VOID_TYPE_P (type)) { if (DECL_NAME (parm)) warning_at (input_location, 0, "parameter %u (%q+D) has void type", parmno, parm); else warning_at (DECL_SOURCE_LOCATION (parm), 0, "parameter %u has void type", parmno); } } if (DECL_NAME (parm) && TREE_USED (parm)) warn_if_shadowing (parm); } return arg_types; } } /* Allocate and initialize a c_arg_info structure from the parser's obstack. */ struct c_arg_info * build_arg_info (void) { struct c_arg_info *ret = XOBNEW (&parser_obstack, struct c_arg_info); ret->parms = NULL_TREE; ret->tags = NULL; ret->types = NULL_TREE; ret->others = NULL_TREE; ret->pending_sizes = NULL; ret->had_vla_unspec = 0; return ret; } /* Take apart the current scope and return a c_arg_info structure with info on a parameter list just parsed. This structure is later fed to 'grokparms' and 'store_parm_decls'. ELLIPSIS being true means the argument list ended in '...' so don't append a sentinel (void_list_node) to the end of the type-list. EXPR is NULL or an expression that needs to be evaluated for the side effects of array size expressions in the parameters. */ struct c_arg_info * get_parm_info (bool ellipsis, tree expr) { struct c_binding *b = current_scope->bindings; struct c_arg_info *arg_info = build_arg_info (); tree parms = NULL_TREE; vec<c_arg_tag, va_gc> *tags = NULL; tree types = NULL_TREE; tree others = NULL_TREE; bool gave_void_only_once_err = false; arg_info->had_vla_unspec = current_scope->had_vla_unspec; /* The bindings in this scope must not get put into a block. We will take care of deleting the binding nodes. */ current_scope->bindings = 0; /* This function is only called if there was *something* on the parameter list. */ gcc_assert (b); /* A parameter list consisting solely of 'void' indicates that the function takes no arguments. But if the 'void' is qualified (by 'const' or 'volatile'), or has a storage class specifier ('register'), then the behavior is undefined; issue an error. Typedefs for 'void' are OK (see DR#157). */ if (b->prev == 0 /* one binding */ && TREE_CODE (b->decl) == PARM_DECL /* which is a parameter */ && !DECL_NAME (b->decl) /* anonymous */ && VOID_TYPE_P (TREE_TYPE (b->decl))) /* of void type */ { if (TYPE_QUALS (TREE_TYPE (b->decl)) != TYPE_UNQUALIFIED || C_DECL_REGISTER (b->decl)) error_at (b->locus, "%<void%> as only parameter may not be qualified"); /* There cannot be an ellipsis. */ if (ellipsis) error_at (b->locus, "%<void%> must be the only parameter"); arg_info->types = void_list_node; return arg_info; } if (!ellipsis) types = void_list_node; /* Break up the bindings list into parms, tags, types, and others; apply sanity checks; purge the name-to-decl bindings. 
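Some illustrative parameter lists and the checks that apply (examples only):

     int f (void);          a single unnamed void parameter: takes no arguments
     int g (const void);    error: void as only parameter may not be qualified
     int h (void, ...);     error: void must be the only parameter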
*/ while (b) { tree decl = b->decl; tree type = TREE_TYPE (decl); c_arg_tag tag; const char *keyword; switch (TREE_CODE (decl)) { case PARM_DECL: if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; } /* Check for forward decls that never got their actual decl. */ if (TREE_ASM_WRITTEN (decl)) error_at (b->locus, "parameter %q+D has just a forward declaration", decl); /* Check for (..., void, ...) and issue an error. */ else if (VOID_TYPE_P (type) && !DECL_NAME (decl)) { if (!gave_void_only_once_err) { error_at (b->locus, "%<void%> must be the only parameter"); gave_void_only_once_err = true; } } else { /* Valid parameter, add it to the list. */ DECL_CHAIN (decl) = parms; parms = decl; /* Since there is a prototype, args are passed in their declared types. The back end may override this later. */ DECL_ARG_TYPE (decl) = type; types = tree_cons (0, type, types); } break; case ENUMERAL_TYPE: keyword = "enum"; goto tag; case UNION_TYPE: keyword = "union"; goto tag; case RECORD_TYPE: keyword = "struct"; goto tag; tag: /* Types may not have tag-names, in which case the type appears in the bindings list with b->id NULL. */ if (b->id) { gcc_assert (I_TAG_BINDING (b->id) == b); I_TAG_BINDING (b->id) = b->shadowed; } /* Warn about any struct, union or enum tags defined in a parameter list. The scope of such types is limited to the parameter list, which is rarely if ever desirable (it's impossible to call such a function with type- correct arguments). An anonymous union parm type is meaningful as a GNU extension, so don't warn for that. */ if (TREE_CODE (decl) != UNION_TYPE || b->id != NULL_TREE) { if (b->id) /* The %s will be one of 'struct', 'union', or 'enum'. */ warning_at (b->locus, 0, "%<%s %E%> declared inside parameter list" " will not be visible outside of this definition or" " declaration", keyword, b->id); else /* The %s will be one of 'struct', 'union', or 'enum'. */ warning_at (b->locus, 0, "anonymous %s declared inside parameter list" " will not be visible outside of this definition or" " declaration", keyword); } tag.id = b->id; tag.type = decl; vec_safe_push (tags, tag); break; case FUNCTION_DECL: /* FUNCTION_DECLs appear when there is an implicit function declaration in the parameter list. */ gcc_assert (b->nested || seen_error ()); goto set_shadowed; case CONST_DECL: case TYPE_DECL: /* CONST_DECLs appear here when we have an embedded enum, and TYPE_DECLs appear here when we have an embedded struct or union. No warnings for this - we already warned about the type itself. */ /* When we reinsert this decl in the function body, we need to reconstruct whether it was marked as nested. */ gcc_assert (!b->nested); DECL_CHAIN (decl) = others; others = decl; /* fall through */ case ERROR_MARK: set_shadowed: /* error_mark_node appears here when we have an undeclared variable. Just throw it away. */ if (b->id) { gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; } break; /* Other things that might be encountered. */ case LABEL_DECL: case VAR_DECL: default: gcc_unreachable (); } b = free_binding_and_advance (b); } arg_info->parms = parms; arg_info->tags = tags; arg_info->types = types; arg_info->others = others; arg_info->pending_sizes = expr; return arg_info; } /* Get the struct, enum or union (CODE says which) with tag NAME. Define the tag as a forward-reference with location LOC if it is not defined. 
HAVE_STD_ATTRS says whether any standard attributes were present after the struct, union or enum keyword; ATTRS are the standard attributes present there. Return a c_typespec structure for the type specifier. */ struct c_typespec parser_xref_tag (location_t loc, enum tree_code code, tree name, bool have_std_attrs, tree attrs) { struct c_typespec ret; tree ref; location_t refloc; ret.expr = NULL_TREE; ret.expr_const_operands = true; /* If a cross reference is requested, look up the type already defined for this tag and return it. */ ref = lookup_tag (code, name, false, &refloc); /* If this is the right type of tag, return what we found. (This reference will be shadowed by shadow_tag later if appropriate.) If this is the wrong type of tag, do not return it. If it was the wrong type in the same scope, we will have had an error message already; if in a different scope and declaring a name, pending_xref_error will give an error message; but if in a different scope and not declaring a name, this tag should shadow the previous declaration of a different type of tag, and this would not work properly if we return the reference found. (For example, with "struct foo" in an outer scope, "union foo;" must shadow that tag with a new one of union type.) */ ret.kind = (ref ? (have_std_attrs ? ctsk_tagref_attrs : ctsk_tagref) : (have_std_attrs ? ctsk_tagfirstref_attrs : ctsk_tagfirstref)); if (ref && TREE_CODE (ref) == code) { decl_attributes (&ref, attrs, (int) ATTR_FLAG_TYPE_IN_PLACE); if (C_TYPE_DEFINED_IN_STRUCT (ref) && loc != UNKNOWN_LOCATION && warn_cxx_compat) { switch (code) { case ENUMERAL_TYPE: warning_at (loc, OPT_Wc___compat, ("enum type defined in struct or union " "is not visible in C++")); inform (refloc, "enum type defined here"); break; case RECORD_TYPE: warning_at (loc, OPT_Wc___compat, ("struct defined in struct or union " "is not visible in C++")); inform (refloc, "struct defined here"); break; case UNION_TYPE: warning_at (loc, OPT_Wc___compat, ("union defined in struct or union " "is not visible in C++")); inform (refloc, "union defined here"); break; default: gcc_unreachable (); } } ret.spec = ref; return ret; } /* If no such tag is yet defined, create a forward-reference node and record it as the "definition". When a real declaration of this type is found, the forward-reference will be altered into a real type. */ ref = make_node (code); if (code == ENUMERAL_TYPE) { /* Give the type a default layout like unsigned int to avoid crashing if it does not get defined. */ SET_TYPE_MODE (ref, TYPE_MODE (unsigned_type_node)); SET_TYPE_ALIGN (ref, TYPE_ALIGN (unsigned_type_node)); TYPE_USER_ALIGN (ref) = 0; TYPE_UNSIGNED (ref) = 1; TYPE_PRECISION (ref) = TYPE_PRECISION (unsigned_type_node); TYPE_MIN_VALUE (ref) = TYPE_MIN_VALUE (unsigned_type_node); TYPE_MAX_VALUE (ref) = TYPE_MAX_VALUE (unsigned_type_node); } pushtag (loc, name, ref); decl_attributes (&ref, attrs, (int) ATTR_FLAG_TYPE_IN_PLACE); ret.spec = ref; return ret; } /* Get the struct, enum or union (CODE says which) with tag NAME. Define the tag as a forward-reference if it is not defined. Return a tree for the type. */ tree xref_tag (enum tree_code code, tree name) { return parser_xref_tag (input_location, code, name, false, NULL_TREE).spec; } /* Make sure that the tag NAME is defined *in the current scope* at least as a forward reference. LOC is the location of the struct's definition. CODE says which kind of tag NAME ought to be.
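This is the entry point for a definition such as 'struct foo { ... }'; redefinition of a completed tag and nested redefinition of a tag still being defined are both diagnosed below.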
This stores the current value of the file static STRUCT_PARSE_INFO in *ENCLOSING_STRUCT_PARSE_INFO, and points STRUCT_PARSE_INFO at a new c_struct_parse_info structure. The old value of STRUCT_PARSE_INFO is restored in finish_struct. */ tree start_struct (location_t loc, enum tree_code code, tree name, class c_struct_parse_info **enclosing_struct_parse_info) { /* If there is already a tag defined at this scope (as a forward reference), just return it. */ tree ref = NULL_TREE; location_t refloc = UNKNOWN_LOCATION; if (name != NULL_TREE) ref = lookup_tag (code, name, true, &refloc); if (ref && TREE_CODE (ref) == code) { if (TYPE_STUB_DECL (ref)) refloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (ref)); if (TYPE_SIZE (ref)) { if (code == UNION_TYPE) error_at (loc, "redefinition of %<union %E%>", name); else error_at (loc, "redefinition of %<struct %E%>", name); if (refloc != UNKNOWN_LOCATION) inform (refloc, "originally defined here"); /* Don't create structures using a name already in use. */ ref = NULL_TREE; } else if (C_TYPE_BEING_DEFINED (ref)) { if (code == UNION_TYPE) error_at (loc, "nested redefinition of %<union %E%>", name); else error_at (loc, "nested redefinition of %<struct %E%>", name); /* Don't bother to report "originally defined here" for a nested redefinition; the original definition should be obvious. */ /* Don't create structures that contain themselves. */ ref = NULL_TREE; } } /* Otherwise create a forward-reference just so the tag is in scope. */ if (ref == NULL_TREE || TREE_CODE (ref) != code) { ref = make_node (code); pushtag (loc, name, ref); } C_TYPE_BEING_DEFINED (ref) = 1; for (tree v = TYPE_MAIN_VARIANT (ref); v; v = TYPE_NEXT_VARIANT (v)) TYPE_PACKED (v) = flag_pack_struct; *enclosing_struct_parse_info = struct_parse_info; struct_parse_info = new c_struct_parse_info (); /* FIXME: This will issue a warning for a use of a type defined within a statement expr used within sizeof, et. al. This is not terribly serious as C++ doesn't permit statement exprs within sizeof anyhow. */ if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof)) warning_at (loc, OPT_Wc___compat, "defining type in %qs expression is invalid in C++", (in_sizeof ? "sizeof" : (in_typeof ? "typeof" : "alignof"))); return ref; } /* Process the specs, declarator and width (NULL if omitted) of a structure component, returning a FIELD_DECL node. WIDTH is non-NULL for bit-fields only, and is an INTEGER_CST node. DECL_ATTRS is as for grokdeclarator. LOC is the location of the structure component. This is done during the parsing of the struct declaration. The FIELD_DECL nodes are chained together and the lot of them are ultimately passed to `finish_struct' to make the RECORD_TYPE node. */ tree grokfield (location_t loc, struct c_declarator *declarator, struct c_declspecs *declspecs, tree width, tree *decl_attrs) { tree value; if (declarator->kind == cdk_id && declarator->u.id.id == NULL_TREE && width == NULL_TREE) { /* This is an unnamed decl. If we have something of the form "union { list } ;" then this is the anonymous union extension. Similarly for struct. If this is something of the form "struct foo;", then: if MS or Plan 9 extensions are enabled, this is handled as an anonymous struct; otherwise this is a forward declaration of a structure tag. If this is something of the form "foo;" and foo is a TYPE_DECL, then: if foo names a structure or union without a tag, this is an anonymous struct (this is permitted by C11).
If MS or Plan 9 extensions are enabled and foo names a structure, then again this is an anonymous struct. Otherwise this is an error. Oh what a horrid tangled web we weave. I wonder if MS consciously took this from Plan 9 or if it was an accident of implementation that took root before someone noticed the bug... */ tree type = declspecs->type; bool ok = false; if (RECORD_OR_UNION_TYPE_P (type) && (flag_ms_extensions || flag_plan9_extensions || !declspecs->typedef_p)) { if (flag_ms_extensions || flag_plan9_extensions) ok = true; else if (TYPE_NAME (type) == NULL) ok = true; else ok = false; } if (!ok) { pedwarn (loc, 0, "declaration does not declare anything"); return NULL_TREE; } if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 doesn%'t support unnamed structs/unions"); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 doesn%'t support unnamed structs/unions"); } value = grokdeclarator (declarator, declspecs, FIELD, false, width ? &width : NULL, decl_attrs, NULL, NULL, DEPRECATED_NORMAL); finish_decl (value, loc, NULL_TREE, NULL_TREE, NULL_TREE); DECL_INITIAL (value) = width; if (width) SET_DECL_C_BIT_FIELD (value); if (warn_cxx_compat && DECL_NAME (value) != NULL_TREE) { /* If we currently have a binding for this field, set the in_struct field in the binding, so that we warn about lookups which find it. */ struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (value)); if (b != NULL) { /* If the in_struct field is not yet set, push it on a list to be cleared when this struct is finished. */ if (!b->in_struct) { struct_parse_info->fields.safe_push (b); b->in_struct = 1; } } } return value; } /* Subroutine of detect_field_duplicates: return whether X and Y, which are both fields in the same struct, have duplicate field names. */ static bool is_duplicate_field (tree x, tree y) { if (DECL_NAME (x) != NULL_TREE && DECL_NAME (x) == DECL_NAME (y)) return true; /* When using -fplan9-extensions, an anonymous field whose name is a typedef can duplicate a field name. */ if (flag_plan9_extensions && (DECL_NAME (x) == NULL_TREE || DECL_NAME (y) == NULL_TREE)) { tree xt, xn, yt, yn; xt = TREE_TYPE (x); if (DECL_NAME (x) != NULL_TREE) xn = DECL_NAME (x); else if (RECORD_OR_UNION_TYPE_P (xt) && TYPE_NAME (xt) != NULL_TREE && TREE_CODE (TYPE_NAME (xt)) == TYPE_DECL) xn = DECL_NAME (TYPE_NAME (xt)); else xn = NULL_TREE; yt = TREE_TYPE (y); if (DECL_NAME (y) != NULL_TREE) yn = DECL_NAME (y); else if (RECORD_OR_UNION_TYPE_P (yt) && TYPE_NAME (yt) != NULL_TREE && TREE_CODE (TYPE_NAME (yt)) == TYPE_DECL) yn = DECL_NAME (TYPE_NAME (yt)); else yn = NULL_TREE; if (xn != NULL_TREE && xn == yn) return true; } return false; } /* Subroutine of detect_field_duplicates: add the fields of FIELDLIST to HTAB, giving errors for any duplicates. */ static void detect_field_duplicates_hash (tree fieldlist, hash_table<nofree_ptr_hash <tree_node> > *htab) { tree x, y; tree_node **slot; for (x = fieldlist; x ; x = DECL_CHAIN (x)) if ((y = DECL_NAME (x)) != NULL_TREE) { slot = htab->find_slot (y, INSERT); if (*slot) { error ("duplicate member %q+D", x); DECL_NAME (x) = NULL_TREE; } *slot = y; } else if (RECORD_OR_UNION_TYPE_P (TREE_TYPE (x))) { detect_field_duplicates_hash (TYPE_FIELDS (TREE_TYPE (x)), htab); /* When using -fplan9-extensions, an anonymous field whose name is a typedef can duplicate a field name. 
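In that case the typedef's name stands in for the missing field name, so it is entered in the hash table below just like an ordinary member name.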
*/ if (flag_plan9_extensions && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL) { tree xn = DECL_NAME (TYPE_NAME (TREE_TYPE (x))); slot = htab->find_slot (xn, INSERT); if (*slot) error ("duplicate member %q+D", TYPE_NAME (TREE_TYPE (x))); *slot = xn; } } } /* Generate an error for any duplicate field names in FIELDLIST. Munge the list such that this does not present a problem later. */ static void detect_field_duplicates (tree fieldlist) { tree x, y; int timeout = 10; /* If the struct is the list of instance variables of an Objective-C class, then we need to check all the instance variables of superclasses when checking for duplicates (since you can't have an instance variable in a subclass with the same name as an instance variable in a superclass). We pass on this job to the Objective-C compiler. objc_detect_field_duplicates() will return false if we are not checking the list of instance variables and the C frontend should proceed with the standard field duplicate checks. If we are checking the list of instance variables, the ObjC frontend will do the check, emit the errors if needed, and then return true. */ if (c_dialect_objc ()) if (objc_detect_field_duplicates (false)) return; /* First, see if there are more than "a few" fields. This is trivially true if there are zero or one fields. */ if (!fieldlist || !DECL_CHAIN (fieldlist)) return; x = fieldlist; do { timeout--; if (DECL_NAME (x) == NULL_TREE && RECORD_OR_UNION_TYPE_P (TREE_TYPE (x))) timeout = 0; x = DECL_CHAIN (x); } while (timeout > 0 && x); /* If there were "few" fields and no anonymous structures or unions, avoid the overhead of allocating a hash table. Instead just do the nested traversal thing. */ if (timeout > 0) { for (x = DECL_CHAIN (fieldlist); x; x = DECL_CHAIN (x)) /* When using -fplan9-extensions, we can have duplicates between typedef names and fields. */ if (DECL_NAME (x) || (flag_plan9_extensions && DECL_NAME (x) == NULL_TREE && RECORD_OR_UNION_TYPE_P (TREE_TYPE (x)) && TYPE_NAME (TREE_TYPE (x)) != NULL_TREE && TREE_CODE (TYPE_NAME (TREE_TYPE (x))) == TYPE_DECL)) { for (y = fieldlist; y != x; y = TREE_CHAIN (y)) if (is_duplicate_field (y, x)) { error ("duplicate member %q+D", x); DECL_NAME (x) = NULL_TREE; } } } else { hash_table<nofree_ptr_hash <tree_node> > htab (37); detect_field_duplicates_hash (fieldlist, &htab); } } /* Finish up struct info used by -Wc++-compat. */ static void warn_cxx_compat_finish_struct (tree fieldlist, enum tree_code code, location_t record_loc) { unsigned int ix; tree x; struct c_binding *b; if (fieldlist == NULL_TREE) { if (code == RECORD_TYPE) warning_at (record_loc, OPT_Wc___compat, "empty struct has size 0 in C, size 1 in C++"); else warning_at (record_loc, OPT_Wc___compat, "empty union has size 0 in C, size 1 in C++"); } /* Set the C_TYPE_DEFINED_IN_STRUCT flag for each type defined in the current struct. We do this now at the end of the struct because the flag is used to issue visibility warnings, and we only want to issue those warnings if the type is referenced outside of the struct declaration. */ FOR_EACH_VEC_ELT (struct_parse_info->struct_types, ix, x) C_TYPE_DEFINED_IN_STRUCT (x) = 1; /* The TYPEDEFS_SEEN field of STRUCT_PARSE_INFO is a list of typedefs used when declaring fields in this struct. 
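For instance, given 'typedef int t;', a field declared inside the struct as 't t;' uses the typedef while also introducing a field named 't', which is exactly the situation warned about next.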
If the name of any of the fields is also a typedef name then the struct would not parse in C++, because the C++ lookup rules say that the typedef name would be looked up in the context of the struct, and would thus be the field rather than the typedef. */ if (!struct_parse_info->typedefs_seen.is_empty () && fieldlist != NULL_TREE) { /* Use a hash_set<tree> using the name of the typedef. We can use a hash_set<tree> because identifiers are interned. */ hash_set<tree> tset; FOR_EACH_VEC_ELT (struct_parse_info->typedefs_seen, ix, x) tset.add (DECL_NAME (x)); for (x = fieldlist; x != NULL_TREE; x = DECL_CHAIN (x)) { if (DECL_NAME (x) != NULL_TREE && tset.contains (DECL_NAME (x))) { warning_at (DECL_SOURCE_LOCATION (x), OPT_Wc___compat, ("using %qD as both field and typedef name is " "invalid in C++"), x); /* FIXME: It would be nice to report the location where the typedef name is used. */ } } } /* For each field which has a binding and which was not defined in an enclosing struct, clear the in_struct field. */ FOR_EACH_VEC_ELT (struct_parse_info->fields, ix, b) b->in_struct = 0; } /* Function to help qsort sort FIELD_DECLs by name order. */ static int field_decl_cmp (const void *x_p, const void *y_p) { const tree *const x = (const tree *) x_p; const tree *const y = (const tree *) y_p; if (DECL_NAME (*x) == DECL_NAME (*y)) /* A nontype is "greater" than a type. */ return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL); if (DECL_NAME (*x) == NULL_TREE) return -1; if (DECL_NAME (*y) == NULL_TREE) return 1; if (DECL_NAME (*x) < DECL_NAME (*y)) return -1; return 1; } /* If this structure or union completes the type of any previous variable declaration, lay it out and output its rtl. */ static void finish_incomplete_vars (tree incomplete_vars, bool toplevel) { for (tree x = incomplete_vars; x; x = TREE_CHAIN (x)) { tree decl = TREE_VALUE (x); if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (decl)); if (TREE_CODE (decl) != TYPE_DECL) { relayout_decl (decl); if (c_dialect_objc ()) objc_check_decl (decl); rest_of_decl_compilation (decl, toplevel, 0); } } } /* Fill in the fields of a RECORD_TYPE or UNION_TYPE node, T. LOC is the location of the RECORD_TYPE or UNION_TYPE's definition. FIELDLIST is a chain of FIELD_DECL nodes for the fields. ATTRIBUTES are attributes to be applied to the structure. ENCLOSING_STRUCT_PARSE_INFO is the value of STRUCT_PARSE_INFO when the struct was started. */ tree finish_struct (location_t loc, tree t, tree fieldlist, tree attributes, class c_struct_parse_info *enclosing_struct_parse_info) { tree x; bool toplevel = file_scope == current_scope; /* If this type was previously laid out as a forward reference, make sure we lay it out again. */ TYPE_SIZE (t) = NULL_TREE; decl_attributes (&t, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); if (pedantic) { for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (DECL_NAME (x) != NULL_TREE) break; if (flag_isoc11 && RECORD_OR_UNION_TYPE_P (TREE_TYPE (x))) break; } if (x == NULL_TREE) { if (TREE_CODE (t) == UNION_TYPE) { if (fieldlist) pedwarn (loc, OPT_Wpedantic, "union has no named members"); else pedwarn (loc, OPT_Wpedantic, "union has no members"); } else { if (fieldlist) pedwarn (loc, OPT_Wpedantic, "struct has no named members"); else pedwarn (loc, OPT_Wpedantic, "struct has no members"); } } } /* Install struct as DECL_CONTEXT of each field decl. 
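(This ties each FIELD_DECL back to T, so later passes can recover the containing aggregate from the field.)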
Also process specified field sizes, found in the DECL_INITIAL, storing 0 there after the type has been changed to precision equal to its width, rather than the precision of the specified standard type. (Correct layout requires the original type to have been preserved until now.) */ bool saw_named_field = false; for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (TREE_TYPE (x) == error_mark_node) continue; DECL_CONTEXT (x) = t; /* If any field is const, the structure type is pseudo-const. */ if (TREE_READONLY (x)) C_TYPE_FIELDS_READONLY (t) = 1; else { /* A field that is pseudo-const makes the structure likewise. */ tree t1 = strip_array_types (TREE_TYPE (x)); if (RECORD_OR_UNION_TYPE_P (t1) && C_TYPE_FIELDS_READONLY (t1)) C_TYPE_FIELDS_READONLY (t) = 1; } /* Any field that is volatile means variables of this type must be treated in some ways as volatile. */ if (TREE_THIS_VOLATILE (x)) C_TYPE_FIELDS_VOLATILE (t) = 1; /* Any field of nominal variable size implies structure is too. */ if (C_DECL_VARIABLE_SIZE (x)) C_TYPE_VARIABLE_SIZE (t) = 1; if (DECL_C_BIT_FIELD (x)) { unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (x)); DECL_SIZE (x) = bitsize_int (width); DECL_BIT_FIELD (x) = 1; } if (TYPE_PACKED (t) && (DECL_BIT_FIELD (x) || TYPE_ALIGN (TREE_TYPE (x)) > BITS_PER_UNIT)) DECL_PACKED (x) = 1; /* Detect flexible array member in an invalid context. */ if (TREE_CODE (TREE_TYPE (x)) == ARRAY_TYPE && TYPE_SIZE (TREE_TYPE (x)) == NULL_TREE && TYPE_DOMAIN (TREE_TYPE (x)) != NULL_TREE && TYPE_MAX_VALUE (TYPE_DOMAIN (TREE_TYPE (x))) == NULL_TREE) { if (TREE_CODE (t) == UNION_TYPE) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member in union"); TREE_TYPE (x) = error_mark_node; } else if (DECL_CHAIN (x) != NULL_TREE) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member not at end of struct"); TREE_TYPE (x) = error_mark_node; } else if (!saw_named_field) { error_at (DECL_SOURCE_LOCATION (x), "flexible array member in a struct with no named " "members"); TREE_TYPE (x) = error_mark_node; } } if (pedantic && TREE_CODE (t) == RECORD_TYPE && flexible_array_type_p (TREE_TYPE (x))) pedwarn (DECL_SOURCE_LOCATION (x), OPT_Wpedantic, "invalid use of structure with flexible array member"); if (DECL_NAME (x) || RECORD_OR_UNION_TYPE_P (TREE_TYPE (x))) saw_named_field = true; } detect_field_duplicates (fieldlist); /* Now we have the nearly final fieldlist. Record it, then lay out the structure or union (including the fields). */ TYPE_FIELDS (t) = fieldlist; maybe_apply_pragma_scalar_storage_order (t); layout_type (t); if (TYPE_SIZE_UNIT (t) && TREE_CODE (TYPE_SIZE_UNIT (t)) == INTEGER_CST && !TREE_OVERFLOW (TYPE_SIZE_UNIT (t)) && !valid_constant_size_p (TYPE_SIZE_UNIT (t))) error ("type %qT is too large", t); /* Give bit-fields their proper types and rewrite the type of array fields with scalar component if the enclosing type has reverse storage order. 
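For example, a member declared as 'unsigned x : 3;' was laid out above using its declared type; the loop below narrows it to a 3-bit integer type built by c_build_bitfield_integer_type.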
*/ for (tree field = fieldlist; field; field = DECL_CHAIN (field)) { if (TREE_CODE (field) == FIELD_DECL && DECL_INITIAL (field) && TREE_TYPE (field) != error_mark_node) { unsigned HOST_WIDE_INT width = tree_to_uhwi (DECL_INITIAL (field)); tree type = TREE_TYPE (field); if (width != TYPE_PRECISION (type)) { TREE_TYPE (field) = c_build_bitfield_integer_type (width, TYPE_UNSIGNED (type)); SET_DECL_MODE (field, TYPE_MODE (TREE_TYPE (field))); } DECL_INITIAL (field) = NULL_TREE; } else if (TYPE_REVERSE_STORAGE_ORDER (t) && TREE_CODE (field) == FIELD_DECL && TREE_CODE (TREE_TYPE (field)) == ARRAY_TYPE) { tree ftype = TREE_TYPE (field); tree ctype = strip_array_types (ftype); if (!RECORD_OR_UNION_TYPE_P (ctype) && TYPE_MODE (ctype) != QImode) { tree fmain_type = TYPE_MAIN_VARIANT (ftype); tree *typep = &fmain_type; do { *typep = build_distinct_type_copy (*typep); TYPE_REVERSE_STORAGE_ORDER (*typep) = 1; typep = &TREE_TYPE (*typep); } while (TREE_CODE (*typep) == ARRAY_TYPE); TREE_TYPE (field) = c_build_qualified_type (fmain_type, TYPE_QUALS (ftype)); } } } /* Now we have the truly final field list. Store it in this type and in the variants. */ TYPE_FIELDS (t) = fieldlist; /* If there are lots of fields, sort so we can look through them fast. We arbitrarily consider 16 or more elts to be "a lot". */ { int len = 0; for (x = fieldlist; x; x = DECL_CHAIN (x)) { if (len > 15 || DECL_NAME (x) == NULL) break; len += 1; } if (len > 15) { tree *field_array; struct lang_type *space; struct sorted_fields_type *space2; len += list_length (x); /* Use the same allocation policy here that make_node uses, to ensure that this lives as long as the rest of the struct decl. All decls in an inline function need to be saved. */ space = ggc_cleared_alloc<struct lang_type> (); space2 = (sorted_fields_type *) ggc_internal_alloc (sizeof (struct sorted_fields_type) + len * sizeof (tree)); len = 0; space->s = space2; field_array = &space2->elts[0]; for (x = fieldlist; x; x = DECL_CHAIN (x)) { field_array[len++] = x; /* If there is anonymous struct or union, break out of the loop. */ if (DECL_NAME (x) == NULL) break; } /* Found no anonymous struct/union. Add the TYPE_LANG_SPECIFIC. */ if (x == NULL) { TYPE_LANG_SPECIFIC (t) = space; TYPE_LANG_SPECIFIC (t)->s->len = len; field_array = TYPE_LANG_SPECIFIC (t)->s->elts; qsort (field_array, len, sizeof (tree), field_decl_cmp); } } } /* If this was supposed to be a transparent union, but we can't make it one, warn and turn off the flag. */ if (TREE_CODE (t) == UNION_TYPE && TYPE_TRANSPARENT_AGGR (t) && (!TYPE_FIELDS (t) || TYPE_MODE (t) != DECL_MODE (TYPE_FIELDS (t)))) { TYPE_TRANSPARENT_AGGR (t) = 0; warning_at (loc, 0, "union cannot be made transparent"); } tree incomplete_vars = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (t)); for (x = TYPE_MAIN_VARIANT (t); x; x = TYPE_NEXT_VARIANT (x)) { TYPE_FIELDS (x) = TYPE_FIELDS (t); TYPE_LANG_SPECIFIC (x) = TYPE_LANG_SPECIFIC (t); TYPE_TRANSPARENT_AGGR (x) = TYPE_TRANSPARENT_AGGR (t); C_TYPE_FIELDS_READONLY (x) = C_TYPE_FIELDS_READONLY (t); C_TYPE_FIELDS_VOLATILE (x) = C_TYPE_FIELDS_VOLATILE (t); C_TYPE_VARIABLE_SIZE (x) = C_TYPE_VARIABLE_SIZE (t); C_TYPE_INCOMPLETE_VARS (x) = NULL_TREE; } /* Update type location to the one of the definition, instead of e.g. a forward declaration. */ if (TYPE_STUB_DECL (t)) DECL_SOURCE_LOCATION (TYPE_STUB_DECL (t)) = loc; /* Finish debugging output for this type. */ rest_of_type_compilation (t, toplevel); finish_incomplete_vars (incomplete_vars, toplevel); /* If we're inside a function proper, i.e. 
not file-scope and not still parsing parameters, then arrange for the size of a variable sized type to be bound now. */ if (building_stmt_list_p () && variably_modified_type_p (t, NULL_TREE)) add_stmt (build_stmt (loc, DECL_EXPR, build_decl (loc, TYPE_DECL, NULL, t))); if (warn_cxx_compat) warn_cxx_compat_finish_struct (fieldlist, TREE_CODE (t), loc); delete struct_parse_info; struct_parse_info = enclosing_struct_parse_info; /* If this struct is defined inside a struct, add it to struct_types. */ if (warn_cxx_compat && struct_parse_info != NULL && !in_sizeof && !in_typeof && !in_alignof) struct_parse_info->struct_types.safe_push (t); return t; } static struct { gt_pointer_operator new_value; void *cookie; } resort_data; /* This routine compares two fields like field_decl_cmp but using the pointer operator in resort_data. */ static int resort_field_decl_cmp (const void *x_p, const void *y_p) { const tree *const x = (const tree *) x_p; const tree *const y = (const tree *) y_p; if (DECL_NAME (*x) == DECL_NAME (*y)) /* A nontype is "greater" than a type. */ return (TREE_CODE (*y) == TYPE_DECL) - (TREE_CODE (*x) == TYPE_DECL); if (DECL_NAME (*x) == NULL_TREE) return -1; if (DECL_NAME (*y) == NULL_TREE) return 1; { tree d1 = DECL_NAME (*x); tree d2 = DECL_NAME (*y); resort_data.new_value (&d1, resort_data.cookie); resort_data.new_value (&d2, resort_data.cookie); if (d1 < d2) return -1; } return 1; } /* Resort DECL_SORTED_FIELDS because pointers have been reordered. */ void resort_sorted_fields (void *obj, void * ARG_UNUSED (orig_obj), gt_pointer_operator new_value, void *cookie) { struct sorted_fields_type *sf = (struct sorted_fields_type *) obj; resort_data.new_value = new_value; resort_data.cookie = cookie; qsort (&sf->elts[0], sf->len, sizeof (tree), resort_field_decl_cmp); } /* Lay out the type T, and its element type, and so on. */ static void layout_array_type (tree t) { if (TREE_CODE (TREE_TYPE (t)) == ARRAY_TYPE) layout_array_type (TREE_TYPE (t)); layout_type (t); } /* Begin compiling the definition of an enumeration type. NAME is its name (or null if anonymous). LOC is the enum's location. Returns the type object, as yet incomplete. Also records info about it so that build_enumerator may be used to declare the individual values as they are read. */ tree start_enum (location_t loc, struct c_enum_contents *the_enum, tree name) { tree enumtype = NULL_TREE; location_t enumloc = UNKNOWN_LOCATION; /* If this is the real definition for a previous forward reference, fill in the contents in the same object that used to be the forward reference. */ if (name != NULL_TREE) enumtype = lookup_tag (ENUMERAL_TYPE, name, true, &enumloc); if (enumtype == NULL_TREE || TREE_CODE (enumtype) != ENUMERAL_TYPE) { enumtype = make_node (ENUMERAL_TYPE); pushtag (loc, name, enumtype); } /* Update type location to the one of the definition, instead of e.g. a forward declaration. */ else if (TYPE_STUB_DECL (enumtype)) { enumloc = DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype)); DECL_SOURCE_LOCATION (TYPE_STUB_DECL (enumtype)) = loc; } if (C_TYPE_BEING_DEFINED (enumtype)) error_at (loc, "nested redefinition of %<enum %E%>", name); C_TYPE_BEING_DEFINED (enumtype) = 1; if (TYPE_VALUES (enumtype) != NULL_TREE) { /* This enum is a named one that has been declared already. */ error_at (loc, "redeclaration of %<enum %E%>", name); if (enumloc != UNKNOWN_LOCATION) inform (enumloc, "originally defined here"); /* Completely replace its old definition. The old enumerators remain defined, however. 
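(Their CONST_DECLs were entered in the surrounding scope by build_enumerator when first seen, so clearing TYPE_VALUES below does not un-declare them.)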
*/ TYPE_VALUES (enumtype) = NULL_TREE; } the_enum->enum_next_value = integer_zero_node; the_enum->enum_overflow = 0; if (flag_short_enums) for (tree v = TYPE_MAIN_VARIANT (enumtype); v; v = TYPE_NEXT_VARIANT (v)) TYPE_PACKED (v) = 1; /* FIXME: This will issue a warning for a use of a type defined within sizeof in a statement expr. This is not terribly serious as C++ doesn't permit statement exprs within sizeof anyhow. */ if (warn_cxx_compat && (in_sizeof || in_typeof || in_alignof)) warning_at (loc, OPT_Wc___compat, "defining type in %qs expression is invalid in C++", (in_sizeof ? "sizeof" : (in_typeof ? "typeof" : "alignof"))); return enumtype; } /* After processing and defining all the values of an enumeration type, install their decls in the enumeration type and finish it off. ENUMTYPE is the type object, VALUES a list of decl-value pairs, and ATTRIBUTES are the specified attributes. Returns ENUMTYPE. */ tree finish_enum (tree enumtype, tree values, tree attributes) { tree pair, tem; tree minnode = NULL_TREE, maxnode = NULL_TREE; int precision; signop sign; bool toplevel = (file_scope == current_scope); struct lang_type *lt; decl_attributes (&enumtype, attributes, (int) ATTR_FLAG_TYPE_IN_PLACE); /* Calculate the maximum value of any enumerator in this type. */ if (values == error_mark_node) minnode = maxnode = integer_zero_node; else { minnode = maxnode = TREE_VALUE (values); for (pair = TREE_CHAIN (values); pair; pair = TREE_CHAIN (pair)) { tree value = TREE_VALUE (pair); if (tree_int_cst_lt (maxnode, value)) maxnode = value; if (tree_int_cst_lt (value, minnode)) minnode = value; } } /* Construct the final type of this enumeration. It is the same as one of the integral types - the narrowest one that fits, except that normally we only go as narrow as int - and signed iff any of the values are negative. */ sign = (tree_int_cst_sgn (minnode) >= 0) ? UNSIGNED : SIGNED; precision = MAX (tree_int_cst_min_precision (minnode, sign), tree_int_cst_min_precision (maxnode, sign)); /* If the precision of the type was specified with an attribute and it was too small, give an error. Otherwise, use it. */ if (TYPE_PRECISION (enumtype) && lookup_attribute ("mode", attributes)) { if (precision > TYPE_PRECISION (enumtype)) { TYPE_PRECISION (enumtype) = 0; error ("specified mode too small for enumerated values"); } else precision = TYPE_PRECISION (enumtype); } else TYPE_PRECISION (enumtype) = 0; if (TYPE_PACKED (enumtype) || precision > TYPE_PRECISION (integer_type_node) || TYPE_PRECISION (enumtype)) { tem = c_common_type_for_size (precision, sign == UNSIGNED ? 1 : 0); if (tem == NULL) { warning (0, "enumeration values exceed range of largest integer"); tem = long_long_integer_type_node; } } else tem = sign == UNSIGNED ? unsigned_type_node : integer_type_node; TYPE_MIN_VALUE (enumtype) = TYPE_MIN_VALUE (tem); TYPE_MAX_VALUE (enumtype) = TYPE_MAX_VALUE (tem); TYPE_UNSIGNED (enumtype) = TYPE_UNSIGNED (tem); SET_TYPE_ALIGN (enumtype, TYPE_ALIGN (tem)); TYPE_SIZE (enumtype) = NULL_TREE; TYPE_PRECISION (enumtype) = TYPE_PRECISION (tem); layout_type (enumtype); if (values != error_mark_node) { /* Change the type of the enumerators to be the enum type. We need to do this irrespective of the size of the enum, for proper type checking. Replace the DECL_INITIALs of the enumerators, and the value slots of the list, with copies that have the enum type; they cannot be modified in place because they may be shared (e.g. integer_zero_node). Finally, change the purpose slots to point to the names of the decls.
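Each pair arrives from build_enumerator as a (CONST_DECL . value) cons; after the loop below it is a (name . value) cons, the form stored in TYPE_VALUES.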
*/ for (pair = values; pair; pair = TREE_CHAIN (pair)) { tree enu = TREE_PURPOSE (pair); tree ini = DECL_INITIAL (enu); TREE_TYPE (enu) = enumtype; /* The ISO C Standard mandates enumerators to have type int, even though the underlying type of an enum type is unspecified. However, GCC allows enumerators of any integer type as an extension. build_enumerator() converts any enumerators that fit in an int to type int, to avoid promotions to unsigned types when comparing integers with enumerators that fit in the int range. When -pedantic is given, build_enumerator() would have already warned about those that don't fit. Here we convert the rest to the enumerator type. */ if (TREE_TYPE (ini) != integer_type_node) ini = convert (enumtype, ini); DECL_INITIAL (enu) = ini; TREE_PURPOSE (pair) = DECL_NAME (enu); TREE_VALUE (pair) = ini; } TYPE_VALUES (enumtype) = values; } /* Record the min/max values so that we can warn about bit-field enumerations that are too small for the values. */ lt = ggc_cleared_alloc<struct lang_type> (); lt->enum_min = minnode; lt->enum_max = maxnode; TYPE_LANG_SPECIFIC (enumtype) = lt; /* Fix up all variant types of this enum type. */ tree incomplete_vars = C_TYPE_INCOMPLETE_VARS (TYPE_MAIN_VARIANT (enumtype)); for (tem = TYPE_MAIN_VARIANT (enumtype); tem; tem = TYPE_NEXT_VARIANT (tem)) { C_TYPE_INCOMPLETE_VARS (tem) = NULL_TREE; if (tem == enumtype) continue; TYPE_VALUES (tem) = TYPE_VALUES (enumtype); TYPE_MIN_VALUE (tem) = TYPE_MIN_VALUE (enumtype); TYPE_MAX_VALUE (tem) = TYPE_MAX_VALUE (enumtype); TYPE_SIZE (tem) = TYPE_SIZE (enumtype); TYPE_SIZE_UNIT (tem) = TYPE_SIZE_UNIT (enumtype); SET_TYPE_MODE (tem, TYPE_MODE (enumtype)); TYPE_PRECISION (tem) = TYPE_PRECISION (enumtype); SET_TYPE_ALIGN (tem, TYPE_ALIGN (enumtype)); TYPE_USER_ALIGN (tem) = TYPE_USER_ALIGN (enumtype); TYPE_UNSIGNED (tem) = TYPE_UNSIGNED (enumtype); TYPE_LANG_SPECIFIC (tem) = TYPE_LANG_SPECIFIC (enumtype); } /* Finish debugging output for this type. */ rest_of_type_compilation (enumtype, toplevel); finish_incomplete_vars (incomplete_vars, toplevel); /* If this enum is defined inside a struct, add it to struct_types. */ if (warn_cxx_compat && struct_parse_info != NULL && !in_sizeof && !in_typeof && !in_alignof) struct_parse_info->struct_types.safe_push (enumtype); C_TYPE_BEING_DEFINED (enumtype) = 0; return enumtype; } /* Build and install a CONST_DECL for one value of the current enumeration type (one that was begun with start_enum). DECL_LOC is the location of the enumerator. LOC is the location of the '=' operator if any, DECL_LOC otherwise. Return a tree-list containing the CONST_DECL and its value. Assignment of sequential values by default is handled here. */ tree build_enumerator (location_t decl_loc, location_t loc, struct c_enum_contents *the_enum, tree name, tree value) { tree decl, type; /* Validate and default VALUE. */ if (value != NULL_TREE) { /* Don't issue more errors for error_mark_node (i.e. an undeclared identifier) - just ignore the value expression.
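For example, in 'enum e { A = nonexistent };' the undeclared identifier has already been diagnosed, so A quietly takes the next sequential value instead.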
*/ if (value == error_mark_node) value = NULL_TREE; else if (!INTEGRAL_TYPE_P (TREE_TYPE (value))) { error_at (loc, "enumerator value for %qE is not an integer constant", name); value = NULL_TREE; } else { if (TREE_CODE (value) != INTEGER_CST) { value = c_fully_fold (value, false, NULL); if (TREE_CODE (value) == INTEGER_CST) pedwarn (loc, OPT_Wpedantic, "enumerator value for %qE is not an integer " "constant expression", name); } if (TREE_CODE (value) != INTEGER_CST) { error ("enumerator value for %qE is not an integer constant", name); value = NULL_TREE; } else { value = default_conversion (value); constant_expression_warning (value); } } } /* Default based on previous value. */ /* It should no longer be possible to have NON_LVALUE_EXPR in the default. */ if (value == NULL_TREE) { value = the_enum->enum_next_value; if (the_enum->enum_overflow) error_at (loc, "overflow in enumeration values"); } /* Even though the underlying type of an enum is unspecified, the type of enumeration constants is explicitly defined as int (6.4.4.3/2 in the C99 Standard). GCC allows any integer type as an extension. */ else if (!int_fits_type_p (value, integer_type_node)) pedwarn (loc, OPT_Wpedantic, "ISO C restricts enumerator values to range of %<int%>"); /* The ISO C Standard mandates enumerators to have type int, even though the underlying type of an enum type is unspecified. However, GCC allows enumerators of any integer type as an extension. Here we convert any enumerators that fit in an int to type int, to avoid promotions to unsigned types when comparing integers with enumerators that fit in the int range. When -pedantic is given, we would have already warned about those that don't fit. We have to do this here rather than in finish_enum because this value may be used to define more enumerators. */ if (int_fits_type_p (value, integer_type_node)) value = convert (integer_type_node, value); /* Set basis for default for next value. */ the_enum->enum_next_value = build_binary_op (EXPR_LOC_OR_LOC (value, input_location), PLUS_EXPR, value, integer_one_node, false); the_enum->enum_overflow = tree_int_cst_lt (the_enum->enum_next_value, value); /* Now create a declaration for the enum value name. */ type = TREE_TYPE (value); type = c_common_type_for_size (MAX (TYPE_PRECISION (type), TYPE_PRECISION (integer_type_node)), (TYPE_PRECISION (type) >= TYPE_PRECISION (integer_type_node) && TYPE_UNSIGNED (type))); decl = build_decl (decl_loc, CONST_DECL, name, type); DECL_INITIAL (decl) = convert (type, value); pushdecl (decl); return tree_cons (decl, value, NULL_TREE); } /* Implement LANG_HOOKS_SIMULATE_ENUM_DECL. */ tree c_simulate_enum_decl (location_t loc, const char *name, vec<string_int_pair> values) { location_t saved_loc = input_location; input_location = loc; struct c_enum_contents the_enum; tree enumtype = start_enum (loc, &the_enum, get_identifier (name)); tree value_chain = NULL_TREE; string_int_pair *value; unsigned int i; FOR_EACH_VEC_ELT (values, i, value) { tree decl = build_enumerator (loc, loc, &the_enum, get_identifier (value->first), build_int_cst (integer_type_node, value->second)); TREE_CHAIN (decl) = value_chain; value_chain = decl; } finish_enum (enumtype, nreverse (value_chain), NULL_TREE); input_location = saved_loc; return enumtype; } /* Create the FUNCTION_DECL for a function definition. DECLSPECS, DECLARATOR and ATTRIBUTES are the parts of the declaration; they describe the function's name and the type it returns, but twisted together in a fashion that parallels the syntax of C.
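(grokdeclarator, called just below, is what untangles them into a single FUNCTION_DECL.)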
This function creates a binding context for the function body as well as setting up the FUNCTION_DECL in current_function_decl. Returns true on success. If the DECLARATOR is not suitable for a function (it defines a datum instead), we return false to report a parse error. */ bool start_function (struct c_declspecs *declspecs, struct c_declarator *declarator, tree attributes) { tree decl1, old_decl; tree restype, resdecl; location_t loc; current_function_returns_value = 0; /* Assume, until we see it does. */ current_function_returns_null = 0; current_function_returns_abnormally = 0; warn_about_return_type = 0; c_switch_stack = NULL; /* Indicate no valid break/continue context by setting these variables to some non-null, non-label value. We'll notice and emit the proper error message in c_finish_bc_stmt. */ c_break_label = c_cont_label = size_zero_node; decl1 = grokdeclarator (declarator, declspecs, FUNCDEF, true, NULL, &attributes, NULL, NULL, DEPRECATED_NORMAL); invoke_plugin_callbacks (PLUGIN_START_PARSE_FUNCTION, decl1); /* If the declarator is not suitable for a function definition, cause a syntax error. */ if (decl1 == NULL_TREE || TREE_CODE (decl1) != FUNCTION_DECL) return false; loc = DECL_SOURCE_LOCATION (decl1); /* A nested function is not global. */ if (current_function_decl != NULL_TREE) TREE_PUBLIC (decl1) = 0; c_decl_attributes (&decl1, attributes, 0); if (DECL_DECLARED_INLINE_P (decl1) && DECL_UNINLINABLE (decl1) && lookup_attribute ("noinline", DECL_ATTRIBUTES (decl1))) warning_at (loc, OPT_Wattributes, "inline function %qD given attribute %qs", decl1, "noinline"); /* Handle gnu_inline attribute. */ if (declspecs->inline_p && !flag_gnu89_inline && TREE_CODE (decl1) == FUNCTION_DECL && (lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (decl1)) || current_function_decl)) { if (declspecs->storage_class != csc_static) DECL_EXTERNAL (decl1) = !DECL_EXTERNAL (decl1); } announce_function (decl1); if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (decl1)))) { error_at (loc, "return type is an incomplete type"); /* Make it return void instead. */ TREE_TYPE (decl1) = build_function_type (void_type_node, TYPE_ARG_TYPES (TREE_TYPE (decl1))); } if (warn_about_return_type) warn_defaults_to (loc, flag_isoc99 ? OPT_Wimplicit_int : (warn_return_type > 0 ? OPT_Wreturn_type : OPT_Wimplicit_int), "return type defaults to %<int%>"); /* Make the init_value nonzero so pushdecl knows this is not tentative. error_mark_node is replaced below (in pop_scope) with the BLOCK. */ DECL_INITIAL (decl1) = error_mark_node; /* If this definition isn't a prototype and we had a prototype declaration before, copy the arg type info from that prototype. 
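For example, after a prototype 'int f (double);', an old-style definition 'int f (x) double x; { ... }' picks up the prototype's argument types here; mismatches are diagnosed later in store_parm_decls_oldstyle.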
*/ old_decl = lookup_name_in_scope (DECL_NAME (decl1), current_scope); if (old_decl && TREE_CODE (old_decl) != FUNCTION_DECL) old_decl = NULL_TREE; current_function_prototype_locus = UNKNOWN_LOCATION; current_function_prototype_built_in = false; current_function_prototype_arg_types = NULL_TREE; if (!prototype_p (TREE_TYPE (decl1))) { if (old_decl != NULL_TREE && TREE_CODE (TREE_TYPE (old_decl)) == FUNCTION_TYPE && comptypes (TREE_TYPE (TREE_TYPE (decl1)), TREE_TYPE (TREE_TYPE (old_decl)))) { if (stdarg_p (TREE_TYPE (old_decl))) { auto_diagnostic_group d; warning_at (loc, 0, "%q+D defined as variadic function " "without prototype", decl1); locate_old_decl (old_decl); } TREE_TYPE (decl1) = composite_type (TREE_TYPE (old_decl), TREE_TYPE (decl1)); current_function_prototype_locus = DECL_SOURCE_LOCATION (old_decl); current_function_prototype_built_in = C_DECL_BUILTIN_PROTOTYPE (old_decl); current_function_prototype_arg_types = TYPE_ARG_TYPES (TREE_TYPE (decl1)); } if (TREE_PUBLIC (decl1)) { /* If there is an external prototype declaration of this function, record its location but do not copy information to this decl. This may be an invisible declaration (built-in or in a scope which has finished) or simply have more refined argument types than any declaration found above. */ struct c_binding *b; for (b = I_SYMBOL_BINDING (DECL_NAME (decl1)); b; b = b->shadowed) if (B_IN_SCOPE (b, external_scope)) break; if (b) { tree ext_decl, ext_type; ext_decl = b->decl; ext_type = b->u.type ? b->u.type : TREE_TYPE (ext_decl); if (TREE_CODE (ext_type) == FUNCTION_TYPE && comptypes (TREE_TYPE (TREE_TYPE (decl1)), TREE_TYPE (ext_type))) { current_function_prototype_locus = DECL_SOURCE_LOCATION (ext_decl); current_function_prototype_built_in = C_DECL_BUILTIN_PROTOTYPE (ext_decl); current_function_prototype_arg_types = TYPE_ARG_TYPES (ext_type); } } } } /* Optionally warn of old-fashioned def with no previous prototype. */ if (warn_strict_prototypes && old_decl != error_mark_node && !prototype_p (TREE_TYPE (decl1)) && C_DECL_ISNT_PROTOTYPE (old_decl)) warning_at (loc, OPT_Wstrict_prototypes, "function declaration isn%'t a prototype"); /* Optionally warn of any global def with no previous prototype. */ else if (warn_missing_prototypes && old_decl != error_mark_node && TREE_PUBLIC (decl1) && !MAIN_NAME_P (DECL_NAME (decl1)) && C_DECL_ISNT_PROTOTYPE (old_decl) && !DECL_DECLARED_INLINE_P (decl1)) warning_at (loc, OPT_Wmissing_prototypes, "no previous prototype for %qD", decl1); /* Optionally warn of any def with no previous prototype if the function has already been used. */ else if (warn_missing_prototypes && old_decl != NULL_TREE && old_decl != error_mark_node && TREE_USED (old_decl) && !prototype_p (TREE_TYPE (old_decl))) warning_at (loc, OPT_Wmissing_prototypes, "%qD was used with no prototype before its definition", decl1); /* Optionally warn of any global def with no previous declaration. */ else if (warn_missing_declarations && TREE_PUBLIC (decl1) && old_decl == NULL_TREE && !MAIN_NAME_P (DECL_NAME (decl1)) && !DECL_DECLARED_INLINE_P (decl1)) warning_at (loc, OPT_Wmissing_declarations, "no previous declaration for %qD", decl1); /* Optionally warn of any def with no previous declaration if the function has already been used. 
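This catches a definition that was preceded only by an implicit declaration, i.e. the function was called before any declaration was seen.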
*/ else if (warn_missing_declarations && old_decl != NULL_TREE && old_decl != error_mark_node && TREE_USED (old_decl) && C_DECL_IMPLICIT (old_decl)) warning_at (loc, OPT_Wmissing_declarations, "%qD was used with no declaration before its definition", decl1); /* This function exists in static storage. (This does not mean `static' in the C sense!) */ TREE_STATIC (decl1) = 1; /* This is the earliest point at which we might know the assembler name of the function. Thus, if it's set before this, die horribly. */ gcc_assert (!DECL_ASSEMBLER_NAME_SET_P (decl1)); /* If #pragma weak was used, mark the decl weak now. */ if (current_scope == file_scope) maybe_apply_pragma_weak (decl1); /* Warn for unlikely, improbable, or stupid declarations of `main'. */ if (warn_main && MAIN_NAME_P (DECL_NAME (decl1))) { if (TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (decl1))) != integer_type_node) pedwarn (loc, OPT_Wmain, "return type of %qD is not %<int%>", decl1); else if (TYPE_ATOMIC (TREE_TYPE (TREE_TYPE (decl1)))) pedwarn (loc, OPT_Wmain, "%<_Atomic%>-qualified return type of %qD", decl1); check_main_parameter_types (decl1); if (!TREE_PUBLIC (decl1)) pedwarn (loc, OPT_Wmain, "%qD is normally a non-static function", decl1); } /* Record the decl so that the function name is defined. If we already have a decl for this name, and it is a FUNCTION_DECL, use the old decl. */ current_function_decl = pushdecl (decl1); push_scope (); declare_parm_level (); restype = TREE_TYPE (TREE_TYPE (current_function_decl)); resdecl = build_decl (loc, RESULT_DECL, NULL_TREE, restype); DECL_ARTIFICIAL (resdecl) = 1; DECL_IGNORED_P (resdecl) = 1; DECL_RESULT (current_function_decl) = resdecl; start_fname_decls (); return true; } /* Subroutine of store_parm_decls which handles new-style function definitions (prototype format). The parms already have decls, so we need only record them as in effect and complain if any redundant old-style parm decls were written. */ static void store_parm_decls_newstyle (tree fndecl, const struct c_arg_info *arg_info) { tree decl; c_arg_tag *tag; unsigned ix; if (current_scope->bindings) { error_at (DECL_SOURCE_LOCATION (fndecl), "old-style parameter declarations in prototyped " "function definition"); /* Get rid of the old-style declarations. */ pop_scope (); push_scope (); } /* Don't issue this warning for nested functions, and don't issue this warning if we got here because ARG_INFO_TYPES was error_mark_node (this happens when a function definition has just an ellipsis in its parameter list). */ else if (!in_system_header_at (input_location) && !current_function_scope && arg_info->types != error_mark_node) warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wtraditional, "traditional C rejects ISO C style function definitions"); /* Now make all the parameter declarations visible in the function body. We can bypass most of the grunt work of pushdecl. */ for (decl = arg_info->parms; decl; decl = DECL_CHAIN (decl)) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl)) { bind (DECL_NAME (decl), decl, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); if (!TREE_USED (decl)) warn_if_shadowing (decl); } else error_at (DECL_SOURCE_LOCATION (decl), "parameter name omitted"); } /* Record the parameter list in the function declaration. */ DECL_ARGUMENTS (fndecl) = arg_info->parms; /* Now make all the ancillary declarations visible, likewise. 
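(These are the CONST_DECLs and TYPE_DECLs that get_parm_info collected in ARG_INFO->others, e.g. from an enum or struct declared inside the parameter list.)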
*/ for (decl = arg_info->others; decl; decl = DECL_CHAIN (decl)) { DECL_CONTEXT (decl) = current_function_decl; if (DECL_NAME (decl)) bind (DECL_NAME (decl), decl, current_scope, /*invisible=*/false, /*nested=*/(TREE_CODE (decl) == FUNCTION_DECL), UNKNOWN_LOCATION); } /* And all the tag declarations. */ FOR_EACH_VEC_SAFE_ELT_REVERSE (arg_info->tags, ix, tag) if (tag->id) bind (tag->id, tag->type, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } /* Subroutine of store_parm_decls which handles old-style function definitions (separate parameter list and declarations). */ static void store_parm_decls_oldstyle (tree fndecl, const struct c_arg_info *arg_info) { struct c_binding *b; tree parm, decl, last; tree parmids = arg_info->parms; hash_set<tree> seen_args; if (!in_system_header_at (input_location)) { if (flag_isoc2x) pedwarn (DECL_SOURCE_LOCATION (fndecl), OPT_Wold_style_definition, "old-style function definition"); else warning_at (DECL_SOURCE_LOCATION (fndecl), OPT_Wold_style_definition, "old-style function definition"); } if (current_scope->had_vla_unspec) error ("%<[*]%> not allowed in other than function prototype scope"); /* Match each formal parameter name with its declaration. Save each decl in the appropriate TREE_PURPOSE slot of the parmids chain. */ for (parm = parmids; parm; parm = TREE_CHAIN (parm)) { if (TREE_VALUE (parm) == NULL_TREE) { error_at (DECL_SOURCE_LOCATION (fndecl), "parameter name missing from parameter list"); TREE_PURPOSE (parm) = NULL_TREE; continue; } b = I_SYMBOL_BINDING (TREE_VALUE (parm)); if (b && B_IN_CURRENT_SCOPE (b)) { decl = b->decl; /* Skip erroneous parameters. */ if (decl == error_mark_node) continue; /* If we got something other than a PARM_DECL it is an error. */ if (TREE_CODE (decl) != PARM_DECL) { error_at (DECL_SOURCE_LOCATION (decl), "%qD declared as a non-parameter", decl); continue; } /* If the declaration is already marked, we have a duplicate name. Complain and ignore the duplicate. */ else if (seen_args.contains (decl)) { error_at (DECL_SOURCE_LOCATION (decl), "multiple parameters named %qD", decl); TREE_PURPOSE (parm) = NULL_TREE; continue; } /* If the declaration says "void", complain and turn it into an int. */ else if (VOID_TYPE_P (TREE_TYPE (decl))) { error_at (DECL_SOURCE_LOCATION (decl), "parameter %qD declared with void type", decl); TREE_TYPE (decl) = integer_type_node; DECL_ARG_TYPE (decl) = integer_type_node; layout_decl (decl, 0); } warn_if_shadowing (decl); } /* If no declaration found, default to int. */ else { /* FIXME diagnostics: This should be the location of the argument, not the FNDECL. E.g., for an old-style declaration int f10(v) { blah; } We should use the location of the V, not the F10. Unfortunately, the V is an IDENTIFIER_NODE which has no location. In the future we need locations for c_arg_info entries. See gcc.dg/Wshadow-3.c for an example of this problem. */ decl = build_decl (DECL_SOURCE_LOCATION (fndecl), PARM_DECL, TREE_VALUE (parm), integer_type_node); DECL_ARG_TYPE (decl) = TREE_TYPE (decl); pushdecl (decl); warn_if_shadowing (decl); if (flag_isoc99) pedwarn (DECL_SOURCE_LOCATION (decl), OPT_Wimplicit_int, "type of %qD defaults to %<int%>", decl); else warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wmissing_parameter_type, "type of %qD defaults to %<int%>", decl); } TREE_PURPOSE (parm) = decl; seen_args.add (decl); } /* Now examine the parms chain for incomplete declarations and declarations with no corresponding names. 
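For example, 'int f (a) int a, b; { ... }' declares B without listing it as a parameter, which draws the second error below.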
*/ for (b = current_scope->bindings; b; b = b->prev) { parm = b->decl; if (TREE_CODE (parm) != PARM_DECL) continue; if (TREE_TYPE (parm) != error_mark_node && !COMPLETE_TYPE_P (TREE_TYPE (parm))) { error_at (DECL_SOURCE_LOCATION (parm), "parameter %qD has incomplete type", parm); TREE_TYPE (parm) = error_mark_node; } if (!seen_args.contains (parm)) { error_at (DECL_SOURCE_LOCATION (parm), "declaration for parameter %qD but no such parameter", parm); /* Pretend the parameter was not missing. This gets us to a standard state and minimizes further error messages. */ parmids = chainon (parmids, tree_cons (parm, 0, 0)); } } /* Chain the declarations together in the order of the list of names. Store that chain in the function decl, replacing the list of names. Update the current scope to match. */ DECL_ARGUMENTS (fndecl) = NULL_TREE; for (parm = parmids; parm; parm = TREE_CHAIN (parm)) if (TREE_PURPOSE (parm)) break; if (parm && TREE_PURPOSE (parm)) { last = TREE_PURPOSE (parm); DECL_ARGUMENTS (fndecl) = last; for (parm = TREE_CHAIN (parm); parm; parm = TREE_CHAIN (parm)) if (TREE_PURPOSE (parm)) { DECL_CHAIN (last) = TREE_PURPOSE (parm); last = TREE_PURPOSE (parm); } DECL_CHAIN (last) = NULL_TREE; } /* If there was a previous prototype, set the DECL_ARG_TYPE of each argument according to the type previously specified, and report any mismatches. */ if (current_function_prototype_arg_types) { tree type; for (parm = DECL_ARGUMENTS (fndecl), type = current_function_prototype_arg_types; parm || (type != NULL_TREE && TREE_VALUE (type) != error_mark_node && TYPE_MAIN_VARIANT (TREE_VALUE (type)) != void_type_node); parm = DECL_CHAIN (parm), type = TREE_CHAIN (type)) { if (parm == NULL_TREE || type == NULL_TREE || (TREE_VALUE (type) != error_mark_node && TYPE_MAIN_VARIANT (TREE_VALUE (type)) == void_type_node)) { if (current_function_prototype_built_in) warning_at (DECL_SOURCE_LOCATION (fndecl), 0, "number of arguments doesn%'t match " "built-in prototype"); else { /* FIXME diagnostics: This should be the location of FNDECL, but there is bug when a prototype is declared inside function context, but defined outside of it (e.g., gcc.dg/pr15698-2.c). In which case FNDECL gets the location of the prototype, not the definition. */ error_at (input_location, "number of arguments doesn%'t match prototype"); error_at (current_function_prototype_locus, "prototype declaration"); } break; } /* Type for passing arg must be consistent with that declared for the arg. ISO C says we take the unqualified type for parameters declared with qualified type. */ if (TREE_TYPE (parm) != error_mark_node && TREE_VALUE (type) != error_mark_node && ((TYPE_ATOMIC (DECL_ARG_TYPE (parm)) != TYPE_ATOMIC (TREE_VALUE (type))) || !comptypes (TYPE_MAIN_VARIANT (DECL_ARG_TYPE (parm)), TYPE_MAIN_VARIANT (TREE_VALUE (type))))) { if ((TYPE_ATOMIC (DECL_ARG_TYPE (parm)) == TYPE_ATOMIC (TREE_VALUE (type))) && (TYPE_MAIN_VARIANT (TREE_TYPE (parm)) == TYPE_MAIN_VARIANT (TREE_VALUE (type)))) { /* Adjust argument to match prototype. E.g. a previous `int foo(float);' prototype causes `int foo(x) float x; {...}' to be treated like `int foo(float x) {...}'. This is particularly useful for argument types like uid_t. */ DECL_ARG_TYPE (parm) = TREE_TYPE (parm); if (targetm.calls.promote_prototypes (TREE_TYPE (current_function_decl)) && INTEGRAL_TYPE_P (TREE_TYPE (parm)) && (TYPE_PRECISION (TREE_TYPE (parm)) < TYPE_PRECISION (integer_type_node))) DECL_ARG_TYPE (parm) = c_type_promotes_to (TREE_TYPE (parm)); /* ??? 
Is it possible to get here with a built-in prototype or will it always have been diagnosed as conflicting with an old-style definition and discarded? */ if (current_function_prototype_built_in) warning_at (DECL_SOURCE_LOCATION (parm), OPT_Wpedantic, "promoted argument %qD " "doesn%'t match built-in prototype", parm); else { pedwarn (DECL_SOURCE_LOCATION (parm), OPT_Wpedantic, "promoted argument %qD " "doesn%'t match prototype", parm); pedwarn (current_function_prototype_locus, OPT_Wpedantic, "prototype declaration"); } } else { if (current_function_prototype_built_in) warning_at (DECL_SOURCE_LOCATION (parm), 0, "argument %qD doesn%'t match " "built-in prototype", parm); else { error_at (DECL_SOURCE_LOCATION (parm), "argument %qD doesn%'t match prototype", parm); error_at (current_function_prototype_locus, "prototype declaration"); } } } } TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = NULL_TREE; } /* Otherwise, create a prototype that would match. */ else { tree actual = NULL_TREE, last = NULL_TREE, type; for (parm = DECL_ARGUMENTS (fndecl); parm; parm = DECL_CHAIN (parm)) { type = tree_cons (NULL_TREE, DECL_ARG_TYPE (parm), NULL_TREE); if (last) TREE_CHAIN (last) = type; else actual = type; last = type; } type = tree_cons (NULL_TREE, void_type_node, NULL_TREE); if (last) TREE_CHAIN (last) = type; else actual = type; /* We are going to assign a new value for the TYPE_ACTUAL_ARG_TYPES of the type of this function, but we need to avoid having this affect the types of other similarly-typed functions, so we must first force the generation of an identical (but separate) type node for the relevant function type. The new node we create will be a variant of the main variant of the original function type. */ TREE_TYPE (fndecl) = build_variant_type_copy (TREE_TYPE (fndecl)); TYPE_ACTUAL_ARG_TYPES (TREE_TYPE (fndecl)) = actual; } } /* Store parameter declarations passed in ARG_INFO into the current function declaration. */ void store_parm_decls_from (struct c_arg_info *arg_info) { current_function_arg_info = arg_info; store_parm_decls (); } /* Called by walk_tree to look for and update context-less labels or labels with context in the parent function. */ static tree set_labels_context_r (tree *tp, int *walk_subtrees, void *data) { tree ctx = static_cast<tree>(data); if (TREE_CODE (*tp) == LABEL_EXPR && (DECL_CONTEXT (LABEL_EXPR_LABEL (*tp)) == NULL_TREE || DECL_CONTEXT (LABEL_EXPR_LABEL (*tp)) == DECL_CONTEXT (ctx))) { DECL_CONTEXT (LABEL_EXPR_LABEL (*tp)) = ctx; *walk_subtrees = 0; } return NULL_TREE; } /* Store the parameter declarations into the current function declaration. This is called after parsing the parameter declarations, before digesting the body of the function. For an old-style definition, construct a prototype out of the old-style parameter declarations and inject it into the function's type. */ void store_parm_decls (void) { tree fndecl = current_function_decl; bool proto; /* The argument information block for FNDECL. */ struct c_arg_info *arg_info = current_function_arg_info; current_function_arg_info = 0; /* True if this definition is written with a prototype. In C2X, an empty argument list was converted to (void) in grokparms; in older C standard versions, it does not give the function a type with a prototype for future calls. */ proto = arg_info->types != 0; if (proto) store_parm_decls_newstyle (fndecl, arg_info); else store_parm_decls_oldstyle (fndecl, arg_info); /* The next call to push_scope will be a function body. 
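(push_scope notices this flag and reuses the parameter scope rather than pushing a new one, since the parameters and the outermost block of the body are the same scope in C99 terms.)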
*/ next_is_function_body = true; /* Write a record describing this function definition to the prototypes file (if requested). */ gen_aux_info_record (fndecl, 1, 0, proto); /* Initialize the RTL code for the function. */ allocate_struct_function (fndecl, false); if (warn_unused_local_typedefs) cfun->language = ggc_cleared_alloc<language_function> (); /* Begin the statement tree for this function. */ DECL_SAVED_TREE (fndecl) = push_stmt_list (); /* ??? Insert the contents of the pending sizes list into the function to be evaluated. The only reason left to have this is void foo(int n, int array[n++]) because we throw away the array type in favor of a pointer type, and thus won't naturally see the SAVE_EXPR containing the increment. All other pending sizes would be handled by gimplify_parameters. */ if (arg_info->pending_sizes) { /* In very special circumstances, e.g. for code like _Atomic int i = 5; void f (int a[i += 2]) {} we need to execute the atomic assignment on function entry. But in this case, it is not just a straight store, it has the op= form, which means that build_atomic_assign has generated gotos, labels, etc. Because at that time the function decl for F has not been created yet, those labels do not have any function context. But we have the fndecl now, so update the labels accordingly. gimplify_expr would crash otherwise. Or with nested functions the labels could be created with parent function's context, while when the statement is emitted at the start of the nested function, it needs the nested function's context. */ walk_tree_without_duplicates (&arg_info->pending_sizes, set_labels_context_r, fndecl); add_stmt (arg_info->pending_sizes); } } /* Store PARM_DECLs in PARMS into scope temporarily. Used for c_finish_omp_declare_simd for function prototypes. No diagnostics should be done. */ void temp_store_parm_decls (tree fndecl, tree parms) { push_scope (); for (tree p = parms; p; p = DECL_CHAIN (p)) { DECL_CONTEXT (p) = fndecl; if (DECL_NAME (p)) bind (DECL_NAME (p), p, current_scope, /*invisible=*/false, /*nested=*/false, UNKNOWN_LOCATION); } } /* Undo what temp_store_parm_decls did. */ void temp_pop_parm_decls (void) { /* Clear all bindings in this temporary scope, so that pop_scope doesn't create a BLOCK. */ struct c_binding *b = current_scope->bindings; current_scope->bindings = NULL; for (; b; b = free_binding_and_advance (b)) { gcc_assert (TREE_CODE (b->decl) == PARM_DECL || b->decl == error_mark_node); gcc_assert (I_SYMBOL_BINDING (b->id) == b); I_SYMBOL_BINDING (b->id) = b->shadowed; if (b->shadowed && b->shadowed->u.type) TREE_TYPE (b->shadowed->decl) = b->shadowed->u.type; } pop_scope (); } /* Finish up a function declaration and compile that function all the way to assembler language output. Then free the storage for the function definition. This is called after parsing the body of the function definition. */ void finish_function (location_t end_loc) { tree fndecl = current_function_decl; if (c_dialect_objc ()) objc_finish_function (); if (TREE_CODE (fndecl) == FUNCTION_DECL && targetm.calls.promote_prototypes (TREE_TYPE (fndecl))) { tree args = DECL_ARGUMENTS (fndecl); for (; args; args = DECL_CHAIN (args)) { tree type = TREE_TYPE (args); if (INTEGRAL_TYPE_P (type) && TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)) DECL_ARG_TYPE (args) = c_type_promotes_to (type); } } if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node) BLOCK_SUPERCONTEXT (DECL_INITIAL (fndecl)) = fndecl; /* Must mark the RESULT_DECL as being in this function. 
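
   (As an aside, a hedged illustration of the parameter promotion loop
   at the top of finish_function above: on a target where
   promote_prototypes is true, a definition such as

     void f (short s) { }

   gets DECL_ARG_TYPE (s) adjusted to int, matching the default
   argument promotions an unprototyped caller would apply.  Example
   code only; the exact behaviour is target-dependent.)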
*/ if (DECL_RESULT (fndecl) && DECL_RESULT (fndecl) != error_mark_node) DECL_CONTEXT (DECL_RESULT (fndecl)) = fndecl; if (MAIN_NAME_P (DECL_NAME (fndecl)) && flag_hosted && TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (fndecl))) == integer_type_node && flag_isoc99) { /* Hack. We don't want the middle-end to warn that this return is unreachable, so we mark its location as special. Using UNKNOWN_LOCATION has the problem that it gets clobbered in annotate_one_with_locus. A cleaner solution might be to ensure ! should_carry_locus_p (stmt), but that needs a flag. */ c_finish_return (BUILTINS_LOCATION, integer_zero_node, NULL_TREE); } /* Tie off the statement tree for this function. */ DECL_SAVED_TREE (fndecl) = pop_stmt_list (DECL_SAVED_TREE (fndecl)); finish_fname_decls (); /* Complain if there's no return statement only if option specified on command line. */ if (warn_return_type > 0 && TREE_CODE (TREE_TYPE (TREE_TYPE (fndecl))) != VOID_TYPE && !current_function_returns_value && !current_function_returns_null /* Don't complain if we are no-return. */ && !current_function_returns_abnormally /* Don't complain if we are declared noreturn. */ && !TREE_THIS_VOLATILE (fndecl) /* Don't warn for main(). */ && !MAIN_NAME_P (DECL_NAME (fndecl)) /* Or if they didn't actually specify a return type. */ && !C_FUNCTION_IMPLICIT_INT (fndecl) /* Normally, with -Wreturn-type, flow will complain, but we might optimize out static functions. */ && !TREE_PUBLIC (fndecl) && targetm.warn_func_return (fndecl) && warning (OPT_Wreturn_type, "no return statement in function returning non-void")) TREE_NO_WARNING (fndecl) = 1; /* Complain about parameters that are only set, but never otherwise used. */ if (warn_unused_but_set_parameter) { tree decl; for (decl = DECL_ARGUMENTS (fndecl); decl; decl = DECL_CHAIN (decl)) if (TREE_USED (decl) && TREE_CODE (decl) == PARM_DECL && !DECL_READ_P (decl) && DECL_NAME (decl) && !DECL_ARTIFICIAL (decl) && !TREE_NO_WARNING (decl)) warning_at (DECL_SOURCE_LOCATION (decl), OPT_Wunused_but_set_parameter, "parameter %qD set but not used", decl); } /* Complain about locally defined typedefs that are not used in this function. */ maybe_warn_unused_local_typedefs (); /* Possibly warn about unused parameters. */ if (warn_unused_parameter) do_warn_unused_parameter (fndecl); /* Store the end of the function, so that we get good line number info for the epilogue. */ cfun->function_end_locus = end_loc; /* Finalize the ELF visibility for the function. */ c_determine_visibility (fndecl); /* For GNU C extern inline functions disregard inline limits. */ if (DECL_EXTERNAL (fndecl) && DECL_DECLARED_INLINE_P (fndecl) && (flag_gnu89_inline || lookup_attribute ("gnu_inline", DECL_ATTRIBUTES (fndecl)))) DECL_DISREGARD_INLINE_LIMITS (fndecl) = 1; /* Genericize before inlining. Delay genericizing nested functions until their parent function is genericized. Since finalizing requires GENERIC, delay that as well. */ if (DECL_INITIAL (fndecl) && DECL_INITIAL (fndecl) != error_mark_node && !undef_nested_function) { if (!decl_function_context (fndecl)) { invoke_plugin_callbacks (PLUGIN_PRE_GENERICIZE, fndecl); c_genericize (fndecl); /* ??? Objc emits functions after finalizing the compilation unit. This should be cleaned up later and this conditional removed. */ if (symtab->global_info_ready) { cgraph_node::add_new_function (fndecl, false); return; } cgraph_node::finalize_function (fndecl, false); } else { /* Register this function with cgraph just far enough to get it added to our parent's nested function list. 
Handy, since the C front end doesn't have such a list. */ (void) cgraph_node::get_create (fndecl); } } if (!decl_function_context (fndecl)) undef_nested_function = false; if (cfun->language != NULL) { ggc_free (cfun->language); cfun->language = NULL; } /* We're leaving the context of this function, so zap cfun. It's still in DECL_STRUCT_FUNCTION, and we'll restore it in tree_rest_of_compilation. */ set_cfun (NULL); invoke_plugin_callbacks (PLUGIN_FINISH_PARSE_FUNCTION, current_function_decl); current_function_decl = NULL; } /* Check the declarations given in a for-loop for satisfying the C99 constraints. If exactly one such decl is found, return it. LOC is the location of the opening parenthesis of the for loop. The last parameter allows you to control the "for loop initial declarations are only allowed in C99 mode". Normally, you should pass flag_isoc99 as that parameter. But in some cases (Objective-C foreach loop, for example) we want to run the checks in this function even if not in C99 mode, so we allow the caller to turn off the error about not being in C99 mode. */ tree check_for_loop_decls (location_t loc, bool turn_off_iso_c99_error) { struct c_binding *b; tree one_decl = NULL_TREE; int n_decls = 0; if (!turn_off_iso_c99_error) { static bool hint = true; /* If we get here, declarations have been used in a for loop without the C99 for loop scope. This doesn't make much sense, so don't allow it. */ error_at (loc, "%<for%> loop initial declarations " "are only allowed in C99 or C11 mode"); if (hint) { inform (loc, "use option %<-std=c99%>, %<-std=gnu99%>, %<-std=c11%> or " "%<-std=gnu11%> to compile your code"); hint = false; } return NULL_TREE; } else pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support %<for%> loop " "initial declarations"); /* C99 subclause 6.8.5 paragraph 3: [#3] The declaration part of a for statement shall only declare identifiers for objects having storage class auto or register. It isn't clear whether, in this sentence, "identifiers" binds to "shall only declare" or to "objects" - that is, whether all identifiers declared must be identifiers for objects, or whether the restriction only applies to those that are. (A question on this in comp.std.c in November 2000 received no answer.) We implement the strictest interpretation, to avoid creating an extension which later causes problems. */ for (b = current_scope->bindings; b; b = b->prev) { tree id = b->id; tree decl = b->decl; if (!id) continue; switch (TREE_CODE (decl)) { case VAR_DECL: { location_t decl_loc = DECL_SOURCE_LOCATION (decl); if (TREE_STATIC (decl)) error_at (decl_loc, "declaration of static variable %qD in %<for%> loop " "initial declaration", decl); else if (DECL_EXTERNAL (decl)) error_at (decl_loc, "declaration of %<extern%> variable %qD in %<for%> loop " "initial declaration", decl); } break; case RECORD_TYPE: error_at (loc, "%<struct %E%> declared in %<for%> loop initial " "declaration", id); break; case UNION_TYPE: error_at (loc, "%<union %E%> declared in %<for%> loop initial declaration", id); break; case ENUMERAL_TYPE: error_at (loc, "%<enum %E%> declared in %<for%> loop " "initial declaration", id); break; default: error_at (loc, "declaration of non-variable " "%qD in %<for%> loop initial declaration", decl); } n_decls++; one_decl = decl; } return n_decls == 1 ? one_decl : NULL_TREE; } /* Save and reinitialize the variables used during compilation of a C function. 
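
   One illustrative reason this save/restore exists is the GNU C
   nested-function extension, where a function body is compiled in the
   middle of its containing function (sketch, not part of this file):

     int outer (int x)
     {
       int inner (int y) { return y + 1; }
       return inner (x);
     }

   outer's statement tree, break/continue labels and switch stack are
   pushed while inner is compiled, then popped again afterwards.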
*/

void
c_push_function_context (void)
{
  struct language_function *p = cfun->language;
  /* cfun->language might already have been allocated by the use of
     -Wunused-local-typedefs.  In that case, just re-use it.  */
  if (p == NULL)
    cfun->language = p = ggc_cleared_alloc<language_function> ();

  p->base.x_stmt_tree = c_stmt_tree;
  c_stmt_tree.x_cur_stmt_list = vec_safe_copy (c_stmt_tree.x_cur_stmt_list);
  p->x_break_label = c_break_label;
  p->x_cont_label = c_cont_label;
  p->x_switch_stack = c_switch_stack;
  p->arg_info = current_function_arg_info;
  p->returns_value = current_function_returns_value;
  p->returns_null = current_function_returns_null;
  p->returns_abnormally = current_function_returns_abnormally;
  p->warn_about_return_type = warn_about_return_type;

  push_function_context ();
}

/* Restore the variables used during compilation of a C function.  */

void
c_pop_function_context (void)
{
  struct language_function *p;

  pop_function_context ();
  p = cfun->language;

  /* When -Wunused-local-typedefs is in effect, cfun->language is used
     to store data throughout the lifetime of the current cfun, so
     don't deallocate it here.  */
  if (!warn_unused_local_typedefs)
    cfun->language = NULL;

  if (DECL_STRUCT_FUNCTION (current_function_decl) == 0
      && DECL_SAVED_TREE (current_function_decl) == NULL_TREE)
    {
      /* Stop pointing to the local nodes about to be freed.  */
      /* But DECL_INITIAL must remain nonzero so we know this
	 was an actual function definition.  */
      DECL_INITIAL (current_function_decl) = error_mark_node;
      DECL_ARGUMENTS (current_function_decl) = NULL_TREE;
    }

  c_stmt_tree = p->base.x_stmt_tree;
  p->base.x_stmt_tree.x_cur_stmt_list = NULL;
  c_break_label = p->x_break_label;
  c_cont_label = p->x_cont_label;
  c_switch_stack = p->x_switch_stack;
  current_function_arg_info = p->arg_info;
  current_function_returns_value = p->returns_value;
  current_function_returns_null = p->returns_null;
  current_function_returns_abnormally = p->returns_abnormally;
  warn_about_return_type = p->warn_about_return_type;
}

/* The functions below are required for the C front end to do
   function-at-once processing.  Currently these functions are not
   called from anywhere in the C front end, but as these changes
   continue, that will change.  */

/* Returns the stmt_tree (if any) to which statements are currently
   being added.  If there is no active statement-tree, NULL is
   returned.  */

stmt_tree
current_stmt_tree (void)
{
  return &c_stmt_tree;
}

/* Return the global value of T as a symbol.  */

tree
identifier_global_value (tree t)
{
  struct c_binding *b;

  for (b = I_SYMBOL_BINDING (t); b; b = b->shadowed)
    if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b))
      return b->decl;
  return NULL_TREE;
}

/* Return the global value of tag T as a symbol.  */

tree
identifier_global_tag (tree t)
{
  struct c_binding *b;

  for (b = I_TAG_BINDING (t); b; b = b->shadowed)
    if (B_IN_FILE_SCOPE (b) || B_IN_EXTERNAL_SCOPE (b))
      return b->decl;
  return NULL_TREE;
}

/* Returns true if NAME refers to a built-in function or function-like
   operator.  */

bool
names_builtin_p (const char *name)
{
  tree id = get_identifier (name);
  if (tree decl = identifier_global_value (id))
    return TREE_CODE (decl) == FUNCTION_DECL && DECL_IS_BUILTIN (decl);

  /* Also detect common reserved C words that aren't strictly built-in
     functions.
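
     For instance (hypothetical queries, assuming the usual built-ins
     have been registered):

       names_builtin_p ("__builtin_memcpy")       true, via the
                                                  FUNCTION_DECL check
                                                  above
       names_builtin_p ("__builtin_choose_expr")  true, via
                                                  RID_CHOOSE_EXPR below
       names_builtin_p ("not_a_builtin")          false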
*/ switch (C_RID_CODE (id)) { case RID_BUILTIN_CONVERTVECTOR: case RID_BUILTIN_HAS_ATTRIBUTE: case RID_BUILTIN_SHUFFLE: case RID_CHOOSE_EXPR: case RID_OFFSETOF: case RID_TYPES_COMPATIBLE_P: return true; default: break; } return false; } /* In C, the only C-linkage public declaration is at file scope. */ tree c_linkage_bindings (tree name) { return identifier_global_value (name); } /* Record a builtin type for C. If NAME is non-NULL, it is the name used; otherwise the name is found in ridpointers from RID_INDEX. */ void record_builtin_type (enum rid rid_index, const char *name, tree type) { tree id, decl; if (name == 0) id = ridpointers[(int) rid_index]; else id = get_identifier (name); decl = build_decl (UNKNOWN_LOCATION, TYPE_DECL, id, type); pushdecl (decl); if (debug_hooks->type_decl) debug_hooks->type_decl (decl, false); } /* Build the void_list_node (void_type_node having been created). */ tree build_void_list_node (void) { tree t = build_tree_list (NULL_TREE, void_type_node); return t; } /* Return a c_parm structure with the given SPECS, ATTRS and DECLARATOR. */ struct c_parm * build_c_parm (struct c_declspecs *specs, tree attrs, struct c_declarator *declarator, location_t loc) { struct c_parm *ret = XOBNEW (&parser_obstack, struct c_parm); ret->specs = specs; ret->attrs = attrs; ret->declarator = declarator; ret->loc = loc; return ret; } /* Return a declarator with nested attributes. TARGET is the inner declarator to which these attributes apply. ATTRS are the attributes. */ struct c_declarator * build_attrs_declarator (tree attrs, struct c_declarator *target) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_attrs; ret->declarator = target; ret->u.attrs = attrs; return ret; } /* Return a declarator for a function with arguments specified by ARGS and return type specified by TARGET. */ struct c_declarator * build_function_declarator (struct c_arg_info *args, struct c_declarator *target) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_function; ret->declarator = target; ret->u.arg_info = args; return ret; } /* Return a declarator for the identifier IDENT (which may be NULL_TREE for an abstract declarator). */ struct c_declarator * build_id_declarator (tree ident) { struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); ret->kind = cdk_id; ret->declarator = 0; ret->u.id.id = ident; ret->u.id.attrs = NULL_TREE; /* Default value - may get reset to a more precise location. */ ret->id_loc = input_location; return ret; } /* Return something to represent absolute declarators containing a *. TARGET is the absolute declarator that the * contains. TYPE_QUALS_ATTRS is a structure for type qualifiers and attributes to apply to the pointer type. */ struct c_declarator * make_pointer_declarator (struct c_declspecs *type_quals_attrs, struct c_declarator *target) { tree attrs; int quals = 0; struct c_declarator *itarget = target; struct c_declarator *ret = XOBNEW (&parser_obstack, struct c_declarator); if (type_quals_attrs) { attrs = type_quals_attrs->attrs; quals = quals_from_declspecs (type_quals_attrs); if (attrs != NULL_TREE) itarget = build_attrs_declarator (attrs, target); } ret->kind = cdk_pointer; ret->declarator = itarget; ret->u.pointer_quals = quals; return ret; } /* Return a pointer to a structure for an empty list of declaration specifiers. 
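
   A hypothetical driver for the declaration "const volatile int x"
   would use this API roughly as follows (sketch only, assuming loc is
   a valid location_t and the usual ridpointers setup):

     struct c_declspecs *specs = build_null_declspecs ();
     specs = declspecs_add_qual (loc, specs, ridpointers[RID_CONST]);
     specs = declspecs_add_qual (loc, specs, ridpointers[RID_VOLATILE]);

   followed by declspecs_add_type for the "int" keyword and a final
   finish_declspecs (specs).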
*/ struct c_declspecs * build_null_declspecs (void) { struct c_declspecs *ret = XOBNEW (&parser_obstack, struct c_declspecs); memset (ret, 0, sizeof *ret); ret->align_log = -1; ret->typespec_word = cts_none; ret->storage_class = csc_none; ret->expr_const_operands = true; ret->typespec_kind = ctsk_none; ret->address_space = ADDR_SPACE_GENERIC; return ret; } /* Add the address space ADDRSPACE to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_addrspace (location_t location, struct c_declspecs *specs, addr_space_t as) { specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; specs->non_std_attrs_seen_p = true; if (!ADDR_SPACE_GENERIC_P (specs->address_space) && specs->address_space != as) error ("incompatible address space qualifiers %qs and %qs", c_addr_space_name (as), c_addr_space_name (specs->address_space)); else { specs->address_space = as; specs->locations[cdw_address_space] = location; } return specs; } /* Add the type qualifier QUAL to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_qual (location_t loc, struct c_declspecs *specs, tree qual) { enum rid i; bool dupe = false; specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; specs->non_std_attrs_seen_p = true; gcc_assert (TREE_CODE (qual) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (qual)); i = C_RID_CODE (qual); location_t prev_loc = UNKNOWN_LOCATION; switch (i) { case RID_CONST: dupe = specs->const_p; specs->const_p = true; prev_loc = specs->locations[cdw_const]; specs->locations[cdw_const] = loc; break; case RID_VOLATILE: dupe = specs->volatile_p; specs->volatile_p = true; prev_loc = specs->locations[cdw_volatile]; specs->locations[cdw_volatile] = loc; break; case RID_RESTRICT: dupe = specs->restrict_p; specs->restrict_p = true; prev_loc = specs->locations[cdw_restrict]; specs->locations[cdw_restrict] = loc; break; case RID_ATOMIC: dupe = specs->atomic_p; specs->atomic_p = true; prev_loc = specs->locations[cdw_atomic]; specs->locations[cdw_atomic] = loc; break; default: gcc_unreachable (); } if (dupe) { bool warned = pedwarn_c90 (loc, OPT_Wpedantic, "duplicate %qE declaration specifier", qual); if (!warned && warn_duplicate_decl_specifier && prev_loc >= RESERVED_LOCATION_COUNT && !from_macro_expansion_at (prev_loc) && !from_macro_expansion_at (loc)) warning_at (loc, OPT_Wduplicate_decl_specifier, "duplicate %qE declaration specifier", qual); } return specs; } /* Add the type specifier TYPE to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_type (location_t loc, struct c_declspecs *specs, struct c_typespec spec) { tree type = spec.spec; specs->non_sc_seen_p = true; specs->declspecs_seen_p = true; specs->non_std_attrs_seen_p = true; specs->typespec_kind = spec.kind; if (TREE_DEPRECATED (type)) specs->deprecated_p = true; /* Handle type specifier keywords. */ if (TREE_CODE (type) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (type) && C_RID_CODE (type) != RID_CXX_COMPAT_WARN) { enum rid i = C_RID_CODE (type); if (specs->type) { error_at (loc, "two or more data types in declaration specifiers"); return specs; } if ((int) i <= (int) RID_LAST_MODIFIER) { /* "long", "short", "signed", "unsigned", "_Complex" or "_Sat". 
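
   For example, the combinations handled below include:

     long long x;          OK in C99; pedwarn with -Wlong-long in C90
     long long long y;     error: "long long long" is too long for GCC
     long double d;        OK
     long long double e;   error: both "long long" and "double"
     long short z;         error: both "long" and "short"

   Illustrative declarations only.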
*/ bool dupe = false; switch (i) { case RID_LONG: if (specs->long_long_p) { error_at (loc, "%<long long long%> is too long for GCC"); break; } if (specs->long_p) { if (specs->typespec_word == cts_double) { error_at (loc, ("both %<long long%> and %<double%> in " "declaration specifiers")); break; } pedwarn_c90 (loc, OPT_Wlong_long, "ISO C90 does not support %<long long%>"); specs->long_long_p = 1; specs->locations[cdw_long_long] = loc; break; } if (specs->short_p) error_at (loc, ("both %<long%> and %<short%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<long%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<long%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int_n) error_at (loc, ("both %<long%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<long%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<long%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<long%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_floatn_nx) error_at (loc, ("both %<long%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<long%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<long%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<long%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->long_p = true; specs->locations[cdw_long] = loc; } break; case RID_SHORT: dupe = specs->short_p; if (specs->long_p) error_at (loc, ("both %<long%> and %<short%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<short%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<short%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int_n) error_at (loc, ("both %<short%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<short%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<short%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<short%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<short%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_floatn_nx) error_at (loc, ("both %<short%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? 
"x" : "")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<short%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<short%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<short%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->short_p = true; specs->locations[cdw_short] = loc; } break; case RID_SIGNED: dupe = specs->signed_p; if (specs->unsigned_p) error_at (loc, ("both %<signed%> and %<unsigned%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<signed%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<signed%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<signed%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<signed%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<signed%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_floatn_nx) error_at (loc, ("both %<signed%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<signed%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<signed%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<signed%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->signed_p = true; specs->locations[cdw_signed] = loc; } break; case RID_UNSIGNED: dupe = specs->unsigned_p; if (specs->signed_p) error_at (loc, ("both %<signed%> and %<unsigned%> in " "declaration specifiers")); else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<unsigned%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<unsigned%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<unsigned%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<unsigned%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<unsigned%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_floatn_nx) error_at (loc, ("both %<unsigned%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? 
"x" : "")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<unsigned%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<unsigned%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<unsigned%> and %<_Decimal128%> in " "declaration specifiers")); else { specs->unsigned_p = true; specs->locations[cdw_unsigned] = loc; } break; case RID_COMPLEX: dupe = specs->complex_p; if (!in_system_header_at (loc)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support complex types"); if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<complex%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<complex%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<complex%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<complex%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<complex%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<complex%> and %<_Decimal128%> in " "declaration specifiers")); else if (specs->typespec_word == cts_fract) error_at (loc, ("both %<complex%> and %<_Fract%> in " "declaration specifiers")); else if (specs->typespec_word == cts_accum) error_at (loc, ("both %<complex%> and %<_Accum%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<complex%> and %<_Sat%> in " "declaration specifiers")); else { specs->complex_p = true; specs->locations[cdw_complex] = loc; } break; case RID_SAT: dupe = specs->saturating_p; pedwarn (loc, OPT_Wpedantic, "ISO C does not support saturating types"); if (specs->typespec_word == cts_int_n) { error_at (loc, ("both %<_Sat%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); } else if (specs->typespec_word == cts_auto_type) error_at (loc, ("both %<_Sat%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->typespec_word == cts_void) error_at (loc, ("both %<_Sat%> and %<void%> in " "declaration specifiers")); else if (specs->typespec_word == cts_bool) error_at (loc, ("both %<_Sat%> and %<_Bool%> in " "declaration specifiers")); else if (specs->typespec_word == cts_char) error_at (loc, ("both %<_Sat%> and %<char%> in " "declaration specifiers")); else if (specs->typespec_word == cts_int) error_at (loc, ("both %<_Sat%> and %<int%> in " "declaration specifiers")); else if (specs->typespec_word == cts_float) error_at (loc, ("both %<_Sat%> and %<float%> in " "declaration specifiers")); else if (specs->typespec_word == cts_double) error_at (loc, ("both %<_Sat%> and %<double%> in " "declaration specifiers")); else if (specs->typespec_word == cts_floatn_nx) error_at (loc, ("both %<_Sat%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? 
"x" : "")); else if (specs->typespec_word == cts_dfloat32) error_at (loc, ("both %<_Sat%> and %<_Decimal32%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat64) error_at (loc, ("both %<_Sat%> and %<_Decimal64%> in " "declaration specifiers")); else if (specs->typespec_word == cts_dfloat128) error_at (loc, ("both %<_Sat%> and %<_Decimal128%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<_Sat%> and %<complex%> in " "declaration specifiers")); else { specs->saturating_p = true; specs->locations[cdw_saturating] = loc; } break; default: gcc_unreachable (); } if (dupe) error_at (loc, "duplicate %qE", type); return specs; } else { /* "void", "_Bool", "char", "int", "float", "double", "_FloatN", "_FloatNx", "_Decimal32", "__intN", "_Decimal64", "_Decimal128", "_Fract", "_Accum" or "__auto_type". */ if (specs->typespec_word != cts_none) { error_at (loc, "two or more data types in declaration specifiers"); return specs; } switch (i) { case RID_AUTO_TYPE: if (specs->long_p) error_at (loc, ("both %<long%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<__auto_type%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<__auto_type%> in " "declaration specifiers")); else { specs->typespec_word = cts_auto_type; specs->locations[cdw_typespec] = loc; } return specs; case RID_INT_N_0: case RID_INT_N_1: case RID_INT_N_2: case RID_INT_N_3: specs->int_n_idx = i - RID_INT_N_0; if (!in_system_header_at (input_location) /* If the INT_N type ends in "__", and so is of the format "__intN__", don't pedwarn. */ && (strncmp (IDENTIFIER_POINTER (type) + (IDENTIFIER_LENGTH (type) - 2), "__", 2) != 0)) pedwarn (loc, OPT_Wpedantic, "ISO C does not support %<__int%d%> types", int_n_data[specs->int_n_idx].bitsize); if (specs->long_p) error_at (loc, ("both %<__int%d%> and %<long%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<__int%d%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (specs->short_p) error_at (loc, ("both %<__int%d%> and %<short%> in " "declaration specifiers"), int_n_data[specs->int_n_idx].bitsize); else if (! 
int_n_enabled_p[specs->int_n_idx]) { specs->typespec_word = cts_int_n; error_at (loc, "%<__int%d%> is not supported on this target", int_n_data[specs->int_n_idx].bitsize); } else { specs->typespec_word = cts_int_n; specs->locations[cdw_typespec] = loc; } return specs; case RID_VOID: if (specs->long_p) error_at (loc, ("both %<long%> and %<void%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<void%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<void%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<void%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<void%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<void%> in " "declaration specifiers")); else { specs->typespec_word = cts_void; specs->locations[cdw_typespec] = loc; } return specs; case RID_BOOL: if (!in_system_header_at (loc)) pedwarn_c90 (loc, OPT_Wpedantic, "ISO C90 does not support boolean types"); if (specs->long_p) error_at (loc, ("both %<long%> and %<_Bool%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<_Bool%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<_Bool%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<_Bool%> in " "declaration specifiers")); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %<_Bool%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<_Bool%> in " "declaration specifiers")); else { specs->typespec_word = cts_bool; specs->locations[cdw_typespec] = loc; } return specs; case RID_CHAR: if (specs->long_p) error_at (loc, ("both %<long%> and %<char%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<char%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<char%> in " "declaration specifiers")); else { specs->typespec_word = cts_char; specs->locations[cdw_typespec] = loc; } return specs; case RID_INT: if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<int%> in " "declaration specifiers")); else { specs->typespec_word = cts_int; specs->locations[cdw_typespec] = loc; } return specs; case RID_FLOAT: if (specs->long_p) error_at (loc, ("both %<long%> and %<float%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<float%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<float%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<float%> in " "declaration specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<float%> in " "declaration specifiers")); else { specs->typespec_word = cts_float; specs->locations[cdw_typespec] = loc; } return specs; case RID_DOUBLE: if (specs->long_long_p) error_at (loc, ("both %<long long%> and %<double%> in " "declaration specifiers")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<double%> in " "declaration specifiers")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<double%> in " "declaration specifiers")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<double%> in " "declaration 
specifiers")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<double%> in " "declaration specifiers")); else { specs->typespec_word = cts_double; specs->locations[cdw_typespec] = loc; } return specs; CASE_RID_FLOATN_NX: specs->floatn_nx_idx = i - RID_FLOATN_NX_FIRST; if (!in_system_header_at (input_location)) pedwarn (loc, OPT_Wpedantic, "ISO C does not support the %<_Float%d%s%> type", floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); if (specs->long_p) error_at (loc, ("both %<long%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->short_p) error_at (loc, ("both %<short%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %<_Float%d%s%> in " "declaration specifiers"), floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? "x" : "")); else if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE) { specs->typespec_word = cts_floatn_nx; error_at (loc, "%<_Float%d%s%> is not supported on this target", floatn_nx_types[specs->floatn_nx_idx].n, (floatn_nx_types[specs->floatn_nx_idx].extended ? 
"x" : "")); } else { specs->typespec_word = cts_floatn_nx; specs->locations[cdw_typespec] = loc; } return specs; case RID_DFLOAT32: case RID_DFLOAT64: case RID_DFLOAT128: { const char *str; if (i == RID_DFLOAT32) str = "_Decimal32"; else if (i == RID_DFLOAT64) str = "_Decimal64"; else str = "_Decimal128"; if (specs->long_long_p) error_at (loc, ("both %<long long%> and %qs in " "declaration specifiers"), str); if (specs->long_p) error_at (loc, ("both %<long%> and %qs in " "declaration specifiers"), str); else if (specs->short_p) error_at (loc, ("both %<short%> and %qs in " "declaration specifiers"), str); else if (specs->signed_p) error_at (loc, ("both %<signed%> and %qs in " "declaration specifiers"), str); else if (specs->unsigned_p) error_at (loc, ("both %<unsigned%> and %qs in " "declaration specifiers"), str); else if (specs->complex_p) error_at (loc, ("both %<complex%> and %qs in " "declaration specifiers"), str); else if (specs->saturating_p) error_at (loc, ("both %<_Sat%> and %qs in " "declaration specifiers"), str); else if (i == RID_DFLOAT32) specs->typespec_word = cts_dfloat32; else if (i == RID_DFLOAT64) specs->typespec_word = cts_dfloat64; else specs->typespec_word = cts_dfloat128; specs->locations[cdw_typespec] = loc; } if (!targetm.decimal_float_supported_p ()) error_at (loc, ("decimal floating-point not supported " "for this target")); pedwarn_c11 (loc, OPT_Wpedantic, "ISO C does not support decimal floating-point " "before C2X"); return specs; case RID_FRACT: case RID_ACCUM: { const char *str; if (i == RID_FRACT) str = "_Fract"; else str = "_Accum"; if (specs->complex_p) error_at (loc, ("both %<complex%> and %qs in " "declaration specifiers"), str); else if (i == RID_FRACT) specs->typespec_word = cts_fract; else specs->typespec_word = cts_accum; specs->locations[cdw_typespec] = loc; } if (!targetm.fixed_point_supported_p ()) error_at (loc, "fixed-point types not supported for this target"); pedwarn (loc, OPT_Wpedantic, "ISO C does not support fixed-point types"); return specs; default: /* ObjC reserved word "id", handled below. */ break; } } } /* Now we have a typedef (a TYPE_DECL node), an identifier (some form of ObjC type, cases such as "int" and "long" being handled above), a TYPE (struct, union, enum and typeof specifiers) or an ERROR_MARK. In none of these cases may there have previously been any type specifiers. */ if (specs->type || specs->typespec_word != cts_none || specs->long_p || specs->short_p || specs->signed_p || specs->unsigned_p || specs->complex_p) error_at (loc, "two or more data types in declaration specifiers"); else if (TREE_CODE (type) == TYPE_DECL) { if (TREE_TYPE (type) == error_mark_node) ; /* Allow the type to default to int to avoid cascading errors. */ else { specs->type = TREE_TYPE (type); specs->decl_attr = DECL_ATTRIBUTES (type); specs->typedef_p = true; specs->explicit_signed_p = C_TYPEDEF_EXPLICITLY_SIGNED (type); specs->locations[cdw_typedef] = loc; /* If this typedef name is defined in a struct, then a C++ lookup would return a different value. */ if (warn_cxx_compat && I_SYMBOL_BINDING (DECL_NAME (type))->in_struct) warning_at (loc, OPT_Wc___compat, "C++ lookup of %qD would return a field, not a type", type); /* If we are parsing a struct, record that a struct field used a typedef. 
*/ if (warn_cxx_compat && struct_parse_info != NULL) struct_parse_info->typedefs_seen.safe_push (type); } } else if (TREE_CODE (type) == IDENTIFIER_NODE) { tree t = lookup_name (type); if (!t || TREE_CODE (t) != TYPE_DECL) error_at (loc, "%qE fails to be a typedef or built in type", type); else if (TREE_TYPE (t) == error_mark_node) ; else { specs->type = TREE_TYPE (t); specs->locations[cdw_typespec] = loc; } } else { if (TREE_CODE (type) != ERROR_MARK && spec.kind == ctsk_typeof) { specs->typedef_p = true; specs->locations[cdw_typedef] = loc; if (spec.expr) { if (specs->expr) specs->expr = build2 (COMPOUND_EXPR, TREE_TYPE (spec.expr), specs->expr, spec.expr); else specs->expr = spec.expr; specs->expr_const_operands &= spec.expr_const_operands; } } specs->type = type; } return specs; } /* Add the storage class specifier or function specifier SCSPEC to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_scspec (location_t loc, struct c_declspecs *specs, tree scspec) { enum rid i; enum c_storage_class n = csc_none; bool dupe = false; specs->declspecs_seen_p = true; specs->non_std_attrs_seen_p = true; gcc_assert (TREE_CODE (scspec) == IDENTIFIER_NODE && C_IS_RESERVED_WORD (scspec)); i = C_RID_CODE (scspec); if (specs->non_sc_seen_p) warning (OPT_Wold_style_declaration, "%qE is not at beginning of declaration", scspec); switch (i) { case RID_INLINE: /* C99 permits duplicate inline. Although of doubtful utility, it seems simplest to permit it in gnu89 mode as well, as there is also little utility in maintaining this as a difference between gnu89 and C99 inline. */ dupe = false; specs->inline_p = true; specs->locations[cdw_inline] = loc; break; case RID_NORETURN: /* Duplicate _Noreturn is permitted. */ dupe = false; specs->noreturn_p = true; specs->locations[cdw_noreturn] = loc; break; case RID_THREAD: dupe = specs->thread_p; if (specs->storage_class == csc_auto) error ("%qE used with %<auto%>", scspec); else if (specs->storage_class == csc_register) error ("%qE used with %<register%>", scspec); else if (specs->storage_class == csc_typedef) error ("%qE used with %<typedef%>", scspec); else { specs->thread_p = true; specs->thread_gnu_p = (strcmp (IDENTIFIER_POINTER (scspec), "__thread") == 0); /* A diagnostic is not required for the use of this identifier in the implementation namespace; only diagnose it for the C11 spelling because of existing code using the other spelling. */ if (!specs->thread_gnu_p) { if (flag_isoc99) pedwarn_c99 (loc, OPT_Wpedantic, "ISO C99 does not support %qE", scspec); else pedwarn_c99 (loc, OPT_Wpedantic, "ISO C90 does not support %qE", scspec); } specs->locations[cdw_thread] = loc; } break; case RID_AUTO: n = csc_auto; break; case RID_EXTERN: n = csc_extern; /* Diagnose "__thread extern". */ if (specs->thread_p && specs->thread_gnu_p) error ("%<__thread%> before %<extern%>"); break; case RID_REGISTER: n = csc_register; break; case RID_STATIC: n = csc_static; /* Diagnose "__thread static". 
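
   I.e. for the GNU spelling the storage class must come first
   (illustrative declarations only):

     static __thread int a;    accepted
     __thread static int b;    rejected: "__thread" before "static"

   The thread_gnu_p checks here deliberately exempt the C11
   "_Thread_local" spelling, which may appear in either order.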
*/ if (specs->thread_p && specs->thread_gnu_p) error ("%<__thread%> before %<static%>"); break; case RID_TYPEDEF: n = csc_typedef; break; default: gcc_unreachable (); } if (n != csc_none && n == specs->storage_class) dupe = true; if (dupe) { if (i == RID_THREAD) error ("duplicate %<_Thread_local%> or %<__thread%>"); else error ("duplicate %qE", scspec); } if (n != csc_none) { if (specs->storage_class != csc_none && n != specs->storage_class) { error ("multiple storage classes in declaration specifiers"); } else { specs->storage_class = n; specs->locations[cdw_storage_class] = loc; if (n != csc_extern && n != csc_static && specs->thread_p) { error ("%qs used with %qE", specs->thread_gnu_p ? "__thread" : "_Thread_local", scspec); specs->thread_p = false; } } } return specs; } /* Add the attributes ATTRS to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_attrs (location_t loc, struct c_declspecs *specs, tree attrs) { specs->attrs = chainon (attrs, specs->attrs); specs->locations[cdw_attributes] = loc; specs->declspecs_seen_p = true; /* In the case of standard attributes at the start of the declaration, the caller will reset this. */ specs->non_std_attrs_seen_p = true; return specs; } /* Add an _Alignas specifier (expression ALIGN, or type whose alignment is ALIGN) to the declaration specifiers SPECS, returning SPECS. */ struct c_declspecs * declspecs_add_alignas (location_t loc, struct c_declspecs *specs, tree align) { specs->alignas_p = true; specs->locations[cdw_alignas] = loc; if (align == error_mark_node) return specs; /* Only accept the alignment if it's valid and greater than the current one. Zero is invalid but by C11 required to be silently ignored. */ int align_log = check_user_alignment (align, false, /* warn_zero = */false); if (align_log > specs->align_log) specs->align_log = align_log; return specs; } /* Combine "long", "short", "signed", "unsigned" and "_Complex" type specifiers with any other type specifier to determine the resulting type. This is where ISO C checks on complex types are made, since "_Complex long" is a prefix of the valid ISO C type "_Complex long double". Also apply postfix standard attributes to modify the type. */ struct c_declspecs * finish_declspecs (struct c_declspecs *specs) { /* If a type was specified as a whole, we have no modifiers and are done. */ if (specs->type != NULL_TREE) { gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); /* Set a dummy type. */ if (TREE_CODE (specs->type) == ERROR_MARK) specs->type = integer_type_node; goto handle_postfix_attrs; } /* If none of "void", "_Bool", "char", "int", "float" or "double" has been specified, treat it as "int" unless "_Complex" is present and there are no other specifiers. If we just have "_Complex", it is equivalent to "_Complex double", but e.g. "_Complex short" is equivalent to "_Complex short int". 
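
   For instance:

     _Complex c;         treated as _Complex double, with a pedwarn
                         (ISO C has no plain "complex")
     _Complex short s;   treated as _Complex short int
     unsigned u;         no type word, so it defaults to int, giving
                         unsigned int

   Illustrative declarations only.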
*/ if (specs->typespec_word == cts_none) { if (specs->saturating_p) { error_at (specs->locations[cdw_saturating], "%<_Sat%> is used without %<_Fract%> or %<_Accum%>"); if (!targetm.fixed_point_supported_p ()) error_at (specs->locations[cdw_saturating], "fixed-point types not supported for this target"); specs->typespec_word = cts_fract; } else if (specs->long_p || specs->short_p || specs->signed_p || specs->unsigned_p) { specs->typespec_word = cts_int; } else if (specs->complex_p) { specs->typespec_word = cts_double; pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support plain %<complex%> meaning " "%<double complex%>"); } else { specs->typespec_word = cts_int; specs->default_int_p = true; /* We don't diagnose this here because grokdeclarator will give more specific diagnostics according to whether it is a function definition. */ } } /* If "signed" was specified, record this to distinguish "int" and "signed int" in the case of a bit-field with -funsigned-bitfields. */ specs->explicit_signed_p = specs->signed_p; /* Now compute the actual type. */ switch (specs->typespec_word) { case cts_auto_type: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); /* Type to be filled in later. */ if (specs->postfix_attrs) error ("%<__auto_type%> followed by %<[[]]%> attributes"); break; case cts_void: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); specs->type = void_type_node; break; case cts_bool: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); specs->type = boolean_type_node; break; case cts_char: gcc_assert (!specs->long_p && !specs->short_p); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (specs->signed_p) specs->type = signed_char_type_node; else if (specs->unsigned_p) specs->type = unsigned_char_type_node; else specs->type = char_type_node; if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_int_n: gcc_assert (!specs->long_p && !specs->short_p && !specs->long_long_p); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (! int_n_enabled_p[specs->int_n_idx]) specs->type = integer_type_node; else specs->type = (specs->unsigned_p ? int_n_trees[specs->int_n_idx].unsigned_type : int_n_trees[specs->int_n_idx].signed_type); if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_int: gcc_assert (!(specs->long_p && specs->short_p)); gcc_assert (!(specs->signed_p && specs->unsigned_p)); if (specs->long_long_p) specs->type = (specs->unsigned_p ? long_long_unsigned_type_node : long_long_integer_type_node); else if (specs->long_p) specs->type = (specs->unsigned_p ? long_unsigned_type_node : long_integer_type_node); else if (specs->short_p) specs->type = (specs->unsigned_p ? short_unsigned_type_node : short_integer_type_node); else specs->type = (specs->unsigned_p ? 
unsigned_type_node : integer_type_node); if (specs->complex_p) { pedwarn (specs->locations[cdw_complex], OPT_Wpedantic, "ISO C does not support complex integer types"); specs->type = build_complex_type (specs->type); } break; case cts_float: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); specs->type = (specs->complex_p ? complex_float_type_node : float_type_node); break; case cts_double: gcc_assert (!specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); if (specs->long_p) { specs->type = (specs->complex_p ? complex_long_double_type_node : long_double_type_node); } else { specs->type = (specs->complex_p ? complex_double_type_node : double_type_node); } break; case cts_floatn_nx: gcc_assert (!specs->long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p); if (FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx) == NULL_TREE) specs->type = integer_type_node; else if (specs->complex_p) specs->type = COMPLEX_FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx); else specs->type = FLOATN_NX_TYPE_NODE (specs->floatn_nx_idx); break; case cts_dfloat32: case cts_dfloat64: case cts_dfloat128: gcc_assert (!specs->long_p && !specs->long_long_p && !specs->short_p && !specs->signed_p && !specs->unsigned_p && !specs->complex_p); if (!targetm.decimal_float_supported_p ()) specs->type = integer_type_node; else if (specs->typespec_word == cts_dfloat32) specs->type = dfloat32_type_node; else if (specs->typespec_word == cts_dfloat64) specs->type = dfloat64_type_node; else specs->type = dfloat128_type_node; break; case cts_fract: gcc_assert (!specs->complex_p); if (!targetm.fixed_point_supported_p ()) specs->type = integer_type_node; else if (specs->saturating_p) { if (specs->long_long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_long_fract_type_node : sat_long_long_fract_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_fract_type_node : sat_long_fract_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? sat_unsigned_short_fract_type_node : sat_short_fract_type_node; else specs->type = specs->unsigned_p ? sat_unsigned_fract_type_node : sat_fract_type_node; } else { if (specs->long_long_p) specs->type = specs->unsigned_p ? unsigned_long_long_fract_type_node : long_long_fract_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? unsigned_long_fract_type_node : long_fract_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? unsigned_short_fract_type_node : short_fract_type_node; else specs->type = specs->unsigned_p ? unsigned_fract_type_node : fract_type_node; } break; case cts_accum: gcc_assert (!specs->complex_p); if (!targetm.fixed_point_supported_p ()) specs->type = integer_type_node; else if (specs->saturating_p) { if (specs->long_long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_long_accum_type_node : sat_long_long_accum_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? sat_unsigned_long_accum_type_node : sat_long_accum_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? sat_unsigned_short_accum_type_node : sat_short_accum_type_node; else specs->type = specs->unsigned_p ? sat_unsigned_accum_type_node : sat_accum_type_node; } else { if (specs->long_long_p) specs->type = specs->unsigned_p ? unsigned_long_long_accum_type_node : long_long_accum_type_node; else if (specs->long_p) specs->type = specs->unsigned_p ? 
unsigned_long_accum_type_node : long_accum_type_node; else if (specs->short_p) specs->type = specs->unsigned_p ? unsigned_short_accum_type_node : short_accum_type_node; else specs->type = specs->unsigned_p ? unsigned_accum_type_node : accum_type_node; } break; default: gcc_unreachable (); } handle_postfix_attrs: if (specs->type != NULL) { specs->postfix_attrs = c_warn_type_attributes (specs->postfix_attrs); decl_attributes (&specs->type, specs->postfix_attrs, 0); specs->postfix_attrs = NULL_TREE; } return specs; } /* Perform final processing on one file scope's declarations (or the external scope's declarations), GLOBALS. */ static void c_write_global_declarations_1 (tree globals) { tree decl; bool reconsider; /* Process the decls in the order they were written. */ for (decl = globals; decl; decl = DECL_CHAIN (decl)) { /* Check for used but undefined static functions using the C standard's definition of "used", and set TREE_NO_WARNING so that check_global_declaration doesn't repeat the check. */ if (TREE_CODE (decl) == FUNCTION_DECL && DECL_INITIAL (decl) == NULL_TREE && DECL_EXTERNAL (decl) && !TREE_PUBLIC (decl)) { if (C_DECL_USED (decl)) { if (pedwarn (input_location, 0, "%q+F used but never defined", decl)) TREE_NO_WARNING (decl) = 1; } /* For -Wunused-function warn about unused static prototypes. */ else if (warn_unused_function && ! DECL_ARTIFICIAL (decl) && ! TREE_NO_WARNING (decl)) { if (warning (OPT_Wunused_function, "%q+F declared %<static%> but never defined", decl)) TREE_NO_WARNING (decl) = 1; } } wrapup_global_declaration_1 (decl); } do { reconsider = false; for (decl = globals; decl; decl = DECL_CHAIN (decl)) reconsider |= wrapup_global_declaration_2 (decl); } while (reconsider); } /* Preserve the external declarations scope across a garbage collect. */ static GTY(()) tree ext_block; /* Collect all references relevant to SOURCE_FILE. */ static void collect_all_refs (const char *source_file) { tree t; unsigned i; FOR_EACH_VEC_ELT (*all_translation_units, i, t) collect_ada_nodes (BLOCK_VARS (DECL_INITIAL (t)), source_file); collect_ada_nodes (BLOCK_VARS (ext_block), source_file); } /* Collect source file references at global level. */ static void collect_source_refs (void) { tree t; tree decls; tree decl; unsigned i; FOR_EACH_VEC_ELT (*all_translation_units, i, t) { decls = DECL_INITIAL (t); for (decl = BLOCK_VARS (decls); decl; decl = TREE_CHAIN (decl)) if (!DECL_IS_BUILTIN (decl)) collect_source_ref (DECL_SOURCE_FILE (decl)); } for (decl = BLOCK_VARS (ext_block); decl; decl = TREE_CHAIN (decl)) if (!DECL_IS_BUILTIN (decl)) collect_source_ref (DECL_SOURCE_FILE (decl)); } /* Perform any final parser cleanups and generate initial debugging information. */ void c_parse_final_cleanups (void) { tree t; unsigned i; /* We don't want to do this if generating a PCH. */ if (pch_file) return; timevar_stop (TV_PHASE_PARSING); timevar_start (TV_PHASE_DEFERRED); /* Do the Objective-C stuff. This is where all the Objective-C module stuff gets generated (symtab, class/protocol/selector lists etc). */ if (c_dialect_objc ()) objc_write_global_declarations (); /* Close the external scope. */ ext_block = pop_scope (); external_scope = 0; gcc_assert (!current_scope); /* Handle -fdump-ada-spec[-slim]. 
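
   E.g. compiling a header with -fdump-ada-spec would be expected to
   emit Ada specs for that file and everything it includes, while
   -fdump-ada-spec-slim (per the flag check below) restricts the table
   of files to the main input file only.  Illustrative usage note.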
*/ if (flag_dump_ada_spec || flag_dump_ada_spec_slim) { /* Build a table of files to generate specs for */ collect_source_ref (main_input_filename); if (!flag_dump_ada_spec_slim) collect_source_refs (); dump_ada_specs (collect_all_refs, NULL); } /* Process all file scopes in this compilation, and the external_scope, through wrapup_global_declarations. */ FOR_EACH_VEC_ELT (*all_translation_units, i, t) c_write_global_declarations_1 (BLOCK_VARS (DECL_INITIAL (t))); c_write_global_declarations_1 (BLOCK_VARS (ext_block)); timevar_stop (TV_PHASE_DEFERRED); timevar_start (TV_PHASE_PARSING); ext_block = NULL; } /* Register reserved keyword WORD as qualifier for address space AS. */ void c_register_addr_space (const char *word, addr_space_t as) { int rid = RID_FIRST_ADDR_SPACE + as; tree id; /* Address space qualifiers are only supported in C with GNU extensions enabled. */ if (c_dialect_objc () || flag_no_asm) return; id = get_identifier (word); C_SET_RID_CODE (id, rid); C_IS_RESERVED_WORD (id) = 1; ridpointers [rid] = id; } /* Return identifier to look up for omp declare reduction. */ tree c_omp_reduction_id (enum tree_code reduction_code, tree reduction_id) { const char *p = NULL; switch (reduction_code) { case PLUS_EXPR: p = "+"; break; case MULT_EXPR: p = "*"; break; case MINUS_EXPR: p = "-"; break; case BIT_AND_EXPR: p = "&"; break; case BIT_XOR_EXPR: p = "^"; break; case BIT_IOR_EXPR: p = "|"; break; case TRUTH_ANDIF_EXPR: p = "&&"; break; case TRUTH_ORIF_EXPR: p = "||"; break; case MIN_EXPR: p = "min"; break; case MAX_EXPR: p = "max"; break; default: break; } if (p == NULL) { if (TREE_CODE (reduction_id) != IDENTIFIER_NODE) return error_mark_node; p = IDENTIFIER_POINTER (reduction_id); } const char prefix[] = "omp declare reduction "; size_t lenp = sizeof (prefix); size_t len = strlen (p); char *name = XALLOCAVEC (char, lenp + len); memcpy (name, prefix, lenp - 1); memcpy (name + lenp - 1, p, len + 1); return get_identifier (name); } /* Lookup REDUCTION_ID in the current scope, or create an artificial VAR_DECL, bind it into the current scope and return it. */ tree c_omp_reduction_decl (tree reduction_id) { struct c_binding *b = I_SYMBOL_BINDING (reduction_id); if (b != NULL && B_IN_CURRENT_SCOPE (b)) return b->decl; tree decl = build_decl (BUILTINS_LOCATION, VAR_DECL, reduction_id, integer_type_node); DECL_ARTIFICIAL (decl) = 1; DECL_EXTERNAL (decl) = 1; TREE_STATIC (decl) = 1; TREE_PUBLIC (decl) = 0; bind (reduction_id, decl, current_scope, true, false, BUILTINS_LOCATION); return decl; } /* Lookup REDUCTION_ID in the first scope where it has entry for TYPE. */ tree c_omp_reduction_lookup (tree reduction_id, tree type) { struct c_binding *b = I_SYMBOL_BINDING (reduction_id); while (b) { tree t; for (t = DECL_INITIAL (b->decl); t; t = TREE_CHAIN (t)) if (comptypes (TREE_PURPOSE (t), type)) return TREE_VALUE (t); b = b->shadowed; } return error_mark_node; } /* Helper function called via walk_tree, to diagnose invalid #pragma omp declare reduction combiners or initializers. 
*/ tree c_check_omp_declare_reduction_r (tree *tp, int *, void *data) { tree *vars = (tree *) data; if (SSA_VAR_P (*tp) && !DECL_ARTIFICIAL (*tp) && *tp != vars[0] && *tp != vars[1]) { location_t loc = DECL_SOURCE_LOCATION (vars[0]); if (strcmp (IDENTIFIER_POINTER (DECL_NAME (vars[0])), "omp_out") == 0) error_at (loc, "%<#pragma omp declare reduction%> combiner refers to " "variable %qD which is not %<omp_out%> nor %<omp_in%>", *tp); else error_at (loc, "%<#pragma omp declare reduction%> initializer refers " "to variable %qD which is not %<omp_priv%> nor " "%<omp_orig%>", *tp); return *tp; } return NULL_TREE; } bool c_check_in_current_scope (tree decl) { struct c_binding *b = I_SYMBOL_BINDING (DECL_NAME (decl)); return b != NULL && B_IN_CURRENT_SCOPE (b); } #include "gt-c-c-decl.h"
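/* Illustrative sketch (not part of GCC): the buffer arithmetic used by
   c_omp_reduction_id above.  sizeof (prefix) counts the terminating NUL,
   so lenp + len bytes are enough for the prefix without its NUL followed
   by P including its NUL.  demo_reduction_name and the fixed-size buffer
   standing in for XALLOCAVEC are hypothetical.  */

#include <stdio.h>
#include <string.h>

static void
demo_reduction_name (const char *p)
{
  const char prefix[] = "omp declare reduction ";
  size_t lenp = sizeof (prefix);	/* includes the '\0' */
  size_t len = strlen (p);
  char name[64];			/* stand-in for XALLOCAVEC */
  memcpy (name, prefix, lenp - 1);	/* prefix without its NUL */
  memcpy (name + lenp - 1, p, len + 1);	/* p plus its NUL */
  printf ("%s\n", name);		/* e.g. "omp declare reduction min" */
}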
GB_unop__identity_fp64_uint8.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__identity_fp64_uint8 // op(A') function: GB_unop_tran__identity_fp64_uint8 // C type: double // A type: uint8_t // cast: double cij = (double) aij // unaryop: cij = aij #define GB_ATYPE \ uint8_t #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint8_t aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = x ; // casting #define GB_CAST(z, aij) \ double z = (double) aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ uint8_t aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = (double) aij ; \ Cx [pC] = z ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_IDENTITY || GxB_NO_FP64 || GxB_NO_UINT8) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__identity_fp64_uint8 ( double *Cx, // Cx and Ax may be aliased const uint8_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { uint8_t aij = Ax [p] ; double z = (double) aij ; Cx [p] = z ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__identity_fp64_uint8 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
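//------------------------------------------------------------------------------
// illustrative sketch (not part of the generated kernel)
//------------------------------------------------------------------------------

// The apply kernel above is just the GB_* macros expanded into a loop.  The
// standalone function below (hypothetical name, no GraphBLAS scaffolding)
// shows the same cast-and-apply pattern for the uint8_t -> double identity op.

#include <stdint.h>

static void identity_fp64_uint8_sketch
(
    double *Cx,                 // output array, may alias Ax
    const uint8_t *Ax,          // input array
    int64_t anz,                // number of entries
    int nthreads
)
{
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint8_t aij = Ax [p] ;      // GB_GETA
        double z = (double) aij ;   // GB_CAST
        Cx [p] = z ;                // GB_OP: identity
    }
}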
NDArray.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #ifndef NDARRAY_H #define NDARRAY_H #include <initializer_list> #include <functional> #include <shape.h> #include "NativeOpExcutioner.h" #include <memory/Workspace.h> #include <indexing/NDIndex.h> #include <indexing/IndicesList.h> #include <graph/Intervals.h> #include <array/DataType.h> #include <stdint.h> #include <array/ArrayOptions.h> #include <array/ArrayType.h> #include <array/ResultSet.h> namespace nd4j { template<typename T> class ND4J_EXPORT NDArray; ND4J_EXPORT NDArray<float> operator-(const float, const NDArray<float>&); ND4J_EXPORT NDArray<float16> operator-(const float16, const NDArray<float16>&); ND4J_EXPORT NDArray<double> operator-(const double, const NDArray<double>&); ND4J_EXPORT NDArray<float> operator+(const float, const NDArray<float>&); ND4J_EXPORT NDArray<float16> operator+(const float16, const NDArray<float16>&); ND4J_EXPORT NDArray<double> operator+(const double, const NDArray<double>&); template<typename T> NDArray<T> mmul(const NDArray<T>&, const NDArray<T>&); template<typename T> class NDArray { protected: /** * if true then array doesn't own buffer and simply points to another's buffer */ bool _isView = false; /** * pointer on flattened data array in memory */ T *_buffer = nullptr; /** * contains shape info: matrix rank, numbers of elements per each dimension, dimensions strides, element-wise-stride, c-like or fortran-like order */ Nd4jLong *_shapeInfo = nullptr; /** * pointer on externally allocated memory where _buffer and _shapeInfo are stored */ nd4j::memory::Workspace* _workspace = nullptr; /** * alternative buffers for special computational devices (like GPUs for CUDA) */ T* _bufferD = nullptr; Nd4jLong *_shapeInfoD = nullptr; /** * indicates whether this array allocated the memory for _buffer/_shapeInfo itself; if false, the memory was allocated from outside and is not freed by this array */ bool _isShapeAlloc = false; bool _isBuffAlloc = false; /** * Field to store cached length */ Nd4jLong _length = -1L; /** * type of array elements */ DataType _dataType = DataType_FLOAT; std::string toStringValue(T value); public: static NDArray<T>* createEmpty(nd4j::memory::Workspace* workspace = nullptr); static NDArray<T>* valueOf(const std::initializer_list<Nd4jLong>& shape, const T value, const char order = 'c'); static NDArray<T>* valueOf(const std::vector<Nd4jLong>& shape, const T value, const char order = 'c'); static NDArray<T>* linspace(const T from, const T to, const Nd4jLong numElements); static NDArray<T>* scalar(const T value); /** * default constructor, does not allocate memory; memory for the array is passed from outside */ NDArray(T *buffer = nullptr, Nd4jLong* shapeInfo = nullptr, nd4j::memory::Workspace* workspace = nullptr); NDArray(std::initializer_list<Nd4jLong> shape, nd4j::memory::Workspace* workspace = nullptr); /** * Constructor for scalar
NDArray */ NDArray(T scalar); /** * copy constructor */ NDArray(const NDArray<T>& other); /** * move constructor */ NDArray(NDArray<T>&& other) noexcept; #ifndef __JAVACPP_HACK__ // this method only available out of javacpp /** * This constructor creates vector of T * * @param values */ NDArray(std::initializer_list<T> values, nd4j::memory::Workspace* workspace = nullptr); NDArray(std::vector<T> &values, nd4j::memory::Workspace* workspace = nullptr); #endif /** * constructor, create empty array stored at given workspace */ NDArray(nd4j::memory::Workspace* workspace); /** * this constructor creates new NDArray with shape matching "other" array, do not copy "other" elements into new array */ NDArray(const NDArray<T> *other, const bool copyStrides = false, nd4j::memory::Workspace* workspace = nullptr); /** * constructor creates new NDArray using shape information from "shapeInfo", set all elements in new array to be zeros, if copyStrides is true then use stride values from "shapeInfo", else calculate strides independently */ NDArray(const Nd4jLong* shapeInfo, const bool copyStrides = false, nd4j::memory::Workspace* workspace = nullptr); /** * this constructor creates new array using shape information contained in vector argument */ NDArray(const char order, const std::vector<Nd4jLong> &shape, nd4j::memory::Workspace* workspace = nullptr); /** * This constructor creates new array with elements copied from data and using shape information stored in shape * * PLEASE NOTE: data will be copied AS IS, without respect to specified order. You must ensure order match here. */ NDArray(const char order, const std::vector<Nd4jLong> &shape, const std::vector<T> &data, nd4j::memory::Workspace* workspace = nullptr); /** * this constructor creates new array using given buffer (without memory allocating) and shape information stored in shape */ NDArray(T *buffer, const char order, const std::vector<Nd4jLong> &shape , nd4j::memory::Workspace* workspace = nullptr); /** * copy assignment operator */ NDArray<T>& operator=(const NDArray<T>& other); /** * move assignment operator */ NDArray<T>& operator=(NDArray<T>&& other) noexcept; /** * assignment operator, assigns the same scalar to all array elements */ NDArray<T>& operator=(const T scalar); /** * operators for memory allocation and deletion */ void* operator new(size_t i); void operator delete(void* p); /** * method replaces existing buffer/shapeinfo, AND releases original pointers (if releaseExisting TRUE) */ void replacePointers(T *buffer, Nd4jLong *shapeInfo, const bool releaseExisting = true); /** * create a new array by replicating current array by repeats times along given dimension * dimension - dimension along which to repeat elements * repeats - number of repetitions */ NDArray<T>* repeat(int dimension, const std::vector<Nd4jLong>& repeats) const; /** * This method returns quantized copy of given array * * @param array * @return */ static NDArray<T> quantize(NDArray<T> &array); /** * This method returns quantized copy of given array * * @param array * @return */ static NDArray<T>* quantize(NDArray<T> *array); /** * fill target array by repeating current array * dimension - dimension along which to repeat elements */ void repeat(int dimension, NDArray<T>& target) const; /** * return _dataType; */ DataType dataType() const; /** * creates array which is view of this array */ NDArray<T>* getView(); /** * creates array which points on certain sub-range of this array, sub-range is defined by given indices */ NDArray<T> *subarray(IndicesList& indices) 
const; NDArray<T> *subarray(IndicesList& indices, std::vector<Nd4jLong>& strides) const; NDArray<T>* subarray(const std::initializer_list<NDIndex*>& idx) const; NDArray<T>* subarray(const Intervals& idx) const; /** * cast array elements to given dtype */ NDArray<T>* cast(DataType dtype); void cast(NDArray<T>* target, DataType dtype); /** * returns _workspace */ nd4j::memory::Workspace* getWorkspace() const { return _workspace; } /** * returns _buffer */ T* getBuffer() const; T* buffer(); /** * returns _shapeInfo */ Nd4jLong* shapeInfo(); Nd4jLong* getShapeInfo() const; /** * if _bufferD==nullptr return _buffer, else return _bufferD */ T* specialBuffer(); /** * Returns True if it's legally empty NDArray, or false otherwise * @return */ FORCEINLINE bool isEmpty() const; /** * if _shapeInfoD==nullptr return _shapeInfo, else return _shapeInfoD */ Nd4jLong* specialShapeInfo(); /** * set values for _bufferD and _shapeInfoD */ void setSpecialBuffers(T * buffer, Nd4jLong *shape); /** * permutes (in-place) the dimensions in array according to "dimensions" array */ bool permutei(const std::initializer_list<int>& dimensions); bool permutei(const std::vector<int>& dimensions); bool permutei(const int* dimensions, const int rank); bool permutei(const std::initializer_list<Nd4jLong>& dimensions); bool permutei(const std::vector<Nd4jLong>& dimensions); bool permutei(const Nd4jLong* dimensions, const int rank); bool isFinite(); bool hasNaNs(); bool hasInfs(); /** * permutes the dimensions in array according to "dimensions" array, new array points on _buffer of this array */ NDArray<T>* permute(const std::initializer_list<int>& dimensions) const; NDArray<T>* permute(const std::vector<int>& dimensions) const; NDArray<T>* permute(const int* dimensions, const int rank) const; void permute(const int* dimensions, const int rank, NDArray<T>& target) const; void permute(const std::vector<int>& dimensions, NDArray<T>& target) const; NDArray<T>* permute(const std::initializer_list<Nd4jLong>& dimensions) const; NDArray<T>* permute(const std::vector<Nd4jLong>& dimensions) const; NDArray<T>* permute(const Nd4jLong* dimensions, const int rank) const; void permute(const Nd4jLong* dimensions, const int rank, NDArray<T>& target) const; void permute(const std::vector<Nd4jLong>& dimensions, NDArray<T>& target) const; /** * This method streamlines given view or permuted array, and reallocates buffer */ void streamline(char order = 'a'); /** * check whether array is contiguous in memory */ bool isContiguous(); /** * prints information about array shape * msg - message to print out */ void printShapeInfo(const char * msg = nullptr) const; /** * prints buffer elements * msg - message to print out * limit - number of array elements to print out */ void printBuffer(const char* msg = nullptr, Nd4jLong limit = -1); /** * prints buffer elements, takes into account offset between elements (element-wise-stride) * msg - message to print out * limit - number of array elements to print out */ void printIndexedBuffer(const char* msg = nullptr, Nd4jLong limit = -1) const; std::string asIndexedString(Nd4jLong limit = -1); std::string asString(Nd4jLong limit = -1); /** * this method assigns values of given array to this one */ void assign(const NDArray<T>* other); /** * this method assigns values of given array to this one */ void assign(const NDArray<T>& other); /** * this method assigns given value to all elements in array */ void assign(const T value); /** * returns new copy of this array, optionally in different order */ NDArray<T> 
*dup(const char newOrder = 'a'); /** * returns sum of all elements of array */ T sumNumber() const; /** * returns mean number of array */ T meanNumber() const; /** * This method explicitly enforces new shape for this NDArray, old shape/stride information is lost */ void enforce(const std::initializer_list<Nd4jLong> &dimensions, char order = 'a'); void enforce(std::vector<Nd4jLong> &dimensions, char order = 'a'); /** * calculates sum along dimension(s) in this array and saves it to a newly created reduced array * dimensions - array of dimensions to calculate sum over * keepDims - if true then put unities in place of reduced dimensions */ NDArray<T> *sum(const std::vector<int> &dimensions) const; /** * method reduces array by excluding its shapes along dimensions present in given dimensions vector, result is stored in new array to be returned * dimensions - array of dimensions to reduce along * keepDims - if true then put unities in place of reduced dimensions */ template<typename OpName> NDArray<T>* reduceAlongDimension(const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; template<typename OpName> NDArray<T>* reduceAlongDimension(const std::initializer_list<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; template<typename OpName> NDArray<T> reduceAlongDims(const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false) const; /** * method reduces array by excluding its shapes along dimensions present in given dimensions vector * target - where to save result of reducing * dimensions - array of dimensions to reduce along * keepDims - if true then put unities in place of reduced dimensions * extras - extra parameters */ template<typename OpName> void reduceAlongDimension(NDArray<T>* target, const std::vector<int>& dimensions, const bool keepDims = false, const bool supportOldShapes = false, T *extras = nullptr) const; /** * return variance of array elements set * biasCorrected - if true bias correction will be applied */ template<typename OpName> T varianceNumber(bool biasCorrected = true); /** * reduces the whole array to a single scalar value * extraParams - extra parameters for operation */ template<typename OpName> T reduceNumber(T *extraParams = nullptr) const; /** * returns element index which corresponds to some condition imposed by operation * extraParams - extra parameters for operation */ template<typename OpName> Nd4jLong indexReduceNumber(T *extraParams = nullptr); /** * returns index of max element in a given array (optionally: along given dimension(s)) * dimensions - optional vector with dimensions */ Nd4jLong argMax(std::initializer_list<int> dimensions = {}); /** * apply OpName transformation directly to array * extraParams - extra parameters for operation */ template<typename OpName> void applyTransform(T *extraParams = nullptr); /** * apply OpName transformation to array and store result in target * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyTransform(NDArray<T> *target, T *extraParams = nullptr); /** * apply OpName transformation to this array and store result in new array being returned * extraParams - extra parameters for operation */ template<typename OpName> NDArray<T> transform(T *extraParams = nullptr) const; /** * apply pairwise OpName transformation based on "this" and "other" arrays' elements, store result in this array * other - second array necessary for pairwise operation *
extraParams - extra parameters for operation */ template<typename OpName> void applyPairwiseTransform(NDArray<T> *other, T *extraParams); /** * apply pairwise OpName transformation based on "this" and "other" arrays' elements, store result in target array * other - second array necessary for pairwise operation * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyPairwiseTransform(NDArray<T> *other, NDArray<T> *target, T *extraParams); /** * apply operation which requires broadcasting, broadcast a smaller array (tad) along bigger one (this) * tad - array to broadcast * dimensions - dimensions array to broadcast along * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyBroadcast(std::initializer_list<int> dimensions, const NDArray<T>* tad, NDArray<T>* target = nullptr, T* extraArgs = nullptr); template <typename OpName> void applyBroadcast(std::vector<int> &dimensions, const NDArray<T> *tad, NDArray<T> *target = nullptr, T *extraArgs = nullptr); /** * apply operation which requires broadcasting, broadcast one tensor along another, also this method checks the possibility of broadcasting * other - input array * extraParams - extra parameters for operation */ template <typename OpName> NDArray<T> applyTrueBroadcast(const NDArray<T>& other, T *extraArgs = nullptr) const; template <typename OpName> NDArray<T>* applyTrueBroadcast(const NDArray<T>* other, T *extraArgs = nullptr) const; /** * apply operation which requires broadcasting, broadcast one tensor along another, also this method checks the possibility of broadcasting * other - input array * target - where to store result * checkTargetShape - if true check whether target shape is suitable for broadcasting * extraParams - extra parameters for operation */ template <typename OpName> void applyTrueBroadcast(const NDArray<T>* other, NDArray<T>* target, const bool checkTargetShape = true, T *extraArgs = nullptr) const; /** * apply a scalar operation to an array * scalar - input scalar * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyScalar(T scalar, NDArray<T>* target = nullptr, T *extraParams = nullptr) const; /** * apply a scalar operation to an array * scalar - input array which is simple scalar * target - where to store result * extraParams - extra parameters for operation */ template<typename OpName> void applyScalar(NDArray<T>& scalar, NDArray<T>* target = nullptr, T *extraParams = nullptr) const; #ifndef __JAVACPP_HACK__ /** * apply operation "func" to an array * func - what operation to apply * target - where to store result */ void applyLambda(const std::function<T(T)>& func, NDArray<T>* target = nullptr); void applyIndexedLambda(const std::function<T(Nd4jLong, T)>& func, NDArray<T>* target = nullptr); /** * apply pairwise operation "func" to an array * other - input array * func - what pairwise operation to apply * target - where to store result */ void applyPairwiseLambda(const NDArray<T>* other, const std::function<T(T, T)>& func, NDArray<T>* target = nullptr); void applyIndexedPairwiseLambda(NDArray<T>* other, const std::function<T(Nd4jLong, T, T)>& func, NDArray<T>* target = nullptr); void applyTriplewiseLambda(NDArray<T>* second, NDArray<T> *third, const std::function<T(T, T, T)>& func, NDArray<T>* target = nullptr); #endif /** * apply OpName random operation to array * buffer - pointer on RandomBuffer * y - optional input array * z
- optional input array * extraArgs - extra parameters for operation */ template<typename OpName> void applyRandom(nd4j::random::RandomBuffer *buffer, NDArray<T>* y = nullptr, NDArray<T>* z = nullptr, T* extraArgs = nullptr); /** * apply transpose operation to the copy of this array, that is this array remains unaffected */ NDArray<T>* transpose() const; NDArray<T> transp() const; /** * perform transpose operation and store result in target, this array remains unaffected * target - where to store result */ void transpose(NDArray<T>& target) const; /** * apply in-place transpose operation to this array, so this array becomes transposed */ void transposei(); /** * return array pointing on certain range of this array * index - the number of array to be returned among set of possible arrays * dimensions - array of dimensions to point on */ NDArray<T>* tensorAlongDimension(Nd4jLong index, const std::initializer_list<int>& dimensions) const; NDArray<T>* tensorAlongDimension(Nd4jLong index, const std::vector<int>& dimensions) const; /** * returns the number of arrays pointing on specified dimension(s) * dimensions - array of dimensions to point on */ Nd4jLong tensorsAlongDimension(const std::initializer_list<int> dimensions) const ; Nd4jLong tensorsAlongDimension(const std::vector<int>& dimensions) const ; /** * returns true if elements of two arrays are equal to within given epsilon value * other - input array to compare * eps - epsilon, this value defines the precision of elements comparison */ bool equalsTo(const NDArray<T> *other, T eps = (T) 1e-5f) const; bool equalsTo(NDArray<T> &other, T eps = (T) 1e-5f) const; /** * add given row vector to all rows of this array * row - row vector to add */ void addiRowVector(const NDArray<T> *row); /** * add given row vector to all rows of this array, store result in target * row - row vector to add * target - where to store result */ void addRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * subtract given row vector from all rows of this array, store result in target * row - row vector to subtract * target - where to store result */ void subRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * multiply all rows of this array on given row vector, store result in target * row - row vector to multiply on * target - where to store result */ void mulRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * divide all rows of this array on given row vector, store result in target * row - row vector to divide on * target - where to store result */ void divRowVector(const NDArray<T> *row, NDArray<T>* target) const; /** * add given column vector to all columns of this array, store result in target * column - column vector to add * target - where to store result */ void addColumnVector(const NDArray<T> *column, NDArray<T>* target) const; /** * add given column vector to all columns of this array, this array becomes affected (in-place operation) * column - column vector to add */ void addiColumnVector(const NDArray<T> *column); /** * multiply all columns of this array on given column vector, this array becomes affected (in-place operation) * column - column vector to multiply on */ void muliColumnVector(const NDArray<T> *column); /** * returns number of bytes used by _buffer & _shapeInfo */ Nd4jLong memoryFootprint(); /** * these methods suited for FlatBuffers use */ std::vector<T> getBufferAsVector(); std::vector<Nd4jLong> getShapeAsVector(); std::vector<Nd4jLong> getShapeInfoAsVector(); std::vector<int64_t> 
getShapeInfoAsFlatVector(); /** * set new order and shape in case of suitable array length (in-place operation) * order - order to set * shape - shape to set * * if there was permute applied before or there are weird strides, then new buffer is allocated for array */ bool reshapei(const char order, const std::initializer_list<Nd4jLong>& shape); bool reshapei(const char order, const std::vector<Nd4jLong>& shape); bool reshapei(const std::initializer_list<Nd4jLong>& shape); bool reshapei(const std::vector<Nd4jLong>& shape); /** * creates new array with corresponding order and shape, new array will point on _buffer of this array * order - order to set * shape - shape to set * * if permute have been applied before or there are weird strides, then new buffer is allocated for new array */ NDArray<T>* reshape(const char order, const std::vector<Nd4jLong>& shape) const; /** * calculate strides and set given order * order - order to set */ void updateStrides(const char order); /** * change an array by repeating it the number of times given by reps (in-place operation) * repeats - contains numbers of repetitions */ void tilei(const std::vector<Nd4jLong>& repeats); /** * returns new array which is created by repeating of this array the number of times given by reps * repeats - contains numbers of repetitions */ NDArray<T> tile(const std::vector<Nd4jLong>& repeats) const; /** * change an array by repeating it the number of times given by reps (in-place operation) * repeats - contains numbers of repetitions * target - where to store result */ void tile(const std::vector<Nd4jLong>& repeats, NDArray<T>& target) const; /** * change an array by repeating it the number of times to acquire the new shape which is the same as target shape * target - where to store result */ void tile(NDArray<T>& target) const; /** * returns an array which is result of broadcasting of this and other arrays * other - input array */ NDArray<T>* broadcast(const NDArray<T>& other); /** * check whether array's rows (arg=0) or columns (arg=1) create orthogonal basis * arg - 0 -> row, 1 -> column */ bool hasOrthonormalBasis(const int arg); /** * check whether array is identity matrix */ bool isIdentityMatrix(); /** * check whether array is unitary matrix */ bool isUnitary(); /** * reduces dimensions in this array relying on index operation OpName * dimensions - vector of dimensions to reduce along * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyIndexReduce(const std::vector<int>& dimensions, const T *extraParams = nullptr) const; /** * reduces dimensions in array relying on index operation OpName * target - where to store result * dimensions - vector of dimensions to reduce along * extraArgs - extra parameters for operation */ template<typename OpName> void applyIndexReduce(const NDArray<T>* target, const std::vector<int>& dimensions, const T *extraParams = nullptr) const; /** * apply reduce3 operation OpName to this and other array, return result in new output array * other - input array * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyReduce3(const NDArray<T>* other, const T* extraParams = nullptr) const; /** * apply reduce3 operation OpName to this and other array, return result in new output array * other - input array * dimensions - vector of dimensions to reduce along (tads not axis) * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyAllReduce3(const NDArray<T>* other, const std::vector<int>& dimensions, 
const T* extraParams = nullptr) const; /** * apply reduce3 (exec) operation OpName to this and other array, return result in new output array * other - input array * dimensions - vector of dimensions to reduce along (same as reduceAlongDimension) * extraArgs - extra parameters for operation */ template<typename OpName> NDArray<T>* applyReduce3(const NDArray<T>* other, const std::vector<int>& dimensions, const T* extraParams = nullptr) const; /** * returns variance along given dimensions * biasCorrected - if true bias correction will be applied * dimensions - vector of dimensions to calculate variance along */ template<typename OpName> NDArray<T>* varianceAlongDimension(const bool biasCorrected, const std::vector<int>& dimensions) const; template<typename OpName> NDArray<T>* varianceAlongDimension(const bool biasCorrected, const std::initializer_list<int>& dimensions) const; template<typename OpName> void varianceAlongDimension(const NDArray<T>* target, const bool biasCorrected, const std::vector<int>& dimensions); template<typename OpName> void varianceAlongDimension(const NDArray<T>* target, const bool biasCorrected, const std::initializer_list<int>& dimensions); /** * operator returns sub-array with buffer pointing at this->_buffer with offset defined by given intervals * idx - intervals of indexes which define the sub-arrays to point on * keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b} */ NDArray<T> operator()(const Intervals& idx, bool keepUnitiesInShape = false) const; /** * operator returns sub-array with buffer pointing at this->_buffer with offset defined by given intervals * idx - intervals of indexes which define the sub-arrays to point on, idx has form {dim0Start,dim0End, dim1Start,dim1End, ....} and length (2 * this->rankOf()) * when (dimStart == dimEnd) then whole range will be used for current dimension * keepUnitiesInShape - if false then eliminate unities from resulting array shape, for example {1,a,1,b} -> {a,b} */ NDArray<T> operator()(const Nd4jLong* idx, bool keepUnitiesInShape = false) const; /** * addition operator: array + other * other - input array to add */ NDArray<T> operator+(const NDArray<T>& other) const; /** * addition operator: array + scalar * scalar - input scalar to add */ NDArray<T> operator+(const T scalar) const; /** * friend functions which implement addition operator: scalar + array * scalar - input scalar to add */ friend NDArray<float> nd4j::operator+(const float scalar, const NDArray<float>& arr); friend NDArray<float16> nd4j::operator+(const float16 scalar, const NDArray<float16>& arr); friend NDArray<double> nd4j::operator+(const double scalar, const NDArray<double>& arr); /** * addition unary operator array += other * other - input array to add */ void operator+=(const NDArray<T>& other); /** * subtraction unary operator array -= other * other - input array to subtract */ void operator-=(const NDArray<T>& other); void operator+=(const T other); void operator-=(const T other); /** * subtraction operator: array - other * other - input array to subtract */ NDArray<T> operator-(const NDArray<T>& other) const; /** * subtraction operator: array - scalar * scalar - input scalar to subtract */ NDArray<T> operator-(const T& scalar) const; /** * unary negation operator, changes the sign of every array element */ NDArray<T> operator-() const; /** * friend functions which implement subtraction operator: scalar - array * scalar - input scalar to subtract */ friend NDArray<float>
nd4j::operator-(const float scalar, const NDArray<float>& arr); friend NDArray<float16> nd4j::operator-(const float16 scalar, const NDArray<float16>& arr); friend NDArray<double> nd4j::operator-(const double scalar, const NDArray<double>& arr); /** * pairwise multiplication operator: array * other * other - input array to multiply on */ NDArray<T> operator*(const NDArray<T>& other) const; /** * multiplication operator: array * scalar * scalar - input scalar to multiply on */ NDArray<T> operator*(const T scalar) const; /** * pairwise multiplication unary operator array *= other * other - input array to multiply on */ void operator*=(const NDArray<T>& other); /** * multiplication unary operator array *= scalar * scalar - input scalar to multiply on */ void operator*=(const T scalar); /** * pairwise division operator: array / other * other - input array to divide on */ NDArray<T> operator/(const NDArray<T>& other) const; /** * division operator: array / scalar * scalar - input scalar to divide each array element on */ NDArray<T> operator/(const T scalar) const; /** * pairwise division unary operator: array /= other * other - input array to divide on */ void operator/=(const NDArray<T>& other); /** * division unary operator: array /= scalar * scalar - input scalar to divide on */ void operator/=(const T scalar); /** * friend function which implements mathematical multiplication of two arrays * left - input array * right - input array */ friend NDArray<T> mmul<>(const NDArray<T>& left, const NDArray<T>& right); /** * this method assigns elements of other array to the sub-array of this array defined by given intervals * other - input array to assign elements from * idx - intervals of indexes which define the sub-array */ void assign(const NDArray<T>& other, const Intervals& idx); /** * return vector containing _buffer as flat binary array */ std::vector<int8_t> asByteVector(); /** * makes array an identity matrix (not necessarily square), that is set all diagonal elements = 1, rest = 0 */ void setIdentity(); /** * swaps the contents of two arrays, * PLEASE NOTE: method doesn't take into account the shapes of arrays, shapes may be different except one condition: arrays lengths must be the same */ void swapUnsafe(NDArray<T>& other); /** * return vector with buffer which points on corresponding diagonal elements of array * type - kind of vector to be returned: column ('c') or row ('r') */ NDArray<T>* diagonal(const char type ) const; /** * fill matrix with given value starting from specified diagonal in given direction, works only with 2D matrix * * diag - diagonal starting from which the matrix is filled. * diag = 0 corresponds to main diagonal, * diag < 0 below main diagonal * diag > 0 above main diagonal * direction - in what direction to fill matrix.
There are 2 possible directions: * 'u' - fill up, mathematically this corresponds to lower triangular matrix * 'l' - fill down, mathematically this corresponds to upper triangular matrix */ void setValueInDiagMatrix(const T& value, const int diag, const char direction); /** * change an array by repeating it the number of times in order to acquire new shape equal to the input shape * * shape - contains new shape to broadcast array to * target - optional argument, if target != nullptr the resulting array will be placed in target, in opposite case tile operation is done in place */ void tileToShape(const std::vector<Nd4jLong>& shape, NDArray<T>* target = nullptr); void tileToShape(const std::initializer_list<Nd4jLong>& shape, NDArray<T>* target = nullptr); template <typename N> NDArray<N>* asT(); /** * calculates the trace of an array, that is sum of elements on main diagonal = sum array[i, i, i, ...] */ T getTrace() const; /** * fill array linearly as follows: arr[0] = from, arr[1] = from+step, arr[2] = from+2*step, ... */ void linspace(const T from, const T step = 1.0f); NDArray<T>* createUninitialized() const; ResultSet<T>* multipleTensorsAlongDimension(const std::vector<int>& indices, const std::vector<int>& dimensions) const; ResultSet<T>* allTensorsAlongDimension(const std::vector<int>& dimensions) const; ResultSet<T>* allTensorsAlongDimension(const std::initializer_list<int>& dimensions) const; ResultSet<T>* allExamples()const ; template <typename OpName> void saveResultOfBroadcast(const NDArray<T>& x, const NDArray<T>& y, const bool checkThisShape = false); /** * default destructor */ ~NDArray() noexcept; /** * set _shapeInfo */ FORCEINLINE void setShapeInfo(Nd4jLong *shapeInfo); /** * set _buffer */ FORCEINLINE void setBuffer(T* buffer); /** * set _isBuffAlloc and _isShapeAlloc */ FORCEINLINE void triggerAllocationFlag(bool bufferAllocated, bool shapeAllocated); /** * returns the value of "dim" dimension */ Nd4jLong sizeAt(const int dim) const; /** * returns order of array */ FORCEINLINE char ordering() const; /** * return _isView */ FORCEINLINE bool isView(); /** * returns shape portion of shapeInfo */ FORCEINLINE Nd4jLong* shapeOf() const; /** * returns strides portion of shapeInfo */ FORCEINLINE Nd4jLong* stridesOf() const; /** * returns rank of array */ FORCEINLINE int rankOf() const; /** * returns length of array */ FORCEINLINE Nd4jLong lengthOf() const; /** * returns number of rows in array */ FORCEINLINE Nd4jLong rows() const; /** * returns number of columns in array */ FORCEINLINE Nd4jLong columns() const; /** * returns size of array elements type */ FORCEINLINE int sizeOfT() const; /** * returns element-wise-stride */ FORCEINLINE Nd4jLong ews() const; // returns true if arrays have same shape FORCEINLINE bool isSameShape(const NDArray<T> *other) const; FORCEINLINE bool isSameShape(NDArray<T> &other) const; FORCEINLINE bool isSameShape(const std::initializer_list<Nd4jLong>& shape) const; FORCEINLINE bool isSameShape(const std::vector<Nd4jLong>& shape) const; /** * returns true if these two NDArrays have same rank, dimensions, strides, ews and order */ FORCEINLINE bool isSameShapeStrict(const NDArray<T> *other) const; /** * returns true if buffer && shapeInfo were defined (non nullptr) */ FORCEINLINE bool nonNull() const; /** * returns array element with given index from linear buffer * i - element index in array */ FORCEINLINE T getScalar(const Nd4jLong i) const; /** * returns array element with given index, takes into account offset between elements (element-wise-stride) * i 
- element index in array */ FORCEINLINE T getIndexedScalar(const Nd4jLong i) const; /** * returns element with given indexes from 2D array * i - number of row * j - number of column */ FORCEINLINE T getScalar(const Nd4jLong i, const Nd4jLong j) const; /** * returns element with given indexes from 3D array * i - height * j - width * k - depth */ FORCEINLINE T getScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const; /** * assigns given scalar to array element by given index, takes into account offset between elements (element-wise-stride) * i - element index in array * value - scalar value to assign */ FORCEINLINE void putIndexedScalar(const Nd4jLong i, const T value); /** * assigns given scalar to array element by given index, regards array buffer as linear * i - element index in array * value - scalar value to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const T value); /** * assigns given scalar to 2D array element by given indexes * i - number of row * j - number of column * value - scalar value to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const Nd4jLong j, const T value); /** * assigns given scalar to 3D array element by given indexes * i - height * j - width * k - depth * value - scalar value to assign */ FORCEINLINE void putScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const T value); /** * returns true if array is 2D */ FORCEINLINE bool isMatrix() const; /** * returns true if array is vector */ FORCEINLINE bool isVector() const; /** * returns true if array is column vector */ FORCEINLINE bool isColumnVector() const; /** * returns true if array is row vector */ FORCEINLINE bool isRowVector() const; /** * returns true if array is scalar */ FORCEINLINE bool isScalar() const; /** * inline accessing operator for matrix, i - absolute index */ FORCEINLINE T operator()(const Nd4jLong i) const; /** * inline modifying operator for matrix, i - absolute index */ FORCEINLINE T& operator()(const Nd4jLong i); /** * inline accessing operator for 2D array, i - row, j - column */ FORCEINLINE T operator()(const Nd4jLong i, const Nd4jLong j) const; /** * inline modifying operator for 2D array, i - row, j - column */ FORCEINLINE T& operator()(const Nd4jLong i, const Nd4jLong j); /** * inline accessing operator for 3D array, i - height, j - width, k - depth */ FORCEINLINE T operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const; /** * inline modifying operator for 3D array, i - height, j - width, k - depth */ FORCEINLINE T& operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k); /** * inline modifying operator for 4D array, t, u, v, w - 4D coordinates */ FORCEINLINE T& operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w); /** * inline accessing operator for 4D array, t, u, v, w - 4D coordinates */ FORCEINLINE T operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) const; template <typename T2> FORCEINLINE std::vector<T2> asVectorT(); FORCEINLINE bool isAttached(); NDArray<T>* detach(); FORCEINLINE bool operator == (const NDArray<T> &other) const; }; ////////////////////////////////////////////////////////////////////////// ///// IMPLEMENTATION OF INLINE METHODS ///// ////////////////////////////////////////////////////////////////////////// template <typename T> template <typename T2> std::vector<T2> NDArray<T>::asVectorT() { std::vector<T2> result(this->lengthOf()); #pragma omp parallel for simd for (int e = 0; e < this->lengthOf(); e++) result[e] =
static_cast<T2>(this->getIndexedScalar(e)); return result; } template<typename T> bool NDArray<T>::isAttached() { return this->_workspace != nullptr; } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::setShapeInfo(Nd4jLong *shapeInfo) { if(_isShapeAlloc && _workspace == nullptr) delete []_shapeInfo; _shapeInfo = shapeInfo; _isShapeAlloc = false; if (shapeInfo != nullptr) this->_length = shape::length(shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::setBuffer(T* buffer) { if(_isBuffAlloc && _workspace == nullptr) delete []_buffer; _buffer = buffer; _isBuffAlloc = false; } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::triggerAllocationFlag(bool bufferAllocated, bool shapeAllocated) { _isBuffAlloc = bufferAllocated; _isShapeAlloc = shapeAllocated; } ////////////////////////////////////////////////////////////////////////// template<typename T> char NDArray<T>::ordering() const { return shape::order(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isView() { return _isView; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong* NDArray<T>::shapeOf() const { return shape::shapeOf(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong* NDArray<T>::stridesOf() const { return shape::stride(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> int NDArray<T>::rankOf() const { if (isEmpty()) return 0; return shape::rank(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::lengthOf() const { return _length; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::rows() const { if (this->rankOf() == 1) return 1; if (this->rankOf() > 2) throw std::runtime_error("Array with rank > 2 can't have rows"); return shapeOf()[0]; } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::columns() const { if (this->rankOf() == 1) return this->lengthOf(); if (this->rankOf() > 2) throw std::runtime_error("Array with rank > 2 can't have columns"); return shapeOf()[1]; } ////////////////////////////////////////////////////////////////////////// template<typename T> int NDArray<T>::sizeOfT() const { return sizeof(T); } ////////////////////////////////////////////////////////////////////////// template<typename T> Nd4jLong NDArray<T>::ews() const { if (this->isEmpty() || this->rankOf() == 0) return 1; return shape::elementWiseStride(_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::nonNull() const { if (isEmpty()) return true; return this->_buffer != nullptr && this->_shapeInfo != nullptr; } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isMatrix() const { if (isEmpty()) return false; return shape::isMatrix(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isVector() const { if (isEmpty()) return false; return !isScalar() && 
shape::isVector(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isColumnVector() const { if (isEmpty()) return false; return !isScalar() && shape::isColumnVector(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isRowVector() const { if (isEmpty()) return false; // 1D edge case if (shape::rank(this->_shapeInfo) == 1) return true; return !isScalar() && shape::isRowVector(this->_shapeInfo); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isScalar() const { return shape::isScalar(this->_shapeInfo); } // accessing operator for matrix, i - absolute index template<typename T> T NDArray<T>::operator()(const Nd4jLong i) const { if (i >= shape::length(_shapeInfo)) throw std::invalid_argument("NDArray::operator(i): input index is out of array length !"); auto ews = shape::elementWiseStride(_shapeInfo); char order = ordering(); if(ews == 1 && order == 'c') return _buffer[i]; else if(ews > 1 && order == 'c') return _buffer[i*ews]; else { Nd4jLong idx[MAX_RANK]; shape::ind2subC(rankOf(), shapeOf(), i, idx); Nd4jLong offset = shape::getOffset(0, shapeOf(), stridesOf(), idx, rankOf()); return _buffer[offset]; } } ////////////////////////////////////////////////////////////////////////// // modifying operator for matrix, i - absolute index template<typename T> T& NDArray<T>::operator()(const Nd4jLong i) { if (i >= shape::length(_shapeInfo)) throw std::invalid_argument("NDArray::operator(i): input index is out of array length !"); auto ews = shape::elementWiseStride(_shapeInfo); auto order = ordering(); if(ews == 1 && order == 'c') return _buffer[i]; else if(ews > 1 && order == 'c') return _buffer[i*ews]; else { Nd4jLong idx[MAX_RANK]; shape::ind2subC(rankOf(), shapeOf(), i, idx); auto offset = shape::getOffset(0, shapeOf(), stridesOf(), idx, rankOf()); return _buffer[offset]; } } ////////////////////////////////////////////////////////////////////////// // accessing operator for 2D matrix, i - row, j - column template<typename T> T NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j) const { if (rankOf() != 2 || i >= shapeOf()[0] || j >= shapeOf()[1]) throw std::invalid_argument("NDArray::operator(i,j): one of input indexes is out of array length or rank!=2 !"); Nd4jLong coords[2] = {i, j}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // modifying operator for 2D matrix, i - row, j - column template<typename T> T& NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j) { if (rankOf() != 2 || i >= shapeOf()[0] || j >= shapeOf()[1]) throw std::invalid_argument("NDArray::operator(i,j): one of input indexes is out of array length or rank!=2 !"); Nd4jLong coords[2] = {i, j}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // accessing operator for 3D array, i - height, j - width, k - depth template<typename T> T NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const { if (rankOf() != 3 || i >= shapeOf()[0] || j >= shapeOf()[1] || k >= shapeOf()[2]) throw std::invalid_argument("NDArray::operator(i,j,k): one of input indexes is out of array length or rank!=3 !"); Nd4jLong coords[3] = {i, j,
k}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // modifying operator for 3D array template<typename T> T& NDArray<T>::operator()(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) { if (rankOf() != 3 || i >= shapeOf()[0] || j >= shapeOf()[1] || k >= shapeOf()[2]) throw std::invalid_argument("NDArray::operator(i,j,k): one of input indexes is out of array length or rank!=3 !"); Nd4jLong coords[3] = {i, j, k}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } template<typename T> T NDArray<T>::operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) const { if (rankOf() != 4 || t >= shapeOf()[0] || u >= shapeOf()[1] || v >= shapeOf()[2] || w >= shapeOf()[3]) throw std::invalid_argument("NDArray::operator(t,u,v,w): one of input indexes is out of array length or rank!=4 !"); Nd4jLong coords[4] = {t, u, v, w}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } template<typename T> T& NDArray<T>::operator()(const Nd4jLong t, const Nd4jLong u, const Nd4jLong v, const Nd4jLong w) { if (rankOf() != 4 || t >= shapeOf()[0] || u >= shapeOf()[1] || v >= shapeOf()[2] || w >= shapeOf()[3]) throw std::invalid_argument("NDArray::operator(t,u,v,w): one of input indexes is out of array length or rank!=4 !"); Nd4jLong coords[4] = {t, u, v, w}; auto xOffset = shape::getOffset(0, shapeOf(), stridesOf(), coords, rankOf()); return _buffer[xOffset]; } ////////////////////////////////////////////////////////////////////////// // Return value from linear buffer template<typename T> T NDArray<T>::getScalar(const Nd4jLong i) const { return (*this)(i); } ////////////////////////////////////////////////////////////////////////// template<typename T> T NDArray<T>::getIndexedScalar(const Nd4jLong i) const { return (*this)(i); } ////////////////////////////////////////////////////////////////////////// // Returns value from 2D matrix by coordinates/indexes template<typename T> T NDArray<T>::getScalar(const Nd4jLong i, const Nd4jLong j) const { return (*this)(i, j); } ////////////////////////////////////////////////////////////////////////// // returns value from 3D tensor by coordinates template<typename T> T NDArray<T>::getScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k) const { return (*this)(i, j, k); } ////////////////////////////////////////////////////////////////////////// template<typename T> void NDArray<T>::putIndexedScalar(const Nd4jLong i, const T value) { (*this)(i) = value; } ////////////////////////////////////////////////////////////////////////// // This method sets value in linear buffer to position i template<typename T> void NDArray<T>::putScalar(const Nd4jLong i, const T value) { (*this)(i) = value; } ////////////////////////////////////////////////////////////////////////// // This method sets value in 2D matrix to position i, j template<typename T> void NDArray<T>::putScalar(const Nd4jLong i, const Nd4jLong j, const T value) { (*this)(i,j) = value; } ////////////////////////////////////////////////////////////////////////// // This method sets value in 3D matrix to position i,j,k template<typename T> void NDArray<T>::putScalar(const Nd4jLong i, const Nd4jLong j, const Nd4jLong k, const T value) { (*this)(i,j,k) = value; } ////////////////////////////////////////////////////////////////////////// template<typename T> 
Nd4jLong NDArray<T>::memoryFootprint() { Nd4jLong size = this->lengthOf() * this->sizeOfT(); size += shape::shapeInfoByteLength(this->rankOf()); return size; } ////////////////////////////////////////////////////////////////////////// // still the definition of inline function must be in header file template<typename T> bool NDArray<T>::isSameShape(const std::vector<Nd4jLong>& shape) const{ if (this->isScalar() && shape.size() == 1 && shape[0] == 0) return true; if (this->rankOf() != (int) shape.size()) return false; for (int e = 0; e < this->rankOf(); e++) { if (this->shapeOf()[e] != shape.at(e) && shape.at(e) != -1) return false; } return true; } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isSameShape(const NDArray<T> *other) const { if (this->isEmpty() != other->isEmpty()) return false; return isSameShape(std::vector<Nd4jLong>(other->_shapeInfo+1, other->_shapeInfo+1+other->_shapeInfo[0])); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isSameShape(NDArray<T> &other) const { return isSameShape(&other); } ////////////////////////////////////////////////////////////////////////// template<typename T> bool NDArray<T>::isSameShape(const std::initializer_list<Nd4jLong>& other) const { return isSameShape(std::vector<Nd4jLong>(other)); } ////////////////////////////////////////////////////////////////////////// // returns true if these two NDArrays have same _shapeInfo // still the definition of inline function must be in header file template<typename T> bool NDArray<T>::isSameShapeStrict(const NDArray<T> *other) const { return shape::equalsStrict(_shapeInfo, other->_shapeInfo); } template<typename T> bool NDArray<T>::isEmpty() const { return ArrayOptions::arrayType(this->getShapeInfo()) == ArrayType::EMPTY; } template <typename T> bool NDArray<T>::operator ==(const NDArray<T> &other) const { if (!this->isSameShape(&other)) return false; return this->equalsTo(&other); } } #endif
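// Usage sketch (illustrative, not part of this header): how the inline
// accessors and shape helpers declared above fit together. Assumes the
// out-of-line NDArray implementations are linked in; the names and the
// shape below are arbitrary examples.
//
//     nd4j::NDArray<float> a('c', {2, 3});      // rank-2 array in c order
//     a.putScalar(0, 1, 42.0f);                 // write element (row 0, column 1)
//     float v = a(0, 1);                        // read it back, v == 42.0f
//     bool ok = a.isSameShape({2, 3});          // true
//     nd4j::NDArray<float>* t = a.transpose();  // new 3x2 array, caller owns it
//     delete t;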
omp_single_nowait.c
<ompts:test>
<ompts:testdescription></ompts:testdescription>
<ompts:ompversion>2.0</ompts:ompversion>
<ompts:directive>omp single nowait</ompts:directive>
<ompts:dependences>omp critical,omp atomic</ompts:dependences>
<ompts:testcode>
#include <stdio.h>
#include "omp_testsuite.h"

int my_iterations;
#pragma omp threadprivate(my_iterations)

int <ompts:testcode:functionname>omp_single_nowait</ompts:testcode:functionname>(FILE * logFile)
{
    <ompts:orphan:vars>
    int nr_iterations;
    </ompts:orphan:vars>

    int total_iterations = 0;
    int i;

    nr_iterations = 0;
    my_iterations = 0;

#pragma omp parallel private(i)
    {
        for (i = 0; i < LOOPCOUNT; i++)
        {
            <ompts:orphan>
            <ompts:check>#pragma omp single nowait</ompts:check>
            {
#pragma omp atomic
                nr_iterations++;
            } /* end of single */
            </ompts:orphan>
        } /* end of for */
    } /* end of parallel */

#pragma omp parallel private(i)
    {
        my_iterations = 0;
        for (i = 0; i < LOOPCOUNT; i++)
        {
            <ompts:orphan>
            <ompts:check>#pragma omp single nowait</ompts:check>
            {
                my_iterations++;
            } /* end of single */
            </ompts:orphan>
        } /* end of for */
#pragma omp critical
        {
            total_iterations += my_iterations;
        }
    } /* end of parallel */
    return ((nr_iterations == LOOPCOUNT) && (total_iterations == LOOPCOUNT));
} /* end of check_single_nowait */
</ompts:testcode>
</ompts:test>
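/*
  Editor's sketch (not part of the test suite): a minimal, self-contained
  program showing the property the test above checks -- each `single`
  construct inside a parallel region is executed by exactly one thread, and
  `nowait` removes the implied barrier at its end.  LOOPS and the output
  format are arbitrary choices.
*/
#include <omp.h>
#include <stdio.h>

#define LOOPS 100

int main(void)
{
    int hits = 0;
#pragma omp parallel
    {
        int i;
        /* every thread encounters LOOPS single constructs, in the same order */
        for (i = 0; i < LOOPS; i++)
        {
#pragma omp single nowait
            {
#pragma omp atomic
                hits++;   /* executed once per iteration, by one thread */
            }
        }
    }
    printf("singles executed: %d (expected %d)\n", hits, LOOPS);
    return hits == LOOPS ? 0 : 1;
}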
maxpool_with_mask.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

/*
 * Highly specialized code, only works for TP3 L1
 */

#pragma once

#include <algorithm>
#include <limits>
#include <vector>

#include "core/common/common.h"
#include "core/framework/op_kernel.h"
#include "core/framework/tensor.h"
#include "core/providers/cpu/nn/pool_base.h"

namespace onnxruntime {
namespace contrib {

class MaxpoolWithMask : public OpKernel, public PoolBase {
 public:
  MaxpoolWithMask(const OpKernelInfo& info) : OpKernel(info), PoolBase(info) {}

  Status Compute(OpKernelContext* context) const override {
    const Tensor* X = context->Input<Tensor>(0);
    const Tensor* M = context->Input<Tensor>(1);
    const TensorShape& x_shape = X->Shape();
    const TensorShape& m_shape = M->Shape();
    ORT_RETURN_IF_NOT(x_shape.NumDimensions() >= 3, "Input dimension cannot be less than 3.");
    //TODO: fix this checker later
    //ONNXRUNTIME_RETURN_IF_NOT((x_shape[2] == m_shape[2]) && (x_shape[3] == m_shape[3]), " Input shape and mask shape mismatch: ", x_shape, " vs ", m_shape);

    std::vector<int64_t> pads = pads_;
    std::vector<int64_t> kernel_shape = kernel_shape_;

    std::vector<int64_t> output_dims = PoolBase::SetOutputSize(x_shape, x_shape[1], &pads, dilations_, ceil_mode_);
    Tensor* Y = context->Output(0, TensorShape(output_dims));

    const float* X_data = X->template Data<float>();
    const int32_t* M_data = M->template Data<int32_t>();
    float* Y_data = Y->template MutableData<float>();

    // The main loop
    int64_t channels = x_shape[1];
    int64_t height = x_shape[2];
    int64_t width = kernel_shape.size() > 1 ? x_shape[3] : 1;
    int64_t depth = kernel_shape.size() > 2 ? x_shape[4] : 1;
    int64_t pooled_height = output_dims[2];
    int64_t pooled_width = kernel_shape.size() > 1 ? output_dims[3] : 1;
    int64_t pooled_depth = kernel_shape.size() > 2 ? output_dims[4] : 1;

    switch (kernel_shape.size()) {
      case 1: {
        int64_t x_step = height;
        int64_t y_step = pooled_height;
        const int64_t total_channels = x_shape[0] * channels;
        const int64_t total_mask_channels = m_shape[0] * m_shape[1];

#ifdef USE_OPENMP
#pragma omp parallel for
#endif
        for (int64_t c = 0; c < total_channels; ++c) {
          const float* x_d = X_data + c * x_step;
          const int32_t* m_d = M_data + (c * x_step) % total_mask_channels;
          float* y_d = Y_data + c * y_step;

          for (int64_t ph = 0; ph < pooled_height; ++ph) {
            int64_t hstart = ph * stride_h() - pads[0];
            int64_t hend = std::min(hstart + kernel_shape[0], height);
            hstart = std::max(hstart, static_cast<int64_t>(0));
            float Yh = std::numeric_limits<float>::lowest();
            for (int64_t h = hstart; h < hend; ++h) {
              if (h >= 0 && m_d[h] == 0) break;  // if mask == 0, stop
              if (x_d[h] > Yh) {
                Yh = x_d[h];
              }
            }
            y_d[ph] = Yh;
          }
        }
        break;
      }
      case 2: {
        int64_t x_step = height * width;
        int64_t y_step = pooled_height * pooled_width;
        const int64_t total_channels = x_shape[0] * channels;
        const int64_t total_mask_channels = m_shape[0] * m_shape[1];

#ifdef USE_OPENMP
#pragma omp parallel for
#endif
        for (int64_t c = 0; c < total_channels; ++c) {
          const float* x_d = X_data + c * x_step;
          const int32_t* m_d = M_data + (c * x_step) % total_mask_channels;
          float* y_d = Y_data + c * y_step;

          for (int64_t ph = 0; ph < pooled_height; ++ph) {
            int64_t hstart = ph * stride_h() - pads[0];
            int64_t hend = std::min(hstart + kernel_shape[0], height);
            hstart = std::max(hstart, static_cast<int64_t>(0));
            for (int64_t pw = 0; pw < pooled_width; ++pw) {
              int64_t wstart = pw * stride_w() - pads[1];
              int64_t wend = std::min(wstart + kernel_shape[1], width);
              wstart = std::max(wstart, static_cast<int64_t>(0));
              const int64_t pool_index = ph * pooled_width + pw;
              float Yh = std::numeric_limits<float>::lowest();
              for (int64_t h = hstart; h < hend; ++h) {
                for (int64_t w = wstart; w < wend; ++w) {
                  const int64_t input_index = h * width + w;
                  if (input_index > 0 && m_d[input_index] == 0) break;  // if mask == 0, break
                  if (x_d[input_index] > Yh) {
                    Yh = x_d[input_index];
                  }
                }
              }
              y_d[pool_index] = Yh;
            }
          }
        }
        break;
      }
      case 3: {
        int64_t x_step = height * width * depth;
        int64_t y_step = pooled_height * pooled_width * pooled_depth;
        const int64_t total_channels = x_shape[0] * channels;
        const int64_t total_mask_channels = m_shape[0] * m_shape[1];

#ifdef USE_OPENMP
#pragma omp parallel for
#endif
        for (int64_t c = 0; c < total_channels; ++c) {
          const float* x_d = X_data + c * x_step;
          const int32_t* m_d = M_data + (c * x_step) % total_mask_channels;
          float* y_d = Y_data + c * y_step;

          for (int64_t ph = 0; ph < pooled_height; ++ph) {
            int64_t hstart = ph * stride_h() - pads[0];
            int64_t hend = std::min(hstart + kernel_shape[0], height);
            hstart = std::max(hstart, static_cast<int64_t>(0));
            for (int64_t pw = 0; pw < pooled_width; ++pw) {
              int64_t wstart = pw * stride_w() - pads[1];
              int64_t wend = std::min(wstart + kernel_shape[1], width);
              wstart = std::max(wstart, static_cast<int64_t>(0));
              for (int64_t pd = 0; pd < pooled_depth; ++pd) {
                int64_t dstart = pd * stride_d() - pads[2];
                int64_t dend = std::min(dstart + kernel_shape[2], depth);
                dstart = std::max(dstart, static_cast<int64_t>(0));
                const int64_t pool_index = ph * pooled_width * pooled_depth + pw * pooled_depth + pd;
                float Yh = std::numeric_limits<float>::lowest();
                for (int64_t h = hstart; h < hend; ++h) {
                  for (int64_t w = wstart; w < wend; ++w) {
                    for (int64_t d = dstart; d < dend; ++d) {
                      const int64_t input_index = h * width * depth + w * depth + d;
                      if (input_index > 0 && m_d[input_index] == 0) break;  // if mask == 0, break
                      if (x_d[input_index] > Yh) {
                        Yh = x_d[input_index];
                      }
                    }
                  }
                }
                y_d[pool_index] = Yh;
              }
            }
          }
        }
        break;
      }
      default:
        return Status(common::ONNXRUNTIME, common::INVALID_ARGUMENT, "Unsupported pooling size : ");
    }
    return Status::OK();
  }
};
}  // namespace contrib
}  // namespace onnxruntime
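/*
  Editor's sketch (assumptions flagged): a scalar, dependency-free rendering
  of the masked-max recurrence the three kernel ranks above share, written out
  for the 1-D case.  `x`, `mask`, `kernel`, `stride`, and `pad` are
  hypothetical stand-ins, not part of the operator; the point is that the
  window scan stops at the first mask == 0 position, like the `break` in the
  kernels.
*/
#include <algorithm>
#include <cstdint>
#include <limits>
#include <vector>

inline std::vector<float> maxpool1d_with_mask(const std::vector<float>& x,
                                              const std::vector<int32_t>& mask,
                                              int64_t kernel, int64_t stride,
                                              int64_t pad) {
  const int64_t height = static_cast<int64_t>(x.size());
  const int64_t pooled = (height + 2 * pad - kernel) / stride + 1;
  std::vector<float> y(pooled, std::numeric_limits<float>::lowest());
  for (int64_t ph = 0; ph < pooled; ++ph) {
    int64_t hstart = ph * stride - pad;
    int64_t hend = std::min(hstart + kernel, height);
    hstart = std::max<int64_t>(hstart, 0);
    for (int64_t h = hstart; h < hend; ++h) {
      if (mask[h] == 0) break;         // mask == 0 ends the window scan
      y[ph] = std::max(y[ph], x[h]);   // otherwise take the running max
    }
  }
  return y;
}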
GB_unaryop__lnot_bool_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function: GB_unop__lnot_bool_int32
// op(A') function: GB_tran__lnot_bool_int32

// C type:   bool
// A type:   int32_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = !aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = !x ;

// casting
#define GB_CASTING(z, x) \
    bool z = (bool) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;           \
    GB_OP (GB_CX (pC), x) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_BOOL || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_bool_int32
(
    bool *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_bool_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
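//------------------------------------------------------------------------------
// Editor's note (a paraphrase, not generated code): for readers tracing the
// macro machinery above, the loop body of GB_unop__lnot_bool_int32 expands,
// step by step, to the straight-line code below (OpenMP pragma omitted).
//
//      for (int64_t p = 0 ; p < anz ; p++)
//      {
//          int32_t aij = Ax [p] ;      // GB_GETA (aij, Ax, p)
//          bool x = (bool) aij ;       // GB_CASTING (x, aij)
//          Cx [p] = !x ;               // GB_OP (GB_CX (p), x)
//      }
//------------------------------------------------------------------------------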
hashOMP.h
#include <iostream>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <vector>
#include <cstdio>
#include <omp.h>
#include <bits/stdc++.h>
#include "e2r.h"

using namespace std;

void hash_table(elementToprocessor &obj)
{
    omp_set_num_threads(8);

    char filename[50];
    int rank;
    int element;
    int count1 = 0;
    ifstream input("65536r2e.csv");
    ifstream input1("65536r2e.csv");
    ifstream input2("65536r2e.csv");
    vector<string> result;
    int col_count = 0;
    int line_count = 0;
    int x = 0;
    int i = 0;
    int max_el = 0;
    int max = 0;
    int z = 0;
    int flag = 0;

    // First pass: count the number of lines in the input file (rows) and the
    // widest row (columns, via the comma count).
    for (string line; getline(input, line);) {
        //cout << line.size() << endl;
        line_count++;
        for (int i = 0; i < line.size(); i++) {
            if (line[i] == ',')
                col_count++;
        }
        col_count++;
        if (col_count > max) {
            max = col_count;
        }
        col_count = 0;
    }

    int arr[line_count][max];  // VLA (GCC extension)
    string str;
    int row = 0;
    int column = 0;

    // Second pass: parse the comma-separated integers into arr, padding short
    // rows with zeros and tracking the largest element seen.
    for (string line; getline(input1, line);) {
        for (int z = 0; z < line.size(); z++) {
            if (line[z] == ',') {
                flag = 1;
            }
            if (flag == 1) {
                arr[row][column] = stoi(str);
                if (stoi(str) > max_el) {
                    max_el = stoi(str);
                }
                flag = 0;
                str = "";
                column++;
            } else {
                str = str + line[z];
            }
        }
        arr[row][column] = stoi(str);
        if (stoi(str) > max_el) {
            max_el = stoi(str);
        }
        for (int j = column + 1; j < max; j++) {
            arr[row][j] = stoi("0");
        }
        str = "";
        row++;
        column = 0;
    }
    //cout << "Max " << max_el << endl;

    int counter = 0;
    int RowRank = 0;
    int RankArray[max_el + 1], RankArrayarr[max_el + 1];

    // Build the element -> rank table: column 0 of each row is the rank, the
    // remaining columns are the elements mapped to it.  RowRank must be
    // private: each row writes it at j == 0 and reads it for the rest of the
    // row, so sharing it across threads would be a data race.
    #pragma omp parallel for private(RowRank)
    for (int i = 0; i < line_count; i++) {
        if (omp_get_thread_num() == 1) {
            count1++;
        }
        for (int j = 0; j < max; j++) {
            if (j == 0) {
                RowRank = arr[i][j];
            } else if (j != 0 && arr[i][j] != 0) {
                RankArrayarr[arr[i][j]] = RowRank;
            }
        }
    }
    cout << "Rows handled by thread 1: " << count1 << endl;

    obj.mem_allocate(max_el + 1);
    obj.hashArray(RankArrayarr);
}
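/*
  Editor's note, inferred from the parsing loops above (treat the file format
  as an assumption): each row of "65536r2e.csv" reads "rank,element,element,...",
  e.g.

      3,17,42
      5,8

  which yields RankArrayarr[17] = 3, RankArrayarr[42] = 3, and
  RankArrayarr[8] = 5 -- a lookup table from every element to the rank in the
  first column of its row, which is then handed to elementToprocessor.
*/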
Array3D.h
//
// Array3D.h
// Copyright (c) 2016 Pixel Grammar, LLC
// author: Douglas Creel
//

#ifndef ARRAY3D_H
#define ARRAY3D_H

#include <cassert>
#include <iostream>
#include <vector>

template <typename T>
class Array3D
{
public:
    Array3D() { }
    ~Array3D() { }

    void init(int w, int h, int d, T val)
    {
        m_width = w;
        m_height = h;
        m_depth = d;
        grid.resize(w);
#pragma omp parallel for
        for (int i = 0; i < w; i++)
        {
            grid[i].resize(h);
            for (int j = 0; j < h; j++)
            {
                grid[i][j].resize(d);
                for (int k = 0; k < d; k++)
                {
                    grid[i][j][k] = val;
                }
            }
        }
    }

    int getWidth() { return m_width; }
    int getHeight() { return m_height; }
    int getDepth() { return m_depth; }

    void set(T f, int w, int h, int d) { grid[w][h][d] = f; }

    // Note: the original returned a reference to a local double on
    // out-of-range access (a dangling reference of the wrong type, and it
    // did not compile for general T because of the stray `typename`);
    // bounds are now enforced by the asserts alone.
    T& operator() (int w, int h, int d)
    {
        assert(w >= 0 && w < m_width);
        assert(h >= 0 && h < m_height);
        assert(d >= 0 && d < m_depth);
        return grid[w][h][d];
    }

private:
    std::vector<std::vector<std::vector<T> > > grid;
    int m_width, m_height, m_depth;
};

#endif
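/*
  Editor's usage sketch (assumes Array3D.h above is included; the sizes and
  values are arbitrary).
*/
#include <cstdio>

inline void array3d_example()
{
    Array3D<float> a;
    a.init(4, 3, 2, 0.0f);            // allocate a 4x3x2 grid filled with 0
    a.set(1.5f, 1, 2, 0);             // write via set(value, w, h, d)
    std::printf("%f\n", a(1, 2, 0));  // read back via operator()
}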
vision.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % V V IIIII SSSSS IIIII OOO N N % % V V I SS I O O NN N % % V V I SSS I O O N N N % % V V I SS I O O N NN % % V IIIII SSSSS IIIII OOO N N % % % % % % MagickCore Computer Vision Methods % % % % Software Design % % Cristy % % September 2014 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/matrix.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/morphology.h" #include "MagickCore/morphology-private.h" #include "MagickCore/opencl-private.h" #include "MagickCore/paint.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/resource_.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/vision.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o n n e c t e d C o m p o n e n t s I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConnectedComponentsImage() returns the connected-components of the image % uniquely labeled. The returned connected components image colors member % defines the number of unique objects. Choose from 4 or 8-way connectivity. % % You are responsible for freeing the connected components objects resources % with this statement; % % objects = (CCObjectInfo *) RelinquishMagickMemory(objects); % % The format of the ConnectedComponentsImage method is: % % Image *ConnectedComponentsImage(const Image *image, % const size_t connectivity,CCObjectInfo **objects, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o connectivity: how many neighbors to visit, choose from 4 or 8. % % o objects: return the attributes of each unique object. 
% % o exception: return any errors or warnings in this structure. % */ static int CCObjectInfoCompare(const void *x,const void *y) { CCObjectInfo *p, *q; p=(CCObjectInfo *) x; q=(CCObjectInfo *) y; return((int) (q->area-(ssize_t) p->area)); } MagickExport Image *ConnectedComponentsImage(const Image *image, const size_t connectivity,CCObjectInfo **objects,ExceptionInfo *exception) { #define ConnectedComponentsImageTag "ConnectedComponents/Image" CacheView *component_view, *image_view, *object_view; CCObjectInfo *object; char *c; const char *artifact, *metrics[CCMaxMetrics]; double max_threshold, min_threshold; Image *component_image; MagickBooleanType status; MagickOffsetType progress; MatrixInfo *equivalences; RectangleInfo bounding_box; register ssize_t i; size_t size; ssize_t background_id, connect4[2][2] = { { -1, 0 }, { 0, -1 } }, connect8[4][2] = { { -1, -1 }, { -1, 0 }, { -1, 1 }, { 0, -1 } }, dx, dy, first, last, n, step, y; /* Initialize connected components image attributes. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (objects != (CCObjectInfo **) NULL) *objects=(CCObjectInfo *) NULL; component_image=CloneImage(image,0,0,MagickTrue,exception); if (component_image == (Image *) NULL) return((Image *) NULL); component_image->depth=MAGICKCORE_QUANTUM_DEPTH; if (AcquireImageColormap(component_image,MaxColormapSize,exception) == MagickFalse) { component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } /* Initialize connected components equivalences. */ size=image->columns*image->rows; if (image->columns != (size/image->rows)) { component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } equivalences=AcquireMatrixInfo(size,1,sizeof(ssize_t),exception); if (equivalences == (MatrixInfo *) NULL) { component_image=DestroyImage(component_image); return((Image *) NULL); } for (n=0; n < (ssize_t) (image->columns*image->rows); n++) (void) SetMatrixElement(equivalences,n,0,&n); object=(CCObjectInfo *) AcquireQuantumMemory(MaxColormapSize,sizeof(*object)); if (object == (CCObjectInfo *) NULL) { equivalences=DestroyMatrixInfo(equivalences); component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memset(object,0,MaxColormapSize*sizeof(*object)); for (i=0; i < (ssize_t) MaxColormapSize; i++) { object[i].id=i; object[i].bounding_box.x=(ssize_t) image->columns; object[i].bounding_box.y=(ssize_t) image->rows; GetPixelInfo(image,&object[i].color); } /* Find connected components. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++) { if (status == MagickFalse) continue; dx=connectivity > 4 ? connect8[n][1] : connect4[n][1]; dy=connectivity > 4 ? 
connect8[n][0] : connect4[n][0]; for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y-1,image->columns,3,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } p+=GetPixelChannels(image)*image->columns; for (x=0; x < (ssize_t) image->columns; x++) { PixelInfo pixel, target; ssize_t neighbor_offset, obj, offset, ox, oy, root; /* Is neighbor an authentic pixel and a different color than the pixel? */ GetPixelInfoPixel(image,p,&pixel); if (((x+dx) < 0) || ((x+dx) >= (ssize_t) image->columns) || ((y+dy) < 0) || ((y+dy) >= (ssize_t) image->rows)) { p+=GetPixelChannels(image); continue; } neighbor_offset=dy*(GetPixelChannels(image)*image->columns)+dx* GetPixelChannels(image); GetPixelInfoPixel(image,p+neighbor_offset,&target); if (IsFuzzyEquivalencePixelInfo(&pixel,&target) == MagickFalse) { p+=GetPixelChannels(image); continue; } /* Resolve this equivalence. */ offset=y*image->columns+x; neighbor_offset=dy*image->columns+dx; ox=offset; status=GetMatrixElement(equivalences,ox,0,&obj); while (obj != ox) { ox=obj; status=GetMatrixElement(equivalences,ox,0,&obj); } oy=offset+neighbor_offset; status=GetMatrixElement(equivalences,oy,0,&obj); while (obj != oy) { oy=obj; status=GetMatrixElement(equivalences,oy,0,&obj); } if (ox < oy) { status=SetMatrixElement(equivalences,oy,0,&ox); root=ox; } else { status=SetMatrixElement(equivalences,ox,0,&oy); root=oy; } ox=offset; status=GetMatrixElement(equivalences,ox,0,&obj); while (obj != root) { status=GetMatrixElement(equivalences,ox,0,&obj); status=SetMatrixElement(equivalences,ox,0,&root); } oy=offset+neighbor_offset; status=GetMatrixElement(equivalences,oy,0,&obj); while (obj != root) { status=GetMatrixElement(equivalences,oy,0,&obj); status=SetMatrixElement(equivalences,oy,0,&root); } status=SetMatrixElement(equivalences,y*image->columns+x,0,&root); p+=GetPixelChannels(image); } } } /* Label connected components. 
*/ n=0; component_view=AcquireAuthenticCacheView(component_image,exception); for (y=0; y < (ssize_t) component_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(component_view,0,y,component_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) component_image->columns; x++) { ssize_t id, offset; offset=y*image->columns+x; status=GetMatrixElement(equivalences,offset,0,&id); if (id != offset) status=GetMatrixElement(equivalences,id,0,&id); else { id=n++; if (id >= (ssize_t) MaxColormapSize) break; } status=SetMatrixElement(equivalences,offset,0,&id); if (x < object[id].bounding_box.x) object[id].bounding_box.x=x; if (x >= (ssize_t) object[id].bounding_box.width) object[id].bounding_box.width=(size_t) x; if (y < object[id].bounding_box.y) object[id].bounding_box.y=y; if (y >= (ssize_t) object[id].bounding_box.height) object[id].bounding_box.height=(size_t) y; object[id].color.red+=QuantumScale*GetPixelRed(image,p); object[id].color.green+=QuantumScale*GetPixelGreen(image,p); object[id].color.blue+=QuantumScale*GetPixelBlue(image,p); if (image->alpha_trait != UndefinedPixelTrait) object[id].color.alpha+=QuantumScale*GetPixelAlpha(image,p); if (image->colorspace == CMYKColorspace) object[id].color.black+=QuantumScale*GetPixelBlack(image,p); object[id].centroid.x+=x; object[id].centroid.y+=y; object[id].area++; SetPixelIndex(component_image,(Quantum) id,q); p+=GetPixelChannels(image); q+=GetPixelChannels(component_image); } if (n > (ssize_t) MaxColormapSize) break; if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; progress++; proceed=SetImageProgress(image,ConnectedComponentsImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } component_view=DestroyCacheView(component_view); image_view=DestroyCacheView(image_view); equivalences=DestroyMatrixInfo(equivalences); if (n > (ssize_t) MaxColormapSize) { object=(CCObjectInfo *) RelinquishMagickMemory(object); component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"TooManyObjects"); } background_id=0; min_threshold=0.0; max_threshold=0.0; component_image->colors=(size_t) n; for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width-=(object[i].bounding_box.x-1); object[i].bounding_box.height-=(object[i].bounding_box.y-1); object[i].color.red/=(QuantumScale*object[i].area); object[i].color.green/=(QuantumScale*object[i].area); object[i].color.blue/=(QuantumScale*object[i].area); if (image->alpha_trait != UndefinedPixelTrait) object[i].color.alpha/=(QuantumScale*object[i].area); if (image->colorspace == CMYKColorspace) object[i].color.black/=(QuantumScale*object[i].area); object[i].centroid.x/=object[i].area; object[i].centroid.y/=object[i].area; max_threshold+=object[i].area; if (object[i].area > object[background_id].area) background_id=i; } max_threshold+=MagickEpsilon; n=(-1); artifact=GetImageArtifact(image,"connected-components:background-id"); if (artifact != (const char *) NULL) background_id=(ssize_t) StringToDouble(artifact,(char **) NULL); artifact=GetImageArtifact(image,"connected-components:area-threshold"); if (artifact 
!= (const char *) NULL) { /* Merge any object not within the min and max area threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].area < min_threshold) || (object[i].area >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:keep-colors"); if (artifact != (const char *) NULL) { register const char *p; /* Keep selected objects based on color, merge others. */ for (i=0; i < (ssize_t) component_image->colors; i++) object[i].merge=MagickTrue; for (p=artifact; ; ) { char color[MagickPathExtent]; PixelInfo pixel; register const char *q; for (q=p; *q != '\0'; q++) if (*q == ';') break; (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1, MagickPathExtent)); (void) QueryColorCompliance(color,AllCompliance,&pixel,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse) object[i].merge=MagickFalse; if (*q == '\0') break; p=q+1; } } artifact=GetImageArtifact(image,"connected-components:keep-ids"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"connected-components:keep"); if (artifact != (const char *) NULL) { /* Keep selected objects based on id, merge others. */ for (i=0; i < (ssize_t) component_image->colors; i++) object[i].merge=MagickTrue; for (c=(char *) artifact; *c != '\0'; ) { while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ',')) c++; first=(ssize_t) strtol(c,&c,10); if (first < 0) first+=(ssize_t) component_image->colors; last=first; while (isspace((int) ((unsigned char) *c)) != 0) c++; if (*c == '-') { last=(ssize_t) strtol(c+1,&c,10); if (last < 0) last+=(ssize_t) component_image->colors; } step=(ssize_t) (first > last ? -1 : 1); for ( ; first != (last+step); first+=step) object[first].merge=MagickFalse; } } artifact=GetImageArtifact(image,"connected-components:keep-top"); if (artifact != (const char *) NULL) { CCObjectInfo *top_objects; ssize_t top_ids; /* Keep top objects. */ top_ids=(ssize_t) StringToDouble(artifact,(char **) NULL); top_objects=(CCObjectInfo *) AcquireQuantumMemory(component_image->colors, sizeof(*top_objects)); if (top_objects == (CCObjectInfo *) NULL) { object=(CCObjectInfo *) RelinquishMagickMemory(object); component_image=DestroyImage(component_image); ThrowImageException(ResourceLimitError,"MemoryAllocationFailed"); } (void) memcpy(top_objects,object,component_image->colors*sizeof(*object)); qsort((void *) top_objects,component_image->colors,sizeof(*top_objects), CCObjectInfoCompare); for (i=top_ids+1; i < (ssize_t) component_image->colors; i++) object[top_objects[i].id].merge=MagickTrue; top_objects=(CCObjectInfo *) RelinquishMagickMemory(top_objects); } artifact=GetImageArtifact(image,"connected-components:remove-colors"); if (artifact != (const char *) NULL) { register const char *p; /* Remove selected objects based on color, keep others. 
*/ for (p=artifact; ; ) { char color[MagickPathExtent]; PixelInfo pixel; register const char *q; for (q=p; *q != '\0'; q++) if (*q == ';') break; (void) CopyMagickString(color,p,(size_t) MagickMin(q-p+1, MagickPathExtent)); (void) QueryColorCompliance(color,AllCompliance,&pixel,exception); for (i=0; i < (ssize_t) component_image->colors; i++) if (IsFuzzyEquivalencePixelInfo(&object[i].color,&pixel) != MagickFalse) object[i].merge=MagickTrue; if (*q == '\0') break; p=q+1; } } artifact=GetImageArtifact(image,"connected-components:remove-ids"); if (artifact == (const char *) NULL) artifact=GetImageArtifact(image,"connected-components:remove"); if (artifact != (const char *) NULL) for (c=(char *) artifact; *c != '\0'; ) { /* Remove selected objects based on id, keep others. */ while ((isspace((int) ((unsigned char) *c)) != 0) || (*c == ',')) c++; first=(ssize_t) strtol(c,&c,10); if (first < 0) first+=(ssize_t) component_image->colors; last=first; while (isspace((int) ((unsigned char) *c)) != 0) c++; if (*c == '-') { last=(ssize_t) strtol(c+1,&c,10); if (last < 0) last+=(ssize_t) component_image->colors; } step=(ssize_t) (first > last ? -1 : 1); for ( ; first != (last+step); first+=step) object[first].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:perimeter-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max perimeter threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="perimeter"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; RectangleInfo bounding_box; size_t pattern[4] = { 1, 0, 0, 0 }; ssize_t y; /* Compute perimeter of each object. */ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=(-1); y < (ssize_t) bounding_box.height+1; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1, bounding_box.y+y,bounding_box.width+2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=(-1); x < (ssize_t) bounding_box.width+1; x++) { Quantum pixels[4]; register ssize_t v; size_t foreground; /* An Algorithm for Calculating Objects’ Shape Features in Binary Images, Lifeng He, Yuyan Chao. 
*/ foreground=0; for (v=0; v < 2; v++) { register ssize_t u; for (u=0; u < 2; u++) { ssize_t offset; offset=v*(bounding_box.width+2)* GetPixelChannels(component_image)+u* GetPixelChannels(component_image); pixels[2*v+u]=GetPixelIndex(component_image,p+offset); if ((ssize_t) pixels[2*v+u] == i) foreground++; } } if (foreground == 1) pattern[1]++; else if (foreground == 2) { if ((((ssize_t) pixels[0] == i) && ((ssize_t) pixels[3] == i)) || (((ssize_t) pixels[1] == i) && ((ssize_t) pixels[2] == i))) pattern[0]++; /* diagonal */ else pattern[2]++; } else if (foreground == 3) pattern[3]++; p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[n]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+ MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5); } for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:circularity-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max circularity threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="circularity"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; RectangleInfo bounding_box; size_t pattern[4] = { 1, 0, 0, 0 }; ssize_t y; /* Compute perimeter of each object. */ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=(-1); y < (ssize_t) bounding_box.height; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x-1, bounding_box.y+y,bounding_box.width+2,2,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=(-1); x < (ssize_t) bounding_box.width; x++) { Quantum pixels[4]; register ssize_t v; size_t foreground; /* An Algorithm for Calculating Objects’ Shape Features in Binary Images, Lifeng He, Yuyan Chao. 
*/ foreground=0; for (v=0; v < 2; v++) { register ssize_t u; for (u=0; u < 2; u++) { ssize_t offset; offset=v*(bounding_box.width+2)* GetPixelChannels(component_image)+u* GetPixelChannels(component_image); pixels[2*v+u]=GetPixelIndex(component_image,p+offset); if ((ssize_t) pixels[2*v+u] == i) foreground++; } } if (foreground == 1) pattern[1]++; else if (foreground == 2) { if ((((ssize_t) pixels[0] == i) && ((ssize_t) pixels[3] == i)) || (((ssize_t) pixels[1] == i) && ((ssize_t) pixels[2] == i))) pattern[0]++; /* diagonal */ else pattern[2]++; } else if (foreground == 3) pattern[3]++; p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[n]=ceil(MagickSQ1_2*pattern[1]+1.0*pattern[2]+ MagickSQ1_2*pattern[3]+MagickSQ2*pattern[0]-0.5); object[i].metric[n]=4.0*MagickPI*object[i].area/(object[i].metric[n]* object[i].metric[n]); } for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:diameter-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max diameter threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="diameter"; for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].metric[n]=ceil(sqrt(4.0*object[i].area/MagickPI)-0.5); if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } } artifact=GetImageArtifact(image,"connected-components:major-axis-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max ellipse major threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="major-axis"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }; RectangleInfo bounding_box; register const Quantum *magick_restrict p; register ssize_t x; ssize_t y; /* Compute ellipse major axis of each object. 
*/ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[n]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+ sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); } for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:minor-axis-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max ellipse minor threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="minor-axis"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }; RectangleInfo bounding_box; register const Quantum *magick_restrict p; register ssize_t x; ssize_t y; /* Compute ellipse minor axis of each object. 
*/ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[n]=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)- sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); } for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image, "connected-components:eccentricity-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max eccentricity threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="eccentricity"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }, ellipse_axis = { 0.0, 0.0 }; RectangleInfo bounding_box; register const Quantum *magick_restrict p; register ssize_t x; ssize_t y; /* Compute eccentricity of each object. 
*/ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); ellipse_axis.x=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)+ sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); ellipse_axis.y=sqrt((2.0*PerceptibleReciprocal(M00))*((M20+M02)- sqrt(4.0*M11*M11+(M20-M02)*(M20-M02)))); object[i].metric[n]=sqrt(1.0-(ellipse_axis.y*ellipse_axis.y* PerceptibleReciprocal(ellipse_axis.x*ellipse_axis.x))); } for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } artifact=GetImageArtifact(image,"connected-components:angle-threshold"); if (artifact != (const char *) NULL) { /* Merge any object not within the min and max ellipse angle threshold. */ (void) sscanf(artifact,"%lf%*[ -]%lf",&min_threshold,&max_threshold); metrics[++n]="angle"; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic) shared(status) \ magick_number_threads(component_image,component_image,component_image->colors,1) #endif for (i=0; i < (ssize_t) component_image->colors; i++) { CacheView *component_view; double M00 = 0.0, M01 = 0.0, M02 = 0.0, M10 = 0.0, M11 = 0.0, M20 = 0.0; PointInfo centroid = { 0.0, 0.0 }; RectangleInfo bounding_box; register const Quantum *magick_restrict p; register ssize_t x; ssize_t y; /* Compute ellipse angle of each object. 
*/ if (status == MagickFalse) continue; component_view=AcquireAuthenticCacheView(component_image,exception); bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M00++; M10+=x; M01+=y; } p+=GetPixelChannels(component_image); } } centroid.x=M10*PerceptibleReciprocal(M00); centroid.y=M01*PerceptibleReciprocal(M00); for (y=0; y < (ssize_t) bounding_box.height; y++) { if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,p) == i) { M11+=(x-centroid.x)*(y-centroid.y); M20+=(x-centroid.x)*(x-centroid.x); M02+=(y-centroid.y)*(y-centroid.y); } p+=GetPixelChannels(component_image); } } component_view=DestroyCacheView(component_view); object[i].metric[n]=RadiansToDegrees(1.0/2.0*atan(2.0*M11* PerceptibleReciprocal(M20-M02))); /* fabs() is never negative, so these quadrant tests must compare against MagickEpsilon rather than 0.0 */ if (fabs(M11) < MagickEpsilon) { if ((fabs(M20-M02) >= MagickEpsilon) && ((M20-M02) < 0.0)) object[i].metric[n]+=90.0; } else if (M11 < 0.0) { if (fabs(M20-M02) >= MagickEpsilon) { if ((M20-M02) < 0.0) object[i].metric[n]+=90.0; else object[i].metric[n]+=180.0; } } else if ((fabs(M20-M02) >= MagickEpsilon) && ((M20-M02) < 0.0)) object[i].metric[n]+=90.0; } for (i=0; i < (ssize_t) component_image->colors; i++) if (((object[i].metric[n] < min_threshold) || (object[i].metric[n] >= max_threshold)) && (i != background_id)) object[i].merge=MagickTrue; } /* Merge any object not within the min and max area threshold. */ component_view=AcquireAuthenticCacheView(component_image,exception); object_view=AcquireVirtualCacheView(component_image,exception); for (i=0; i < (ssize_t) component_image->colors; i++) { register ssize_t j; size_t id; if (status == MagickFalse) continue; if ((object[i].merge == MagickFalse) || (i == background_id)) continue; /* keep object */ /* Merge this object. */ for (j=0; j < (ssize_t) component_image->colors; j++) object[j].census=0; bounding_box=object[i].bounding_box; for (y=0; y < (ssize_t) bounding_box.height; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) bounding_box.width; x++) { register ssize_t n; if (status == MagickFalse) continue; j=(ssize_t) GetPixelIndex(component_image,p); if (j == i) for (n=0; n < (ssize_t) (connectivity > 4 ? 4 : 2); n++) { register const Quantum *p; /* Compute area of adjacent objects. */ if (status == MagickFalse) continue; dx=connectivity > 4 ? connect8[n][1] : connect4[n][1]; dy=connectivity > 4 ? connect8[n][0] : connect4[n][0]; p=GetCacheViewVirtualPixels(object_view,bounding_box.x+x+dx, bounding_box.y+y+dy,1,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; break; } j=(ssize_t) GetPixelIndex(component_image,p); if (j != i) object[j].census++; } p+=GetPixelChannels(component_image); } } /* Merge with object of greatest adjacent area. 
*/ id=0; for (j=1; j < (ssize_t) component_image->colors; j++) if (object[j].census > object[id].census) id=(size_t) j; object[i].area=0.0; for (y=0; y < (ssize_t) bounding_box.height; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(component_view,bounding_box.x, bounding_box.y+y,bounding_box.width,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) bounding_box.width; x++) { if ((ssize_t) GetPixelIndex(component_image,q) == i) SetPixelIndex(component_image,(Quantum) id,q); q+=GetPixelChannels(component_image); } if (SyncCacheViewAuthenticPixels(component_view,exception) == MagickFalse) status=MagickFalse; } } object_view=DestroyCacheView(object_view); component_view=DestroyCacheView(component_view); artifact=GetImageArtifact(image,"connected-components:mean-color"); if (IsStringTrue(artifact) != MagickFalse) { /* Replace object with mean color. */ for (i=0; i < (ssize_t) component_image->colors; i++) component_image->colormap[i]=object[i].color; } (void) SyncImage(component_image,exception); artifact=GetImageArtifact(image,"connected-components:verbose"); if ((IsStringTrue(artifact) != MagickFalse) || (objects != (CCObjectInfo **) NULL)) { /* Report statistics on each unique object. */ for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width=0; object[i].bounding_box.height=0; object[i].bounding_box.x=(ssize_t) component_image->columns; object[i].bounding_box.y=(ssize_t) component_image->rows; object[i].centroid.x=0; object[i].centroid.y=0; object[i].census=object[i].area == 0.0 ? 0.0 : 1.0; object[i].area=0; } component_view=AcquireVirtualCacheView(component_image,exception); for (y=0; y < (ssize_t) component_image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(component_view,0,y,component_image->columns, 1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) component_image->columns; x++) { size_t id; id=(size_t) GetPixelIndex(component_image,p); if (x < object[id].bounding_box.x) object[id].bounding_box.x=x; if (x > (ssize_t) object[id].bounding_box.width) object[id].bounding_box.width=(size_t) x; if (y < object[id].bounding_box.y) object[id].bounding_box.y=y; if (y > (ssize_t) object[id].bounding_box.height) object[id].bounding_box.height=(size_t) y; object[id].centroid.x+=x; object[id].centroid.y+=y; object[id].area++; p+=GetPixelChannels(component_image); } } for (i=0; i < (ssize_t) component_image->colors; i++) { object[i].bounding_box.width-=(object[i].bounding_box.x-1); object[i].bounding_box.height-=(object[i].bounding_box.y-1); object[i].centroid.x=object[i].centroid.x/object[i].area; object[i].centroid.y=object[i].centroid.y/object[i].area; } component_view=DestroyCacheView(component_view); qsort((void *) object,component_image->colors,sizeof(*object), CCObjectInfoCompare); if (objects == (CCObjectInfo **) NULL) { register ssize_t j; artifact=GetImageArtifact(image, "connected-components:exclude-header"); if (IsStringTrue(artifact) == MagickFalse) { (void) fprintf(stdout,"Objects ("); artifact=GetImageArtifact(image, "connected-components:exclude-ids"); if (IsStringTrue(artifact) == MagickFalse) (void) fprintf(stdout,"id: "); (void) fprintf(stdout,"bounding-box centroid area mean-color"); for (j=0; j <= n; j++) (void) fprintf(stdout," %s",metrics[j]); (void) 
fprintf(stdout,"):\n"); } for (i=0; i < (ssize_t) component_image->colors; i++) if (object[i].census > 0.0) { char mean_color[MagickPathExtent]; GetColorTuple(&object[i].color,MagickFalse,mean_color); (void) fprintf(stdout," "); artifact=GetImageArtifact(image, "connected-components:exclude-ids"); if (IsStringTrue(artifact) == MagickFalse) (void) fprintf(stdout,"%.20g: ",(double) object[i].id); (void) fprintf(stdout, "%.20gx%.20g%+.20g%+.20g %.1f,%.1f %.*g %s",(double) object[i].bounding_box.width,(double) object[i].bounding_box.height,(double) object[i].bounding_box.x,(double) object[i].bounding_box.y, object[i].centroid.x,object[i].centroid.y, GetMagickPrecision(),(double) object[i].area,mean_color); for (j=0; j <= n; j++) (void) fprintf(stdout," %.*g",GetMagickPrecision(), object[i].metric[j]); (void) fprintf(stdout,"\n"); } } } if (objects == (CCObjectInfo **) NULL) object=(CCObjectInfo *) RelinquishMagickMemory(object); else *objects=object; return(component_image); }
copyin-3.c
/* { dg-do run } */
/* { dg-options "-O2" } */
/* { dg-require-effective-target tls_runtime } */

#include <omp.h>
#include <stdlib.h>

int thr;
#pragma omp threadprivate (thr)

int
test (int l)
{
  return l || (thr != omp_get_thread_num () * 2);
}

int
main (void)
{
  int l = 0;

  omp_set_dynamic (0);
  omp_set_num_threads (6);

  thr = 8;
  /* Broadcast the value to all threads.  */
#pragma omp parallel copyin (thr)
  ;

#pragma omp parallel reduction (||:l)
  {
    /* Now test if the broadcast succeeded.  */
    l = thr != 8;
    thr = omp_get_thread_num () * 2;
#pragma omp barrier
    l = test (l);
  }

  if (l)
    abort ();
  return 0;
}